| column | type | notes |
|---|---|---|
| `query` | string | lengths 9 – 3.4k |
| `document` | string | lengths 9 – 87.4k |
| `metadata` | dict | |
| `negatives` | list | 4 – 101 items |
| `negative_scores` | list | 4 – 101 items |
| `document_score` | string | lengths 3 – 10 |
| `document_rank` | string | 102 distinct values |
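A minimal sketch of loading the dataset and inspecting one row with the Hugging Face `datasets` library; the repository id below is a placeholder, not the dataset's actual path.

```python
from datasets import load_dataset

# Placeholder repository id -- substitute the real dataset path.
ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                # natural-language docstring query
print(row["document"][:200])       # the positive code snippet
print(len(row["negatives"]))       # 4-101 mined hard negatives
print(row["negative_scores"][:3])  # retrieval scores, stored as strings
print(row["document_score"], row["document_rank"])
```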
query:
The selector function accepts a component instance and returns the appropriate key to index plot_classes dictionary.
document:
```python
def __init__(self, selector, plot_classes, allow_mismatch=False):
    self.selector = selector
    self.plot_classes = OrderedDict(plot_classes)
    interface = self._define_interface(self.plot_classes.values(), allow_mismatch)
    self.style_opts, self.plot_options = interface
```
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_selector(self) -> Dict[str, str]:\n return self._node_selector", "def lookup_class_idx(self,label):\r\n \r\n return self.class_labels[label]", "def getSelector(self, node):\n self.checkModelOpen()\n calcEngine = CalcEngine.factory(self.client_session)\n return calcEngine.getSelector(node)", "def _idx(self, class_, key):\n return u':'.join((class_, key))", "def selector(self):\n return self._selector", "def selector(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:\n return pulumi.get(self, \"selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def node_selector(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"node_selector\")", "def lookup(x):\n options = cls.lookup_options(x, opt_type)\n selected = {o: options.options[o]\n for o in opts if o in options.options}\n if opt_type == 'plot' and defaults:\n plot = Store.registry[cls.backend].get(type(x))\n selected['defaults'] = {o: getattr(plot, o) for o in opts\n if o not in selected and hasattr(plot, o)}\n key = keyfn(x) if keyfn else None\n return (key, selected)", "def selector(**kwargs):\n return kwargs", "def selector(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorPatchArgs']]:\n return pulumi.get(self, \"selector\")", "def selector(self) -> Optional['_meta.v1.outputs.LabelSelectorPatch']:\n return pulumi.get(self, \"selector\")", "def head_service_selector(cluster_name: str) -> Dict[str, str]:\n return {RAY_COMPONENT_LABEL: f'{cluster_name}-ray-head'}", "def get_index(observable_nodes, label):\n for k in observable_nodes:\n if label in observable_nodes[k]:\n return 
observable_nodes[k][label]['category']", "def findReclassificationClass(self, button):\n for category in self.reclassificationDict.keys():\n if category == 'version' or category == 'uiParameterJsonDict':\n continue\n for edgvClass in self.reclassificationDict[category].keys():\n for buttonName in self.reclassificationDict[category][edgvClass].keys():\n if button == buttonName:\n #returning the desired edgvClass\n return (category, edgvClass)\n return ()", "def selector(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"selector\")", "def selector(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"selector\")", "def identify_class(self, cls):", "def selector(self) -> Optional['_meta.v1.outputs.LabelSelector']:\n return pulumi.get(self, \"selector\")", "def clas(self, x):\n if isinstance(x, tuple):\n index = self.tuple_to_index[x]\n elif isinstance(x, str):\n index = self.string_to_index[x]\n else:\n raise ValueError('x should be string or int; received {}'.format(\n x))\n return self.classes[index]", "def object_selector(self) -> Optional['_meta.v1.outputs.LabelSelector']:\n return pulumi.get(self, \"object_selector\")", "def object_selector(self) -> Optional['_meta.v1.outputs.LabelSelector']:\n return pulumi.get(self, \"object_selector\")", "def object_selector(self) -> Optional['_meta.v1.outputs.LabelSelector']:\n return pulumi.get(self, \"object_selector\")", "def object_selector(self) -> Optional['_meta.v1.outputs.LabelSelectorPatch']:\n return pulumi.get(self, \"object_selector\")", "def object_selector(self) -> Optional['_meta.v1.outputs.LabelSelectorPatch']:\n return pulumi.get(self, \"object_selector\")", "def object_selector(self) -> Optional['_meta.v1.outputs.LabelSelectorPatch']:\n return pulumi.get(self, \"object_selector\")", "def selector(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"selector\")", "def getn_selector(*args):\n return _ida_segment.getn_selector(*args)", "def choose_class(self, *args, **kwargs):", "def get_group_selector(*args):\n return _ida_segment.get_group_selector(*args)", "def cl(self, clname):\n out = ParameterDictionary()\n for k,p in self.iteritems():\n if \"class\" in p:\n if clname in p['class'].split(\"|\"):\n out[k] = p\n return out", "def component(self, index):\n return self.components[index]", "def needs_selector(self):\n return self.options[\"general\"][\"selector\"]", "def getClassName(self, value):\n\n className = None\n\n for target in self:\n #print \"target\", target\n #print \"value\", value\n #print \"self[target].isMember(value)\", self[target].isMember(value)\n labelIdentifier = self[target] \n if labelIdentifier.isMember(value):\n className = labelIdentifier.objectName\n\n return className", "def get_component(self, name):\n for cmpt in self.components:\n if cmpt['name'] == name:\n return cmpt", "def plotting_class(cls, obj):\n if isinstance(obj, AdjointLayout) or obj is AdjointLayout:\n obj = Layout\n if isinstance(obj, type):\n element_type = obj\n else:\n element_type = obj.type if isinstance(obj, HoloMap) else type(obj)\n if element_type is None:\n raise SkipRendering(f\"{type(obj).__name__} was empty, could not determine plotting class.\")\n try:\n plotclass = Store.registry[cls.backend][element_type]\n except KeyError:\n raise SkipRendering(f\"No plotting class for {element_type.__name__} found.\")\n return plotclass", "def find_selector(*args):\n return _ida_segment.find_selector(*args)", "def get_selector_str(page: awe.data.set.pages.Page, key: str):\n\n selector = 
page.labels.get_selector(key)\n s = '' if selector is None else f'({selector=})'\n return f'{key!r}{s}'", "def class_key(cls):\n return 5, 0, cls.__name__", "def set_selector(*args):\n return _ida_segment.set_selector(*args)", "def get_class_index(self, label):\n assert label in CLASSES\n return CLASSES.index(label)", "def selectorFor(callable, argIndex=-1):\n if argIndex == -1:\n for arg in callable.__metadata__()['arguments']:\n if arg['type'] == _C_SEL and 'sel_of_type' in arg:\n signature = arg['sel_of_type']\n break\n else:\n raise ValueError(\"No selector argument with type information\")\n\n else:\n try:\n signature = callable.__metadata__()['arguments'][argIndex]['sel_of_type']\n except (IndexError, KeyError):\n raise ValueError(\"Not a selector argument with type information\")\n\n def addSignature(function):\n return selector(function, signature=signature)\n\n return addSignature", "def __getitem__(self, index):\n return self.components[index]", "def extract_label(selector):\n return selector.split('=')[-1][:-1]", "def pick_class(classes, sort=False, **kwargs):\n def _label(c):\n try:\n return c.LABEL\n except AttributeError:\n return c.__name__\n\n if sort:\n classes = sorted(classes, key=lambda x: _label(x))\n choices = [_label(c) for c in classes]\n return pick_item(classes, choices, **kwargs)", "def key(cls, id):\n\n if isinstance(cls, type):\n cls = cls.__name__\n return str(cls) + '.' + str(id)", "def select_class_for_slot(class_name, slot_number):\n return True", "def find_component(component):\n if component == 'C1':\n return C1\n elif component == 'C2':\n return C2\n elif component == 'C3':\n return C3\n elif component == 'C4':\n return C4\n elif component == 'C5':\n return C5\n elif component == 'C6':\n return C6\n elif component == 'AS':\n return AS", "def determine_object_class(self, components_detected):\n for subimage, components in components_detected.items():\n\n for component in components:\n\n if component.class_id == 0:\n component.object_name = \"insl\" # Insulator\n\n elif component.class_id == 1:\n component.object_name = \"dump\" # Vibration dumper\n\n else:\n component.object_name = \"pillar\"", "def __getitem__(self, comp):\n \n return self.compartimentos[comp]", "def classifier(self, xvalue):\n # Initialize key variables\n probability = {}\n classes = self.classes()\n\n # Get probability of each class\n probability = self.probability(xvalue)\n\n # Reassign variables for readability\n prob_c0 = probability[classes[0]]\n prob_c1 = probability[classes[1]]\n\n # Evaluate probabilities\n if prob_c0 + prob_c1 == 0:\n selection = None\n else:\n if prob_c0 > prob_c1:\n selection = classes[0]\n elif prob_c0 < prob_c1:\n selection = classes[1]\n else:\n selection = None\n\n # Return\n return selection", "def _get_component_label(self):\n labels = df_parser(self.workflow.builder.df_path, workflow=self.workflow).labels\n if self.pdc_component_df_label not in labels:\n raise PluginFailedException('No %s label in Dockerfile, can\\'t get PDC component',\n self.pdc_component_df_label)\n return labels[self.pdc_component_df_label]", "def service_selector(self) -> ConfigNodePropertyString:\n return self._service_selector", "def label_selectors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"label_selectors\")", "def _classify(tree, x):\n # YOUR CODE HERE\n # begin answer\n feature_name=list(tree.keys())[0] #first element\n secondDict=tree[feature_name] \n key=x.loc[feature_name] #extract value from x\n for key_val in secondDict:\n 
feature_val=key_val[0]\n valueOfKey=secondDict[(feature_val, key>=feature_val)]\n if isinstance(valueOfKey,dict):\n label=_classify(valueOfKey,x)\n else:\n label=valueOfKey\n return label\n # end answer", "def __getitem__(cls, key):\n return cls(cls._nameToValue[key])", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def _get_classifier(self):\n return self.__classifier", "def get_class_label(index):\n if isinstance(index,str):\n index = int(index)\n # print(type(index))\n if index < len(class_label):\n return class_label[index]\n basic.outputlogMessage('class index: %d not found in the class list' % index)\n assert (False)\n return False", "def getclskey(cls, tmpcls, op, slot):\n return cls.getClsStagePri(tmpcls, op, slot)", "def choose_color_group(color_series):\n vc = color_series.value_counts()\n return {\n 'n_cg': len(vc), \n 'cg': vc.idxmax(), \n 'nnz_cg': len(vc.drop(0, errors='ignore'))\n }", "def get_subdocument_key(crawler=None, parser_id=None):\n for extractor in crawler['parsers']:\n if extractor.get(\"parser_id\") == parser_id:\n for selector in extractor.get('data_selectors', []):\n if selector.get('selector_attribute') == 'element':\n return selector.get(\"selector_id\")\n return", "def parameterize_class_name(cls: Type, idx: int, input_dicts: Mapping[Any, Any]):\n suffix = \"_\".join(f\"{k}_{v}\" for k, v in input_dicts.items())\n return f\"{cls.__name__}_{suffix}\"", "def class_or_id(selector):\n\n if selector[0] == '.':\n soup_selector = 'class'\n elif selector[0] == '#':\n soup_selector = 'id'\n else:\n soup_selector = ''\n\n return [soup_selector, selector[1:]]", "def XPGetWidgetClassFunc(inWidgetClass):\n pass", "def create_class_indices(self) -> None:\n\n categories = list(np.loadtxt(self.path_manager.categories_file(), delimiter=\",\", dtype=str))\n\n if self.include_noise_samples and not self.multi_label_classification:\n categories.append(\"noise\")\n\n self.class_to_idx = {}\n\n for idx, class_name in enumerate(sorted(categories)):\n self.class_to_idx[class_name] = idx", "def __getattr__(self, item):\n if item != 'driver':\n return getattr(self.driver, item)\n raise KeyError(item)", "def _selection ( self, nick ) :\n \n if not self.__selections_.has_key ( self.name() ) :\n self.__selections_[ self.name() ] = {} \n \n return self.__selections_[ self.name() ].get( nick , None )", "def category(self):\r\n return lambda cls : self.__named(cls, CategoryContext)", "def sdcToClassifier_keyword(self, sdc):\n for srname, engine in self.engineMap.iteritems():\n if(srname in sdc[\"spatialRelation\"].text):\n return engine\n return None", "def get_class(self, key: Union[int, str]) -> Union[GDScriptClass, None]:\n if (isinstance(key, str)):\n if (key.startswith(\"res://\")):\n return self._classes_by_resource_path[key]\n else:\n return self._classes_by_name[key]\n elif isinstance(key, int):\n return self._classes_by_type_id[key]\n\n raise Exception(\"Key must be str or int\")", "def setNodeClassSelector(self, class_selecting_function: callable):\n self.node_class_selector = class_selecting_function", "def _updateSelectedItem(self):\n plot = self.plot\n if plot is not None:\n 
selected = plot.selection().getSelectedItems()\n # Give priority to image over scatter\n for klass in (items.ImageBase, items.Scatter):\n for item in selected:\n if isinstance(item, klass):\n # Found a matching item, use it\n self.getHistogramWidget().setItem(item)\n return\n self.getHistogramWidget().setItem(None)", "def select_classifier(model, X, A, n_splits=5, loss_type='01', seed=None):\n if isinstance(model, (GridSearchCV, RandomizedSearchCV)):\n selected_model = _select_classifier_from_sk_search(model, X, A)\n elif isinstance(model, list):\n selected_model = _select_classifier_from_list(candidates=model, X=X, A=A, n_splits=n_splits, seed=seed,\n loss_type=loss_type)\n elif isinstance(model, dict):\n selected_model = _select_classifier_from_grid(X=X, A=A, n_splits=n_splits, seed=seed, **model,\n loss_type=loss_type)\n else: # A regular classifier was passed\n selected_model = model\n return selected_model", "def get_by_label(self, label):\n # label = label.replace(\"-\", \"\") FLB: problem with - in variable\n # Check for name in all categories in self\n for category in categories:\n method = getattr(self, category)\n for entity in method():\n if label in entity.label:\n return entity\n # Check for special names\n d = {\n 'Nothing': Nothing,\n }\n if label in d:\n return d[label]\n # Check whether `label` matches a Python class name of any category\n # l = [cls for cls in itertools.chain.from_iterable(\n # getattr(self, category)() for category in categories)\n # if hasattr(cls, '__name__') and cls.__name__ == label]\n # if len(l) == 1:\n # return l[0]\n # elif len(l) > 1:\n # raise NoSuchLabelError('There is more than one Python class with '\n # 'name %r'%label)\n # # Check imported ontologies\n # for onto in self.imported_ontologies:\n # onto.__class__ = self.__class__ # magically change type of onto\n # try:\n # return onto.get_by_label(label)\n # except NoSuchLabelError:\n # pass" ]
[ "0.54593", "0.53855276", "0.5289531", "0.52059805", "0.51838773", "0.5170124", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.51380694", "0.5136642", "0.51234514", "0.51014394", "0.50776386", "0.50721675", "0.50712216", "0.50676185", "0.5046028", "0.5046028", "0.5039571", "0.5029729", "0.5008384", "0.4984248", "0.4984248", "0.4984248", "0.4974738", "0.4974738", "0.4974738", "0.49688056", "0.4935476", "0.49108428", "0.4888566", "0.4859514", "0.4858431", "0.48218143", "0.48209092", "0.48179424", "0.48108062", "0.4803054", "0.47711197", "0.4756267", "0.4731256", "0.47299826", "0.4719632", "0.46883598", "0.4682042", "0.46710163", "0.46607003", "0.46474496", "0.46384653", "0.46333736", "0.4620729", "0.46141255", "0.46085876", "0.46021545", "0.45793298", "0.4568351", "0.45632908", "0.455388", "0.455388", "0.455388", "0.455388", "0.455388", "0.455388", "0.455388", "0.455388", "0.455388", "0.45415002", "0.45315185", "0.45247716", "0.45079824", "0.4505298", "0.44885045", "0.44864428", "0.44802186", "0.44663993", "0.446295", "0.44544408", "0.44495136", "0.4445843", "0.44457904", "0.44434077", "0.44382018", "0.44378316" ]
document_score: 0.4618312
document_rank: 68
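Per the `objective` metadata, each row supplies (query, document, negatives) triplets. Below is a hedged sketch of feeding one row to an InfoNCE-style contrastive loss; the encoder and all variable names are illustrative assumptions, not part of the dataset.

```python
import torch
import torch.nn.functional as F

def info_nce_loss(query_emb, positive_emb, negative_embs, temperature=0.05):
    """Contrastive loss over one (query, document, negatives) row.

    query_emb:     (d,)    embedding of `query`
    positive_emb:  (d,)    embedding of `document`
    negative_embs: (n, d)  embeddings of the `negatives` list
    """
    # Candidate 0 is the positive; the rest are hard negatives.
    candidates = torch.cat([positive_emb.unsqueeze(0), negative_embs], dim=0)
    logits = F.cosine_similarity(query_emb.unsqueeze(0), candidates) / temperature
    target = torch.zeros(1, dtype=torch.long)  # positive sits at index 0
    return F.cross_entropy(logits.unsqueeze(0), target)
```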
query:
Get the state of the Plot for a given frame number.
document:
```python
def __getitem__(self, frame):
    if not self.dynamic == 'open' and isinstance(frame, int) and frame > len(self):
        self.warning("Showing last frame available: %d" % len(self))
    if not self.drawn:
        self.handles['fig'] = self.initialize_plot()
    if not self.dynamic == 'open' and not isinstance(frame, tuple):
        frame = self.keys[frame]
    self.update_frame(frame)
    return self.state
```
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFrame(self, num):\n\n return self.data[num]", "def get_plot_state(self_or_cls, obj, renderer=None, **kwargs):\n if not isinstance(obj, Plot):\n obj = self_or_cls.get_plot(obj=obj, renderer=renderer, **kwargs)\n return obj.state", "def get_frame(self, frame):\n return self.frames[frame]", "def get_frame(self, index):\n filename = self.get_filename(index)\n return plt.imread(fname=filename)", "def get_state(self):\r\n return self.currentObservation", "def _get_frame(frame_index, plots):\n\n # TODO Using the indices of the self.frames, plot in correct location.\n # Okay right now there is a problem where it's unknown whether the set of coordinates\n # is a line or a dot -- that info got lost up there\n\n for amb_index in range(len(self.frames[frame_index])):\n xs = self.frames[frame_index][amb_index][0]\n ys = self.frames[frame_index][amb_index][1]\n\n # if len(xs) > 1:\n # if xs[0] == xs[1]:\n # plots[amb_index][1].set_data([xs[0]], [ys[0]])\n # if xs[-2] == xs[-1]:\n # plots[amb_index][1].set_data([xs[-1]], [ys[-1]])\n\n plots[amb_index][0].set_data(xs, ys)\n\n print(plots[len(self.ambulance_locations)])\n\n return plots,", "def get_state(self) -> np.array:\n return self.rstate.render_frame(self.rsimulator, self.grayscale)", "def get_state(self):\n return self.get_pose()", "def get_trace_state(self):\n return self.__sensor_states[4]", "def getFrame(self, num):\n\n return self.data[:, :, num]", "def get_frame(self, f):\n return self._frames[f, :]", "def get_frame(self, frame: int) -> BaseImage:\n return self.sequence[frame]", "def get_state(self,settime=False):\n\t\tif (settime):\n\t\t\tself.t=0\n\t\t\tself._history.clear()\n\t\t\tself._history = {'time':[self.t],'state': np.array(self.x)}\n\t\treturn self.x", "def get_frame(self):\n return self.frames.get()", "def _get_state(self):\n # gst's get_state function returns a 3-tuple; we just want the\n # status flag in position 1.\n return self.pipeline.get_state(Gst.CLOCK_TIME_NONE)[1]", "def get_frame(self, frame_number=None):\n try:\n import cv2\n except (ImportError, ModuleNotFoundError):\n logger.error(\n 'Import Error! Cant import cv2. Annotations operations will be limited. 
import manually and fix errors')\n raise\n if self.vid.isOpened():\n if self.frame_number is None:\n self.frame_number = self.vid.get(cv2.CAP_PROP_POS_FRAMES)\n else:\n self.frame_number += 1\n if frame_number is not None:\n self.frame_number = frame_number\n self.vid.set(cv2.CAP_PROP_POS_FRAMES, frame_number)\n ret, frame = self.vid.read()\n\n if ret:\n # Return a boolean success flag and the current frame converted to BGR\n return ret, cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n else:\n return ret, None\n else:\n return False, None", "def get_frame(self):\n return self.get_frame_at_index(self.current_frame)", "def get_frame(self, ind):\n pass", "def get_current_state(self):\n return self.nextYs[-1]", "def getObservation(self):\n return self._cur_state", "def getframe(self, num):\n if num < 0 or num > self.nframes:\n raise Exception(\"Requested frame number is out of range\")\n # Do a deep copy of the header to make a new one\n newheader = {}\n for k in self.header.keys():\n newheader[k] = self.header[k]\n frame = pixiimage(header=newheader)\n frame.nframes = self.nframes\n frame.sequencefilename = self.sequencefilename\n infile = frame._open(self.sequencefilename, \"rb\")\n frame._readframe(infile, num)\n infile.close()\n return frame", "def get_state(self) -> FrameState:\n assert self.__state is not None\n return self.__state", "def getframe(self, num):\n if num < 0 or num > self.nframes:\n raise RuntimeError(\"Requested frame number is out of range\")\n # Do a deep copy of the header to make a new one\n frame = hdf5image(header=self.header.copy())\n frame.header_keys = self.header_keys[:]\n for key in (\"dim1\", \"dim2\", \"nframes\", \"bytecode\", \"hdf5\", \"ds\"):\n frame.__setattr__(key, self.__getattribute__(key))\n frame.hdf5_location = copy.deepcopy(self.hdf5_location)\n frame.hdf5_location.set_index(num)\n if self.hdf5_location.slice:\n self.data = self.ds[tuple(self.hdf5_location.slice)]\n self.nframes = self.ds.shape[self.hdf5_location.last_index]\n else:\n self.data = self.ds[:]\n return frame", "def _state_index(state):\n delta_y, delta_x, bird_lmh, pipe_lmh, is_flapping = state\n actions, height, width, _, _, _ = Q.shape\n\n y = int((height / 2) + (delta_y / step_r) - 1)\n x = int((width / 2) + (delta_x / step_c) - 1)\n\n return y, x, bird_lmh, pipe_lmh, is_flapping", "def get_state(self):\n return self.env.sim.get_state()", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def get_frame(self, frame_num=1, display=False):\n #pdb.set_trace()\n frame_size=(512,512)\n frame_data=win32com.client.VARIANT(pythoncom.VT_BYREF | pythoncom.VT_ARRAY | pythoncom.VT_UI2, numpy.empty(frame_size))\n\n #frame_data2=win32com.client.VARIANT(pythoncom.VT_BYREF | pythoncom.VT_ARRAY | pythoncom.VT_UI2, numpy.empty(frame_size))\n\n frame_data=self.appdoc.GetFrame(1,frame_data)\n #frame_data2=self.appdoc.GetFrame(2,frame_data2)\n #pdb.set_trace()\n if hasattr(self,\"display\"):\n display = (display or self.display)\n if display:\n plt.imshow(frame_data, cmap='gray')\n plt.show()\n return numpy.array(frame_data, dtype=numpy.uint16)", "def state(self):\n return self.probe.get_data(self.variable)", "def state(self):\n return self.probe.get_data(self.variable)", "def get_frame(self):\n return self.frame", "def get_value(self, point: Point) -> FieldState:\n return self.arr[point.y][point.x]", "def _get_returned_state(self):\n if self.img_type == 'RGB':\n return np.rollaxis(np.array(self.rgb), -1, 0)\n elif self.img_type == 
'SEMANTIC':\n return np.rollaxis(np.array(self.semantic), -1, 0)\n else:\n raise Exception(f'There is no {self.img_type} observation type')", "def frame_index(self):\n return self._findex", "def getPlotFocus():\n return simuConfig[\"PLOT.FOCUS\"]", "def _get_current_plot_item(self):\n return self.io_model.img_dict_keys[self.data_opt - 1]", "def get_frame(self, i: int):\r\n try:\r\n return self.frames[i]\r\n except IndexError:\r\n return None", "def _get_obs(self):\n return self.observation_function[self.cur_state]", "def get_state(self, index):\n if self._count == 0:\n raise IndexError('Empty Memory')\n\n index %= self._count\n history_length = self._history_length\n\n # If index > history_length, take from a slice\n if index >= history_length:\n return self._states[(index - (history_length - 1)):index + 1, ...]\n else:\n indexes = np.arange(index - history_length + 1, index + 1)\n return self._states.take(indexes, mode='wrap', axis=0)", "def getstate(self):\n self._oldpacket = self.state.packet_number\n self._oldsticks = self.sticks.copy()\n self._oldtriggers = self.triggers.copy()\n self._oldbuttons = self.buttons\n res = xinput.XInputGetState(0, ctypes.byref(self.state))\n if res == ERROR_SUCCESS:\n if self.state.packet_number > self._oldpacket:\n #print(hex(self.state.gamepad.buttons))\n self.sticks = np.array([self.state.gamepad.l_thumb_x, self.state.gamepad.l_thumb_y,\n self.state.gamepad.r_thumb_x, self.state.gamepad.r_thumb_y])\n self.triggers = np.array([self.state.gamepad.left_trigger, \n self.state.gamepad.right_trigger])\n self.buttons = self.state.gamepad.buttons\n self.sticks = applydeadband(self.sticks,self.stickdeadzone)\n self.triggers = applydeadband(self.triggers, self.triggerdeadzone)\n self.packet = self.state.packet_number\n return\n if res == ERROR_DEVICE_NOT_CONNECTED:\n raise RuntimeError(\n \"Error %d device not connected\" % (res))", "def get_state_num(self):\n robot_state = self.get_state('turtlebot3_waffle_pi','world')\n ball_state = self.get_state('soccer_ball','world')\n # each object is in a \"box\" that is RESOLUTION meters wide.\n robot_xbox = np.ceil((robot_state.pose.position.x-Learn.FIELD_XLEFT)/Learn.RESOLUTION)\n robot_ybox = np.ceil(robot_state.pose.position.y/Learn.RESOLUTION)\n ball_xbox = np.ceil((ball_state.pose.position.x-Learn.FIELD_XLEFT)/Learn.RESOLUTION)\n ball_ybox = np.ceil(ball_state.pose.position.y/Learn.RESOLUTION)\n # the state is the combination of dx and dy.\n dx = int(ball_xbox - robot_xbox)\n dy = int(ball_ybox - robot_ybox)\n # adjusting to remove negative values for states\n dx += Learn.BOXES_X-1\n dy += Learn.BOXES_Y-1\n # converting to unique number between 0 and NSTATES-1:\n return (2*Learn.BOXES_X-1)*dy+dx", "def state(self) -> int | None:\n\n if self.coordinator.data:\n data = self.coordinator.data.get(self._cam_name, {}).get(\n f\"{self._fps_type}_fps\"\n )\n if data is not None:\n try:\n return round(float(data))\n except ValueError:\n pass\n return None", "def _get_frame_index(self, frame):\n if isinstance(frame, cf.CoordinateFrame):\n frame = frame.name\n #frame_names = [getattr(item[0], \"name\", item[0]) for item in self._pipeline]\n frame_names = [step.frame if isinstance(step.frame, str) else step.frame.name for step in self._pipeline]\n return frame_names.index(frame)", "def state(self):\n return self._data.yinli", "def get_state(self):\n raise NotImplementedError", "def get_state(self):\n raise NotImplementedError", "def frame_idx(self) -> int:\n pass", "def get_state(self):\n\n return self.t, self.x", "def 
get_state(self, frames=3):\n if frames<2:\n raise ValueError('Needs at least 2 frames to determine velocity')\n self.flush_buffer()\n start_t = time.time()\n \n # time this to make sure we aren't blocking on get_pos for too long\n puck_history = []\n time_history = []\n p_pos, p_vel, p_pos_test = [0,0], [0,0], [0,0]\n s1_pos, s2_pos = [0,0], [0,0]\n for i in range(frames):\n _, frame = self.cam.read()\n t = time.time()-start_t\n p = self.get_pos(frame)\n \n if p[0] is not None:\n puck_history.append(p[0])\n time_history.append(t)\n # choose last nonzero striker locations\n if p[0] is not None:\n p_pos_test = p[0]\n if p[1] is not None:\n s1_pos = p[1]\n if p[2] is not None:\n s2_pos = p[2]\n \n # estimate puck position at current time\n if len(puck_history)==0:\n pass\n elif len(puck_history)==1:\n p_pos = puck_history[0]\n else:\n # do linear regression\n a = np.array([[t,1] for t in time_history])\n b = np.array(puck_history)\n m = np.linalg.lstsq(a, b, rcond=None)[0]\n \n t = np.array([[time.time()-start_t, 1]])\n p_pos = np.dot(t,m)[0]\n p_vel = m[:,0]\n \n return np.array([p_pos, p_vel, s1_pos, s2_pos])", "def state(self) -> int | None:\n if self.coordinator.data:\n data = self.coordinator.data.get(\"detection_fps\")\n if data is not None:\n try:\n return round(float(data))\n except ValueError:\n pass\n return None", "def _get_frame(self, key):\n pass", "def get_state(self):\n return self.fmu._get_continuous_states()", "def current_frame(self) -> str:\n return self.frames[self.frame]", "def current_frame(self):\n return self._current_frame", "def get_state(self):\n return STAR_STATES[self.state][0]", "def get_state(self):\n return STAR_STATES[self.state][0]", "def get_state(self):\n curr_state = self.lidar_ranges[self.indices]\n # curr_state = (self.lidar_ranges[self.indices] - 5.) / 5.\n # print(curr_state)\n\n return curr_state", "def get_frame(self):\n return self.last_frame", "def get_frame_at_index(self, index):\n begin = index\n end = begin + self.frame_size\n\n if begin < 0:\n return self.data\n\n return self.data[begin:end]", "def current_state(self):\n return self.obs_hook(self._current_obs)", "def __call__(self, tab):\n return self.frame_dict.get(tab)", "def get_rgb_frame(self) -> np.array:\n return self.rstate.render_frame_rgb(self.rsimulator)", "def read(self, frame_number=None):\n if frame_number is not None: # seek\n self.seek(frame_number)\n ret, frame = self._vr.read() # read\n if ret and self._crop is not None:\n frame = frame[self._crop[1]:self._crop[3], self._crop[0]:self._crop[2], :]\n return ret, frame", "def state(self, idx=None):\n return self.options[self.idx] if idx is None else self.options[idx]", "def get_state(self, _settings = None):\n settings = _settings or {}\n settings['window_type'] = 'ImageWindow'\n settings['actionPlotSettings'] = self.actionPlotSettings.isChecked()\n # Disabled QLineEdits are confusing to QSettings. 
Store a dummy _\n settings['x_label'] = \"_\" + self.settingsWidget.ui.x_label.text()\n settings['y_label'] = \"_\" + self.settingsWidget.ui.y_label.text()\n settings['x_label_auto'] = self.settingsWidget.ui.x_label_auto.isChecked()\n settings['y_label_auto'] = self.settingsWidget.ui.y_label_auto.isChecked()\n settings['colormap_min'] = str(self.settingsWidget.ui.colormap_min.text())\n settings['colormap_max'] = str(self.settingsWidget.ui.colormap_max.text())\n settings['transpose'] = self.settingsWidget.ui.transpose.currentText()\n settings['flipx'] = self.settingsWidget.ui.flipx.currentText()\n settings['flipy'] = self.settingsWidget.ui.flipy.currentText()\n settings['viewbox'] = self.plot.getView().getViewBox().getState()\n settings['x_view'] = self.actionX_axis.isChecked()\n settings['y_view'] = self.actionY_axis.isChecked()\n settings['histogram_view'] = self.actionHistogram.isChecked()\n settings['crosshair'] = self.actionCrosshair.isChecked()\n settings['circular_roi'] = self.actionCircularROI.isChecked()\n settings['gradient_mode'] = self.plot.getHistogramWidget().item.gradient.saveState()\n \n return DataWindow.get_state(self, settings)", "def GetState(self):\r\n \r\n return self.state", "def get_camera_state(self, parameter):\n return self.opt.getParameter(parameter)", "def get_frame(self, frame_ord):\n\n cframe = self.active_frame\n c = 0\n while cframe is not None:\n if c == frame_ord:\n return cframe\n cframe = cframe.f_back\n c += 1\n return None", "def get_current_pic(self):\n # Change the picture\n if clock() > self.next_frame:\n if self.exploding:\n if self.frame_num < self.num_of_explosion_frames:\n self.frame_num += 1\n else:\n self.away = True\n else:\n # Chose animation direction by the asteroid's horizontal direction\n self.frame_num += int(copysign(1, self.speed[0]+self.acceleration[0]))\n if self.frame_num >= self.num_of_images:\n self.frame_num = 0\n elif self.frame_num < 0:\n self.frame_num = self.num_of_images - 1\n self.next_frame = clock() + self.frame_time\n return self.current_image_set[self.frame_num]", "def getFrame(self):\n s, image = self.capture.read()\n return image", "def read(self):\r\n frame = self.last_frame\r\n return frame", "def run_animation(self):\n\n def _get_frame(frame_index, plots):\n \"\"\" Should be called by run_animations only. 
\"\"\"\n\n # TODO Using the indices of the self.frames, plot in correct location.\n # Okay right now there is a problem where it's unknown whether the set of coordinates\n # is a line or a dot -- that info got lost up there\n\n for amb_index in range(len(self.frames[frame_index])):\n xs = self.frames[frame_index][amb_index][0]\n ys = self.frames[frame_index][amb_index][1]\n\n # if len(xs) > 1:\n # if xs[0] == xs[1]:\n # plots[amb_index][1].set_data([xs[0]], [ys[0]])\n # if xs[-2] == xs[-1]:\n # plots[amb_index][1].set_data([xs[-1]], [ys[-1]])\n\n plots[amb_index][0].set_data(xs, ys)\n\n print(plots[len(self.ambulance_locations)])\n\n return plots,\n\n fig = plt.figure(figsize=(14, 8))\n\n # TODO need [number of ambulances] x [number of states]\n\n plots = []\n for i in range(len(self.ambulance_locations)):\n new_color = self.ambulance_colors[i]\n\n line_plot, = plt.plot([], [],\n marker='+',\n linestyle='',\n markerfacecolor=new_color,\n markeredgecolor=new_color,\n label=\"Ambulance {} Path\".format(i + 1))\n\n # dot_plot, = plt.plot([], [],\n # marker='o',\n # linestyle='',\n # markerfacecolor=new_color,\n # markeredgecolor=new_color)\n\n # plots.append([line_plot, dot_plot])\n\n plots.append([line_plot])\n\n base_plot = plt.scatter([base.longitude for base in self.bases],\n [base.latitude for base in self.bases],\n marker=\"D\", color=\"black\", label=\"Bases\")\n hospital_plot = plt.scatter([hospital.longitude for hospital in self.hospitals],\n [hospital.latitude for hospital in self.hospitals],\n marker=\"P\", color=\"r\", label=\"Hospitals\")\n\n plots.append(base_plot)\n plots.append(hospital_plot)\n\n # TODO Make boundaries parameters\n\n img = plt.imread(\"./visuals/simple.png\")\n plt.imshow(img, extent=[-117.017637, -117.167672, 32.710484, 32.823033])\n plt.legend(loc=\"upper right\")\n print(\"draw the animation\")\n ani = animation.FuncAnimation(fig, _get_frame, len(self.frames),\n fargs=(plots,), interval=50)\n\n plt.show()\n\n # fps = 15\n # print('save the animation')\n # print(\"it may take up to {}\".format(len(self.frames)/fps))\n # ani.save('regional_vis6.mp4', fps=fps, dpi=150)", "def state(self):\n if self.coordinator.data:\n return self.coordinator.data[self._sensor]", "def plot_state(\n self, highlightRobot=None, plotRobotIDs=True,\n returnax=True, figname=\"kaijuGrid.pdf\"\n ):\n from kaiju.utils import plotOne\n if returnax:\n ax = plotOne(\n step=0, robotGrid=self, isSequence=False,\n plotRobotIDs=plotRobotIDs,\n highlightRobot=highlightRobot, returnax=True\n )\n return ax\n else:\n plotOne(\n step=0, robotGrid=self, figname=figname, isSequence=False,\n plotRobotIDs=plotRobotIDs,\n highlightRobot=highlightRobot\n )", "def getstate(self):\r\n return GPBase.getstate(self) + [self.Z,\r\n self.num_inducing,\r\n self.has_uncertain_inputs,\r\n self.X_variance]", "def frame(self):\n return self._frame", "def frame(self):\n return self._frame", "def frame(self):\n return self._frame", "def read_frame(self):\n return self.decode_frame(self.grab_frame())", "def get_state(self):\n pass", "def get_state(self):\n return np.append(self.game.game_board.get_board(),\n [self.game.player_1.color, self.game.player_2.color])[None, :]", "def getImage(self):\n\n\t\t\t_frame = str(self._frame)\n\t\t\tself._counter += 1\n\t\t\tif self._counter >= 10/ANIMATION_SPEED:\n\t\t\t\tself._frame += 1\n\t\t\t\tself._counter = 0\n\n\t\t\tif self.images[self.getState()][self.getDirection()][str(self._frame)] == {}:\n\t\t\t\tself._frame = 0\n\n\t\t\treturn 
self.images[self.getState()][self.getDirection()][_frame]", "def get_state(self, state):\n return state", "def read(self):\n return self.frame", "def observation(state):\n return state[:4]", "def get_active_plot(self):\n for plot in list(self.plots.values()):\n canvas = plot.canvas()\n if canvas.hasFocus():\n return plot\n return self.default_plot", "def getframe(var,model,data):\n from PyAnUtils.plotstyles import njStyle\n pstyle = njStyle()\n pstyle.cd()\n\n frame = var.frame()\n data.plotOn(frame)\n model.plotOn(frame)\n\n return frame", "def getValue(self, state):\r\n return self.values[state]", "def getFrameByName(self, frameName):\n return self.data.frames[frameName]", "def get_plot(session_id, test_name):\n return Plot.get_plot(session_id, test_name)", "def state(self):\n print(\"getter of variable state called\")\n return self._state", "def get_state(self):\n return ONEUP_STATES[self.state][0]", "def get_state(self, observation):\n values = make_discrete(\n observation,\n self.__environment.observation_space,\n CartPoleEnvironment.intervals,\n )\n\n velocity_state = 0\n if observation[1] > 0:\n velocity_state = 1\n elif observation[1] < 0:\n velocity_state = 2\n\n angular_velocity_state = 0\n if observation[3] > 0:\n angular_velocity_state = 1\n elif observation[3] < 0:\n angular_velocity_state = 2\n\n return (\n values[0],\n velocity_state,\n values[2],\n angular_velocity_state\n )", "def frame_info( self ):\n return self.__frame_id, int(self.__frame_timestamp)", "def get_pixel(self, frame: int, x: int, y: int) -> Color:\n return self.get_frame(frame).clone()[x, y]", "def _get_state(self):", "def curframe(self):\n return self._stack[self._curframe_index][0]", "def getValue(self, state):\n return self.values[state]", "def store(self, state):\n if self.interactive:\n self._fig.clear()\n fig = self._fig\n else:\n fig = plt.figure()\n\n self._plot_function(fig, copy_state(state))\n\n fig.canvas.draw()\n if not self.interactive:\n plt.show()", "def get_state(self):\n return self.state" ]
[ "0.6576043", "0.64040893", "0.6168167", "0.61134017", "0.6076815", "0.6055128", "0.5951637", "0.5882845", "0.58641803", "0.5848422", "0.5799237", "0.5756058", "0.5718137", "0.5702686", "0.56998396", "0.56985414", "0.5595627", "0.5593545", "0.5543105", "0.5521952", "0.5484779", "0.5452552", "0.54350305", "0.54116875", "0.5400628", "0.53795344", "0.53795344", "0.53495896", "0.53447247", "0.53447247", "0.5330734", "0.5323877", "0.5321935", "0.5300624", "0.5286061", "0.5275684", "0.52725905", "0.5266434", "0.52539676", "0.52393544", "0.5222778", "0.5222343", "0.5202655", "0.51900214", "0.5161833", "0.5161833", "0.5152771", "0.5151661", "0.514167", "0.5138371", "0.5134891", "0.5131366", "0.5124206", "0.51214033", "0.5120739", "0.5120739", "0.51158875", "0.5114924", "0.5103424", "0.51003104", "0.5083472", "0.50812405", "0.5080831", "0.5079257", "0.5077222", "0.5059588", "0.505588", "0.50533956", "0.50521743", "0.5044515", "0.5042905", "0.5042371", "0.5040233", "0.50328434", "0.50316435", "0.5030215", "0.5030215", "0.5030215", "0.50224286", "0.5013078", "0.5008031", "0.50023866", "0.49807137", "0.49806842", "0.49796557", "0.4975364", "0.4967963", "0.49673888", "0.49672985", "0.49625754", "0.49583057", "0.49566832", "0.49502468", "0.49479824", "0.49465933", "0.49425745", "0.4936872", "0.49288136", "0.49268696", "0.49101588" ]
document_score: 0.6989357
document_rank: 0
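Rows carry up to 101 scored negatives, and training pipelines frequently subsample only the hardest ones. A small sketch under the assumption, consistent with the rows shown, that scores are stored as strings sorted in descending order:

```python
def hardest_negatives(row, k=8):
    # Pair each negative snippet with its score and keep the top-k hardest.
    scored = zip(row["negatives"], (float(s) for s in row["negative_scores"]))
    ranked = sorted(scored, key=lambda pair: pair[1], reverse=True)
    return [snippet for snippet, _ in ranked[:k]]
```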
query:
Required on each MPLPlot type to get the data corresponding just to the current frame out from the object.
document:
```python
def _get_frame(self, key):
    pass
```
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(self):\n return [self.axes]", "def plot_data(self):", "def _get_raw_data(self, idx=0):\n # Get the time step then make a data frame\n raise NotImplementedError('Code me up!')\n #data = self._data[???]\n return data", "def __data_frame(self):\n return self._trajectory", "def get_data(self):\n for i, plot in enumerate(self.plots):\n for j, trace in enumerate(plot):\n self.plots[i][j][\"y\"] = [\n self.function_mapper[x](i) if x in self.function_mapper else x\n for x in trace[\"y\"]\n ]\n return self.plots", "def get_data(self): # TODO: add smooth possibility\n return self.data", "def data(self):\r\n raise NotImplementedError", "def _get_current_plot_item(self):\n return self.io_model.img_dict_keys[self.data_opt - 1]", "def dataFrame(self):\n return self._dataFrame", "def data(self):", "def item(self):\n if self.data.shape != ():\n raise RuntimeError(\"Cannot call item on non-scalar type!\")\n return self.data", "def getCurentData(self):\n if not self.labExperiment:\n super().getCurentData()\n else:\n return np.array(self.connection.query('get_actuator_data'))", "def _get_data(self):\n raise NotImplementedError()", "def get_data(self):\n raise NotImplementedError(\"Not implemented!\")", "def data(self):\n return self._frame.copy(deep=True)", "def get_zyla_data_object(evt, src):\n o = evt.get(_psana.Zyla.FrameV1, src)\n if o is not None: return o\n\n return None", "def getSpectrumData(self):\n raise NotImplementedError('You must implemented getSpectrumData for your subclass.')", "def data(self):\n\t\treturn vars(self)", "def retrieve_plot_data(self):\n spec2nexus.specplot.LinePlotter.retrieve_plot_data(self)\n\n if self.signal in self.data:\n # can't plot negative Y on log scale\n # Alternative to raising NotPlottable would be\n # to remove any data where Y <= 0\n if min(self.data[self.signal]) <= 0:\n msg = \"cannot plot Y<0: \" + str(self.scan)\n raise spec2nexus.specplot.NotPlottable(msg)\n\n # in the uascan, a name for the sample is given in `self.scan.comments[0]`\n self.set_y_log(True)\n self.set_plot_subtitle(\n \"#%s uascan: %s\" % (str(self.scan.scanNum), self.scan.comments[0])\n )", "def get_data(self):\r\n pass", "def get_data(self):", "def dataframe(self):\n\t\treturn self._dataframe", "def get_frame_data(self):\n # FrameObject is a dictionary of slot names and values.\n frameObject = self.pgdb.sendPgdbFnCall('get-frame-object', self.frameid)\n if not frameObject:\n raise PythonCycError(\"Could not retrieve frame \"+self.frameid+\" from organism (orgid) \"+self.pgdb._orgid)\n else:\n self._gotframe = True\n # Modify slot names to allow Python's syntax (e.g., '_' instead of '-').\n for slot in frameObject:\n self.__dict__[convertLispIdtoPythonId(slot)] = frameObject[slot]\n return self", "def GetDataAsObject(self):", "def get_data(self):\n return [\n self.offset, self.diag, self.orig_freq_diag, self.orig_freq_dims\n ]", "def GetPyData(self, item):\r\n\r\n return item.GetData()", "def getData(self, slice=None):\n\t\traise NotImplementedError", "def get(self):\r\n return self.data_array", "def get_data(self):\n pass", "def get_data(self):\n pass", "def get_plot_data():\n if not hasattr(g, 'plot_data'):\n g.plot_data = plot.bar_plot(get_data().data_frame)\n return g.plot_data", "def data(self):\n pass", "def data(self):\n pass", "def get_model_data_for_trace(self, trace):\n pass", "def data_object(self) -> any:\r\n\r\n return self.__data_object", "def _process_plot_data(self, pattern, data, timestamps):\n raise NotImplementedError(\"Implement _process_plot_data(self, 
pattern, data) in subclass\")", "def __init__(self, the_PyBERT):\n\n plotdata = the_PyBERT.plotdata\n\n the_data = ArrayPlotData()\n\n for item_name in self._item_names:\n the_data.set_data(item_name, plotdata.get_data(item_name))\n\n self.the_data = the_data", "def handle_data(self, point: Tuple[float, float], frame) -> None:", "def obs_data(self):\n\n return self._obs_data", "def get_uxi_data_object(evt, src):\n o = evt.get(_psana.Uxi.FrameV1, src)\n if o is not None: return o\n\n return None", "def data_frame_info(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.info())", "def plot_data(self):\n if hasattr(self,'data'):\n plt.scatter(*self.data.T)\n plt.show()\n else:\n raise Exception('No 2d data of the instance has been loaded')", "def data(self):\n return self._data", "def getdatap(self, max_axes):\n\n\t\tself.rho_p = self.rho_r[self.rho_r < max_axes[0]]\n\t\tself.T_p = self.T_r[self.T_r < max_axes[1]]\n\t\tself.len_p = np.array([len(self.rho_p), len(self.T_p)])\n\n\t\tself.data_p = {}\t#Start with a fresh copy of the data\n\n\t\tfor item in self.data.keys():\n\t\t\tself.data_p[item] = self.make2Darray(self.rho_p, self.T_p, self.data[item], self.len_r[0])", "def data(self):\n return self.d", "def __getitem__(self, index):\n return self._timeseriesData[index]", "def get_xy_values(plot):\n return Plot.get_xy_values(plot)", "def __call__(self,data_labels = None):\n\t\tdata = None\n\t\tcurrentTime = time.time()\n\t\tif abs(currentTime - self.prevReadTime) >= (1/self.updateFreq):\n\t\t\tself.data = fxReadDevice(self.devId,self.varsToStream)\n\t\tself.prevReadTime = currentTime\n\t\tif data_labels != None:\n\t\t\tdata_index =[]\n\t\t\tfor label in data_labels:\n\t\t\t\tdata_index.append(self.varsToStream.index(label))\n\t\t\tdata = [self.data[index] for index in data_index]\t\n\t\telse:\n\t\t\tdata = self.data\n\n\t\treturn data", "def get_data(self):\n if self.downsample:\n data = self.daily_rolling_panel.get_current()\n else:\n data = self.rolling_panel.get_current()\n\n if self.supplemental_data is not None:\n for item in data.items:\n if item not in self.supplemental_data.items:\n continue\n for dt in data.major_axis:\n try:\n supplemental_for_dt = self.supplemental_data.ix[\n item, dt, :]\n except KeyError:\n # Only filling in data available in supplemental data.\n supplemental_for_dt = None\n\n if supplemental_for_dt is not None:\n data[item].ix[dt] = \\\n supplemental_for_dt.combine_first(\n data[item].ix[dt])\n\n # screen out sids no longer in the multiverse\n data = data.ix[:, :, self.latest_sids]\n if self.clean_nans:\n # Fills in gaps of missing data during transform\n # of multiple stocks. E.g. 
we may be missing\n # minute data because of illiquidity of one stock\n data = data.fillna(method='ffill')\n\n # Hold on to a reference to the data,\n # so that it's easier to find the current data when stepping\n # through with a debugger\n self._curr_data = data\n\n return data", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(self):\n return self.data", "def get_data(name, xarray=False, metadata=False):\n\n if name not in pytplot.data_quants.keys():\n print(\"That name is currently not in pytplot\")\n return\n \n temp_data_quant = pytplot.data_quants[name]\n\n if isinstance(temp_data_quant, dict):\n # non-record varying variables are stored as dicts\n return temp_data_quant['data']\n\n if xarray:\n return temp_data_quant\n\n if metadata:\n return temp_data_quant.attrs\n\n if 'v1' in temp_data_quant.coords.keys() and 'v2' in temp_data_quant.coords.keys() and 'v3' in temp_data_quant.coords.keys():\n variable = namedtuple('variable', ['times', 'y', 'v1', 'v2', 'v3'])\n return variable(temp_data_quant.time.values, temp_data_quant.data, temp_data_quant.coords['v1'].values, temp_data_quant.coords['v2'].values, temp_data_quant.coords['v3'].values)\n elif 'v1' in temp_data_quant.coords.keys() and 'v2' in temp_data_quant.coords.keys():\n variable = namedtuple('variable', ['times', 'y', 'v1', 'v2'])\n return variable(temp_data_quant.time.values, temp_data_quant.data, temp_data_quant.coords['v1'].values, temp_data_quant.coords['v2'].values)\n elif 'v1' in temp_data_quant.coords.keys():\n variable = namedtuple('variable', ['times', 'y', 'v1'])\n return variable(temp_data_quant.time.values, temp_data_quant.data, temp_data_quant.coords['v1'].values)\n elif 'v' in temp_data_quant.coords.keys():\n variable = namedtuple('variable', ['times', 'y', 'v'])\n return variable(temp_data_quant.time.values, temp_data_quant.data, temp_data_quant.coords['v'].values)\n elif 'spec_bins' in temp_data_quant.coords.keys():\n variable = namedtuple('variable', ['times', 'y', 'v'])\n return variable(temp_data_quant.time.values, temp_data_quant.data, temp_data_quant.coords['spec_bins'].values)\n variable = namedtuple('variable', ['times', 'y'])\n\n return variable(temp_data_quant.time.values, temp_data_quant.data)", "def __getitem__(self, idx):\n x = self.all_transforms(self.data[idx])\n y = self.labels[idx]\n return x, y", "def __init__(self, dataFrame):\n self.dataFrame = dataFrame", "def _get_frame(frame_index, plots):\n\n # TODO Using the indices of the self.frames, plot in correct location.\n # Okay right now there is a problem where it's unknown whether the set of coordinates\n # is a line or a dot -- that info got lost up there\n\n for amb_index in range(len(self.frames[frame_index])):\n xs = self.frames[frame_index][amb_index][0]\n ys = self.frames[frame_index][amb_index][1]\n\n # if len(xs) > 1:\n # if xs[0] == xs[1]:\n # plots[amb_index][1].set_data([xs[0]], [ys[0]])\n # if xs[-2] == xs[-1]:\n # plots[amb_index][1].set_data([xs[-1]], [ys[-1]])\n\n plots[amb_index][0].set_data(xs, ys)\n\n print(plots[len(self.ambulance_locations)])\n\n return plots,", "def _get_y_data(self):\n return self.y(self.xs)", "def get_data(self):\n\n return super().get_data()", "def _get_data(self, event):\n if event.xdata is None:\n return None, None\n xdata = np.clip(event.xdata, *self.ax.get_xbound())\n ydata = np.clip(event.ydata, *self.ax.get_ybound())\n return xdata, ydata", "def getPlot(self):\n return self.axes", "def data(self):\n self._data: np.ndarray\n return self._data", "def 
GetNoDataValue(self):\n result = super(Band, self).GetNoDataValue()\n if result is not None:\n return self.NumPyDataType(result)", "def get_data(self):\n\n raise NotImplementedError('''\n Must Implement get_data. Call help() for details.\n ''')", "def get_data(self):\r\n return self.data.copy()", "def _get_observation(self, observation):", "def get_associated_plot_data_single(data, special, suffix=\"\"):\n special_data = []\n list_of_lbls = special.get('data', [])\n for lbl in list_of_lbls:\n special_data += [d for d in data\n if get_name(lbl) + suffix == d[0].replace('.dat', '')]\n return special_data", "def get(self):\n if not self.is_raw_data and self._interp_xs is not None:\n return self.get_interpolation()\n return self._ys, self._xs", "def __getitem__(self, idx):\n return self.perf_data[idx]", "def getAxisValuesEvent(self): \n varID = self.myParent.getVar().id\n axisVar = MV2.array(self.axis)\n axisVar.setAxis(0, self.axis)\n axisVar.id = varID +'_' + self.axis.id + '_axis'\n\n # Generate teaching command string\n fileID = 'fid2'\n teachingCommand = \"\\n## Getting axis %s\\n\" % self.axis.id\n teachingCommand += \"%s = MV2.array(%s[\\\"%s\\\"].getAxisList(axes = \\\"%s\\\")[0][:])\\n\" % (axisVar.id, fileID, varID, self.axis.id)\n teachingCommand += \"%s.setAxis(0, %s[\\\"%s\\\"].getAxisList(axes = \\\"%s\\\")[0])\\n\" % (axisVar.id, fileID, varID, self.axis.id)\n teachingCommand += \"%s.id = \\\"%s\\\"\\n\" % (axisVar.id, axisVar.id)\n\n # Record teaching commands associate 'get axis values' and\n # define a new variable/tab with only the axis' values \n self.myParent.defineVarAxis(axisVar, teachingCommand)", "def _get_data(self, position):\n index = self._indexes[position]\n basename = self._waves[index].with_suffix(\".npy\").name\n return tuple(np.load(self._path / x / basename) for x in self._variables)", "def get_data():\n pass", "def get_timepix_data_object(evt, src):\n o = evt.get(_psana.Timepix.DataV2, src)\n if o is not None: return o\n\n o = evt.get(_psana.Timepix.DataV1, src)\n if o is not None: return o\n\n return None", "def getDataDict(self):\n #code begins here \n return self.__dflst,self.__dfwells", "def data(self):\n return self.__dict__", "def on_new_data(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update plot data\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.plotted_data[i].setData(self.data_indices, self.measurements_list[i])", "def __repr__(self):\n return f\"{self.__class__.__name__}({self._axl_data})\"", "def get_plot_data(self, model: str) -> dict:\n if model not in self._plot_data:\n raise ModelNotAssociatedError(\"{m} does not have any plot data associated with it in this \"\n \"spectrum\".format(m=model))\n\n return self._plot_data[model]", "def get_representative_data_object(self, obj):\n if self.dim == 0:\n # In this way, obj can be a data object and this class can be\n # used even if the assignment is not between \"flattened components\"\n return obj\n else:\n nominal_index = self.nominal_index\n return obj[nominal_index]", "def tondarray(self):\r\n return self.data;", "def timer_plot_data_out(self, w):\n w.update_plot(self.getLaps())", "def data(self):\n return self", "async def plot_device_data(self, axes, name) -> []:\n pass", "def __getitem__(self, item) -> Union[MoleculeDatapoint, List[MoleculeDatapoint]]:\n if self.preload:\n return self.data_ram[item]\n else:\n return self.data[item]", "def get_data(self)->pd.DataFrame:\n pass", "def data(self):\n if not hasattr(self, \"_data\"):\n 
self._data = self._get_data()\n return self._data", "def get_data(self):\r\n return self.kinds", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def get_data(self):\n return self._data", "def GetStructuredData(self): # real signature unknown; restored from __doc__\n pass", "def reinitialiseData(self):\n if self.arrayPlotData is not None:\n self.currentPosition = 0\n self.xs = scipy.linspace(0.0, self.numberOfPoints*self.resolution, self.numberOfPoints)\n self.cursorXS = self.getCurrentPositionArray()\n self.cursorVertical = scipy.array([self.verticalLimit,0.0])\n self.arrayPlotData.set_data(\"xs\",self.xs)\n self.array0 = scipy.zeros(self.numberOfPoints)\n self.array1 = scipy.zeros(self.numberOfPoints)\n self.array2 = scipy.zeros(self.numberOfPoints)\n self.array3 = scipy.zeros(self.numberOfPoints)\n self.array4 = scipy.zeros(self.numberOfPoints)\n self.array5 = scipy.zeros(self.numberOfPoints)\n self.array6 = scipy.zeros(self.numberOfPoints)\n self.array7 = scipy.zeros(self.numberOfPoints)\n self.channels = [self.array0,self.array1,self.array2,self.array3,\n self.array4,self.array5,self.array6,self.array7]\n self.updateArrayPlotData()", "def updateArrayPlotData(self):\n self.arrayPlotData.set_data(\"channel0\",self.array0)\n self.arrayPlotData.set_data(\"channel1\",self.array1)\n self.arrayPlotData.set_data(\"channel2\",self.array2)\n self.arrayPlotData.set_data(\"channel3\",self.array3)\n self.arrayPlotData.set_data(\"channel4\",self.array4)\n self.arrayPlotData.set_data(\"channel5\",self.array5)\n self.arrayPlotData.set_data(\"channel6\",self.array6)\n self.arrayPlotData.set_data(\"channel7\",self.array7)\n self.arrayPlotData.set_data(\"cursorXS\",self.cursorXS)\n #self.arrayPlotData.set_data(\"cursorVertical\",self.cursorVertical)", "def plot(self):\r\n print('method BaseDataLogger.plot() is not implemented, to be done in subclass ' + str(type(self)))", "def plot(self):\r\n print('method BaseDataLogger.plot() is not implemented, to be done in subclass ' + str(type(self)))", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data", "def data(self):\n return self.__data" ]
[ "0.6572209", "0.6476473", "0.64533323", "0.6261248", "0.6070728", "0.59735954", "0.5929763", "0.5889976", "0.5844909", "0.5839488", "0.5793765", "0.5782455", "0.57820135", "0.5781594", "0.5757953", "0.5725428", "0.5717131", "0.5696034", "0.5693247", "0.56396365", "0.56344974", "0.56301105", "0.5626174", "0.5596926", "0.55880487", "0.5583333", "0.5580258", "0.55668044", "0.555283", "0.555283", "0.554648", "0.5510667", "0.5510667", "0.5497847", "0.54862803", "0.547408", "0.54683036", "0.5463907", "0.5454412", "0.5452823", "0.54506135", "0.545042", "0.5439142", "0.54368585", "0.54311746", "0.5429822", "0.5423068", "0.5419418", "0.54156977", "0.53993875", "0.53993875", "0.53993875", "0.5396721", "0.53955257", "0.5368821", "0.53650516", "0.53590745", "0.53529966", "0.5339298", "0.5337787", "0.53342104", "0.5328712", "0.5325177", "0.53129137", "0.5306934", "0.5306857", "0.5298913", "0.5294458", "0.52760243", "0.52689236", "0.52643234", "0.52611566", "0.52578807", "0.5244552", "0.52429444", "0.5239132", "0.52390295", "0.5237134", "0.5234613", "0.52337164", "0.52288437", "0.5215964", "0.52133596", "0.52103245", "0.5204244", "0.51985437", "0.5193401", "0.5193401", "0.5193401", "0.5193401", "0.51933587", "0.518594", "0.5184042", "0.5181438", "0.5181438", "0.51804066", "0.51804066", "0.51804066", "0.51804066", "0.51804066", "0.51804066" ]
0.0
-1
Matches a specification against the current Plot.
def matches(self, spec):
    if callable(spec) and not isinstance(spec, type):
        return spec(self)
    elif isinstance(spec, type):
        return isinstance(self, spec)
    else:
        raise ValueError("Matching specs have to be either a type or a callable.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_match():\n fig, ax = plt.subplots()\n ax.plot([0, 1], [0, 2])\n slide = _get_empty_slide()\n text = ax.set_title(\"TITLE TEXT\")\n shape = figpptx.send(fig, slide=slide, match=text)\n assert get_typename(shape) == \"Shape\"\n shapes = _get_shapes(slide, individual=True)\n assert len(shapes) == 2\n assert {shape.Type for shape in shapes} == {constants.msoPicture, constants.msoTextBox}", "def matches(spec, shape_):\n (c, s) = spec\n matches_color = c is None or (shape_.color == c)\n matches_shape = s is None or (shape_.name == s)\n return matches_color and matches_shape", "def spec_test(self, hdul):\n # check for mixed spectral product in\n # single spectral extension or multi-order spectra\n # (may be SPECTRAL_FLUX or SPECTRAL_FLUX_ORDER_*)\n for hdu in hdul:\n extname = str(hdu.header.get('EXTNAME', 'UNKNOWN')).lower()\n if 'spectral_flux' in extname:\n return 'spectrum'\n\n header = hdul[0].header\n if 'NAXIS1' in header:\n xdim = header['NAXIS1']\n else:\n xdim = 0\n if 'NAXIS2' in header:\n ydim = header['NAXIS2']\n else:\n ydim = 0\n if xdim > 0 and ydim < 6:\n return 'spectrum_only'\n else:\n return 'image'", "def test_plot_ess(models, kind, kwargs):\n idata = models.model_1\n ax = plot_ess(idata, kind=kind, **kwargs)\n assert np.all(ax)", "def update_spec_plot(self):\n if self.img is None:\n return\n\n p = self.params\n pos = np.array(self.roi.pos(), dtype=int)\n size = np.array(self.roi.size(), dtype=int)\n imin = np.clip(pos, 0, self.cube.shape[1])\n imax = np.clip(pos + size, 0, self.cube.shape[2])\n print('Extract mean spectrum for {}'.format(list(zip(imin, imax))))\n data = self.cube[:, imin[1]:imax[1], imin[0]:imax[0]]\n self.spec = spec = data.mean(axis=(1, 2))\n self.specplot.clearPlots()\n\n if p['Sky', 'Show'] and self.sky is not None:\n sp = self.sky.data.data * (2 * spec.data.max()) + spec.data.min()\n self.specplot.plot(sp, pen=p['Sky', 'Line Color'])\n\n self.specplot.plot(spec.data.data, pen=p['Spectrum', 'Line color'])\n\n if p['Median filter', 'Show']:\n sp = spec.median_filter(p['Median filter', 'Kernel Size'])\n self.specplot.plot(sp.data.data, pen={\n 'color': p['Median filter', 'Line Color'],\n 'width': p['Median filter', 'Line Size']\n })\n\n # self.specplot.autoRange()\n if self.zoomplot is not None:\n self.update_zoom_spec_from_region()", "def selSpec( self,i ):\n if i < numSpec():\n self.ispec=i\n else:\n sys.exit( \"Error: Requested spectrum not available.\" )", "def has_custom_param(plot):\n return Plot.has_custom_param(plot)", "def isfigure(line):\n return re.search(\"(includegraphics|plotone|plottwo)\", line)", "def is_plot(session_id, test_name):\n return Plot.is_plot(session_id, test_name)", "def matches(self, feature):\n pass", "def locate(self, cam, props):\n if its.caps.read_3a(props):\n s, e, _, _, fd = cam.do_3a(get_results=True)\n fmt = {'format': 'yuv', 'width': VGA_WIDTH, 'height': VGA_HEIGHT}\n chart, scene, s_factor = self._calc_scale_factors(cam, props, fmt,\n s, e, fd)\n else:\n print 'Chart locator skipped.'\n self._set_scale_factors_to_one()\n return\n scale_start = self._scale_start * s_factor\n scale_stop = self._scale_stop * s_factor\n scale_step = self._scale_step * s_factor\n self.scale = s_factor\n max_match = []\n # check for normalized image\n if numpy.amax(scene) <= 1.0:\n scene = (scene * 255.0).astype(numpy.uint8)\n scene_gray = gray_scale_img(scene)\n print 'Finding chart in scene...'\n for scale in numpy.arange(scale_start, scale_stop, scale_step):\n scene_scaled = scale_img(scene_gray, scale)\n result = 
cv2.matchTemplate(scene_scaled, chart, cv2.TM_CCOEFF)\n _, opt_val, _, top_left_scaled = cv2.minMaxLoc(result)\n # print out scale and match\n print ' scale factor: %.3f, opt val: %.f' % (scale, opt_val)\n max_match.append((opt_val, top_left_scaled))\n\n # determine if optimization results are valid\n opt_values = [x[0] for x in max_match]\n if 2.0*min(opt_values) > max(opt_values):\n estring = ('Warning: unable to find chart in scene!\\n'\n 'Check camera distance and self-reported '\n 'pixel pitch, focal length and hyperfocal distance.')\n print estring\n self._set_scale_factors_to_one()\n else:\n if (max(opt_values) == opt_values[0] or\n max(opt_values) == opt_values[len(opt_values)-1]):\n estring = ('Warning: chart is at extreme range of locator '\n 'check.\\n')\n print estring\n # find max and draw bbox\n match_index = max_match.index(max(max_match, key=lambda x: x[0]))\n self.scale = scale_start + scale_step * match_index\n print 'Optimum scale factor: %.3f' % self.scale\n top_left_scaled = max_match[match_index][1]\n h, w = chart.shape\n bottom_right_scaled = (top_left_scaled[0] + w,\n top_left_scaled[1] + h)\n top_left = (int(top_left_scaled[0]/self.scale),\n int(top_left_scaled[1]/self.scale))\n bottom_right = (int(bottom_right_scaled[0]/self.scale),\n int(bottom_right_scaled[1]/self.scale))\n self.wnorm = float((bottom_right[0]) - top_left[0]) / scene.shape[1]\n self.hnorm = float((bottom_right[1]) - top_left[1]) / scene.shape[0]\n self.xnorm = float(top_left[0]) / scene.shape[1]\n self.ynorm = float(top_left[1]) / scene.shape[0]", "def check_plot_command(self, line):\n err_msg = \"The plot command takes the syntax:\\n\\n\"\n err_msg += \"\\t'plot <plot type> from <data name> as <plot name>'\"\n err_msg += \"\\n\\n\\t\\t\\tOR\\n\\n\\t'plot <plot type> from <data name>'\"\n\n line, any_vars = self.find_vars_in_str(line)\n\n # Check syntax\n words = line.split()\n words = self.fix_words(words)\n self.E_str = \"check_plot_command\"\n has_out_var = False\n if len(words) != 4:\n if len(words) != 6:\n self.print_error(err_msg)\n else:\n has_out_var = True\n _, plot_type, _, in_data, _, out_data = words\n else:\n _, plot_type, _, in_data = words\n\n # Check we can plot the thing asked for\n if plot_type not in f_dicts.plot_fncs:\n err_msg = f\"I don't know how to plot '{words[1]}'.\"\n err_msg += \"\\n\\nFor a full list of plots that can be done see below:\\n\\t* \"\n err_msg += \"\\n\\t* \".join(list(f_dicts.plot_fncs.keys()))\n self.print_error(err_msg)\n\n if has_out_var:\n metadata = f_dicts.plot_fncs[words[1]].metadata\n self.set_var(out_data, \"^EMPTY^\", metadata)\n return out_data\n\n return None", "def match(self, req): # type: (MarkerRequirement) -> bool\n pass", "def match(self, dc):\n raise NotImplemented", "def evaluate(self, plot):", "def plot(self):\n\t\tself.plotOfSpect()", "def plotsurvey(filename='obslist_all.fits', plot_type='f', program='m'): \n\n t = Table.read(filename, format='fits')\n\n if plot_type == 'f':\n fig, ax = plt.subplots()\n ra = t['RA']\n ra[ra>300.0] -= 360.0\n dec = t['DEC']\n mjd = t['MJD']\n mjd_start = np.min(mjd)\n mjd -= mjd_start\n\n if program=='a':\n i1 = np.where( mjd/365.0 < 1.0 )\n i2 = np.where( (mjd/365.0 >= 1.0) & (mjd/365.0 < 2.0) )\n i3 = np.where( (mjd/365.0 >= 2.0) & (mjd/365.0 < 3.0) )\n i4 = np.where( (mjd/365.0 >= 3.0) & (mjd/365.0 < 4.0) )\n i5 = np.where( (mjd/365.0 >= 4.0) & (mjd/365.0 < 5.0) )\n elif program=='m':\n i1 = np.where( (mjd/365.0 < 1.0) & (t['PROGRAM']!='BRIGHT') )\n i2 = np.where( ((mjd/365.0 >= 1.0) & (mjd/365.0 
< 2.0)) & (t['PROGRAM']!='BRIGHT') )\n i3 = np.where( ((mjd/365.0 >= 2.0) & (mjd/365.0 < 3.0)) & (t['PROGRAM']!='BRIGHT') )\n i4 = np.where( ((mjd/365.0 >= 3.0) & (mjd/365.0 < 4.0)) & (t['PROGRAM']!='BRIGHT') )\n i5 = np.where( ((mjd/365.0 >= 4.0) & (mjd/365.0 < 5.0)) & (t['PROGRAM']!='BRIGHT') )\n elif program=='b':\n i1 = np.where( (mjd/365.0 < 1.0) & (t['PROGRAM']=='BRIGHT') )\n i2 = np.where( ((mjd/365.0 >= 1.0) & (mjd/365.0 < 2.0)) & (t['PROGRAM']=='BRIGHT') )\n i3 = np.where( ((mjd/365.0 >= 2.0) & (mjd/365.0 < 3.0)) & (t['PROGRAM']=='BRIGHT') )\n i4 = np.where( ((mjd/365.0 >= 3.0) & (mjd/365.0 < 4.0)) & (t['PROGRAM']=='BRIGHT') )\n i5 = np.where( ((mjd/365.0 >= 4.0) & (mjd/365.0 < 5.0)) & (t['PROGRAM']=='BRIGHT') )\n else:\n print(\"if set, program should be a, m or b; default is m.\\n\")\n return\n y1 = plt.scatter(ra[i1], dec[i1], c='r', marker='.')\n y2 = plt.scatter(ra[i2], dec[i2], c='b', marker='.')\n y3 = plt.scatter(ra[i3], dec[i3], c='g', marker='.')\n y4 = plt.scatter(ra[i4], dec[i4], c='y', marker='.')\n y5 = plt.scatter(ra[i5], dec[i5], c='m', marker='.')\n\n plt.xlabel('RA (deg)')\n plt.ylabel('DEC (deg)')\n plt.legend((y1, y2, y3, y4, y5), ('Year 1', 'Year 2', 'Year 3', 'Year 4', 'Year 5'), scatterpoints=1, loc=2)\n ticks = ax.get_xticks()\n ticks[ticks < 0] += 360\n ax.set_xticklabels([int(tick) for tick in ticks])\n\n elif plot_type == 'h':\n if (program=='m'):\n i = np.where( t['PROGRAM'] != 'BRIGHT')\n elif (program=='b'):\n i = np.where( t['PROGRAM'] == 'BRIGHT')\n elif (program=='a'):\n i = np.arange(len(t['PROGRAM']))\n else:\n print(\"if set, program should be a, m or b; default is m.\\n\")\n return\n\n plt.figure(1)\n plt.subplot(231)\n x = t['EXPTIME']\n n, bins, patches = plt.hist(x[i], 20, facecolor='0.5', alpha=0.75)\n plt.xlabel('Exposure time (seconds)')\n plt.ylabel('Count')\n\n plt.subplot(232)\n x = t['SEEING']\n n, bins, patches = plt.hist(x[i], 20, facecolor='0.5', alpha=0.75)\n plt.xlabel('Seeing (arcseconds)')\n plt.ylabel('Count')\n\n plt.subplot(233)\n x = t['LINTRANS']\n n, bins, patches = plt.hist(x[i], 20, facecolor='0.5', alpha=0.75)\n plt.xlabel('Linear transparency')\n plt.ylabel('Count')\n\n plt.subplot(234)\n x = t['AIRMASS']\n n, bins, patches = plt.hist(x[i], 20, facecolor='0.5', alpha=0.75)\n plt.xlabel('Airmass')\n plt.ylabel('Count')\n\n plt.subplot(235)\n y1 = t['MOONALT']\n y2 = y1[i]\n x1 = t['MOONFRAC']\n x2 = x1[i]\n x = x2.compress((y2>0.0).flat)\n n, bins, patches = plt.hist(x, 20, facecolor='0.5', alpha=0.75)\n plt.xlabel('Moon illumination fraction')\n plt.ylabel('Count')\n\n plt.subplot(236)\n y = t['MOONALT']\n y2 = y[i]\n x1 = t['MOONDIST']\n x2 = x1[i]\n x = x2.compress((y2>0.0).flat)\n n, bins, patches = plt.hist(x, 20, facecolor='0.5', alpha=0.75)\n plt.xlabel('Distance from the Moon (deg)')\n plt.ylabel('Count')\n\n elif plot_type == 't':\n if (program=='m'):\n i = np.where( t['PROGRAM'] != 'BRIGHT')\n elif (program=='b'):\n i = np.where( t['PROGRAM'] == 'BRIGHT')\n elif (program=='a'):\n i = np.arange(len(t['PROGRAM']))\n else:\n print(\"if set, program should be a, m or b; default is m.\\n\")\n return\n \n mjd = t['MJD']\n mjd_start = np.min(mjd)\n mjd -= mjd_start\n plt.figure(1)\n\n plt.subplot(221)\n y = t['MOONALT']\n plt.plot(mjd[i], y[i], linestyle='-', color='black')\n plt.xlabel('Days')\n plt.ylabel('Moon elevation (degrees)')\n\n plt.subplot(222)\n y = t['SEEING']\n plt.plot(mjd[i], y[i], linestyle='-', color='black')\n plt.xlabel('Days')\n plt.ylabel('Seeing (arcseconds)')\n\n plt.subplot(223)\n y = 
t['LINTRANS']\n plt.plot(mjd[i], y[i], linestyle='-', color='black')\n plt.xlabel('Days')\n plt.ylabel('Linear transparency')\n\n plt.subplot(224)\n if (program=='b'):\n x = mjd[t['PROGRAM']=='BRIGHT']\n elif (program=='m'):\n x = mjd[t['PROGRAM']!='BRIGHT']\n elif (program=='a'):\n x = mjd\n y = np.arange(len(x)) + 1\n #y = np.arange(len(mjd)) + 1\n #plt.plot(mjd[i], y[i], linestyle='-', color='black')\n plt.plot(x, y, linestyle='-', color='black')\n plt.xlabel('Days')\n plt.ylabel('Number of tiles observed')\n\n\n elif plot_type == 'e':\n y = t['EXPTIME']\n if (program=='m'):\n i = np.where( t['PROGRAM'] != 'BRIGHT')\n elif (program=='b'):\n i = np.where( t['PROGRAM'] == 'BRIGHT')\n elif (program=='a'):\n i = np.arange(len(y))\n else:\n print(\"if set, program should be a, m or b; default is m.\\n\")\n return\n plt.figure(1)\n\n plt.subplot(221)\n x = t['LINTRANS']\n plt.scatter(x[i], y[i], marker='.', color='black')\n plt.xlabel('Linear transparency')\n plt.ylabel('Exposure time (seconds)')\n\n plt.subplot(222)\n x = t['SEEING']\n plt.scatter(x[i], y[i], marker='.', color='black')\n plt.xlabel('Seeing (arcseconds)')\n plt.ylabel('Exposure time (seconds)')\n\n plt.subplot(223)\n x = t['EBMV']\n plt.scatter(x[i], y[i], marker='.', color='black')\n plt.xlabel('E(B-V)')\n plt.ylabel('Exposure time (seconds)')\n\n plt.subplot(224)\n x = t['AIRMASS']\n plt.scatter(x[i], y[i], marker='.', color='black')\n plt.xlabel('Airmass')\n plt.ylabel('Exposure time (seconds)')\n\n plt.show()", "def spectra(self, value):\n\n _errtxt = f\"This assignment {value} does not contain valid spectra\"\n if type(value) == dict:\n _data = value\n # if not _data:\n # self.start_fit = False\n elif type(value).__name__ == \"SpectrumDataCollection\":\n _data = value.mean_data\n _fit_lbl = \"mean\"\n # if not _data:\n # self.start_fit = False\n elif type(value).__name__ == \"SpectrumDataLoader\":\n _data = value.clean_df\n _fit_lbl = \"int\"\n # if _data.empty:\n # self.start_fit = False\n elif isinstance(value, pd.DataFrame):\n raise AttributeError\n # TODO implement self.sense_windowname(value)\n else:\n raise ValueError(_errtxt)\n\n _specs = {\n k: val\n for k, val in _data.items()\n if k in self.fit_windows and type(val) == pd.DataFrame\n }\n # assert bool(_specs), _errtxt\n if not _specs:\n self.start_fit = False\n\n self._spectra = _specs\n self.FitResults = {}\n self.info = {}\n if hasattr(value, \"info\"):\n self.info = {**self.info, **value.info}", "def matches(self, requirement):\r\n try:\r\n requirement = self.parse_requirement(requirement, self._interpreter)\r\n except ValueError as e:\r\n raise self.UnknownRequirement(str(e))\r\n return self.distribution in requirement", "def visitspec(load,plate,mjd,fiber,gridfile='apg_rvsynthgrid',apstar=False) :\n grid = fits.open(os.environ['APOGEE_DIR']+'/data/synthgrid/'+gridfile+'.fits')\n if gridfile == 'apg_rvsynthgrid' : hdu=1\n elif gridfile == 'apg_rvsynthgrid_v2': hdu=0\n elif apstar : hdu=2\n else : hdu=1\n gridspec=grid[hdu].data\n gridwave = 10.**spectra.fits2vector(grid[hdu].header,2)\n griderr = np.ones(gridspec.shape[0])\n #for ispec in range(gridspec.shape[1]) :\n # cont = norm.cont(gridspec[:,ispec],griderr)\n # gridspec[:,ispec] /= cont\n\n data = load.apVisit(plate,mjd,fiber)\n\n # compare with DR14 \n comp(a,b,domatch=False,out='plots/dr14all')\n grid.append(['dr14all_1.png',''])\n xtit.append('all stars: DR14 (dotted) and test DR16 (solid)')\n\n comp(a,b,domatch=True,out='plots/dr14match')\n grid.append(['dr14match_1.png','dr14match_2.png'])\n 
xtit.append('same stars: DR14 (dotted) and test DR16 (solid)')\n # set bad pixels to nan\n shape=data[1].data.shape\n spec = copy.copy(data[1].data).flatten()\n specerr = copy.copy(data[2].data)\n specwave=data[4].data\n pixmask=bitmask.PixelBitMask()\n bd = np.where( ((data[3].data & pixmask.badval()) > 0) | \n ((data[3].data & pixmask.getval('SIG_SKYLINE')) > 0) ) [0]\n spec[bd] = np.nan\n spec = spec.reshape(shape)\n\n # continuum normalize and sample to grid\n outspec = np.full(len(gridwave),np.nan)\n if not apstar :\n # apVisit wavelengths are reversed\n spec=np.flip(spec)\n specwave=np.flip(specwave)\n specerr=np.flip(specerr)\n for ichip in range(3) :\n cont = norm.cont(spec[ichip,:],specerr[ichip,:])\n spec[ichip,:] /= cont\n gd=np.where(np.isfinite(spec[ichip,:]))[0]\n ip= interpolate.InterpolatedUnivariateSpline(specwave[ichip,gd],spec[ichip,gd],k=3)\n out = ip(gridwave)\n gd = np.where( (gridwave > specwave[ichip,0]) & (gridwave < specwave[ichip,-1]) )[0]\n outspec[gd] = out[gd]\n plt.plot(specwave[ichip,:],spec[ichip,:])\n plt.plot(gridwave[gd],out[gd])\n plt.show()\n\n for ispec in range(gridspec.shape[1]) :\n print(ispec)\n bd=np.where(np.isnan(outspec))\n outspec[bd]=1.\n out=correlate(outspec,gridspec[:,ispec])\n pdb.set_trace()", "def testPlotCandidateDesign(self):\n\n np.random.seed(0)\n new_data = pd.concat([self.test_data, self.add_pair], sort=False)\n test_class = TrimmedMatchGeoXDesign(\n GeoXType.HEAVY_UP,\n pretest_data=new_data,\n time_window_for_design=self.design_window,\n time_window_for_eval=self.evaluation_window,\n )\n budget_list = [30, 40]\n iroas_list = [0, 2]\n results, _ = test_class.report_candidate_designs(\n budget_list=budget_list,\n iroas_list=iroas_list,\n use_cross_validation=True,\n num_simulations=10,\n )\n\n axes_dict = test_class.plot_candidate_design(results)\n for budget in budget_list:\n for iroas in iroas_list:\n self.assertIsInstance(axes_dict[(budget, iroas)], plt.Figure)", "def is_a_spectrum_file(self):\n import re\n\n is_spectrum = self.ndp3 == 4\n regex = re.compile(r'F[0-9]{2} PT2D[0-9]{6}')\n is_spectrum = is_spectrum and \\\n all([regex.match(var) is not None\\\n for var in self.varnames])\n\n return is_spectrum", "def match(self, product):\n\n raise NotImplementedError, 'need impletent match method'", "def find_spectra(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()", "def match(uspec1, uspec2):\n \n if uspec1.is_power_onoff() and uspec2.is_power_onoff():\n return True\n \n if uspec1.number_windows() != uspec2.number_windows():\n return False\n \n if uspec1['speed'] != uspec2['speed'] or \\\n uspec1['x_bin'] != uspec2['x_bin'] or \\\n uspec1['y_bin'] != uspec2['y_bin']:\n return False\n \n if uspec1.number_window_pairs() > 0:\n \n if uspec1['x1_start'] != uspec2['x1_start'] or \\\n uspec1['x1_size'] != uspec2['x1_size'] or \\\n uspec1['y1_start'] != uspec2['y1_start'] or \\\n uspec1['y1_size'] != uspec2['y1_size']:\n return False\n \n if uspec1.number_window_pairs() > 1:\n\n if uspec1['x2_start'] != uspec2['x2_start'] or \\\n uspec1['x2_size'] != uspec2['x2_size'] or \\\n uspec1['y2_start'] != uspec2['y2_start'] or \\\n uspec1['y2_size'] != uspec2['y2_size']:\n return False\n \n return True", "def matches_output(self, text):\n if self.markers:\n for marker in self.markers:\n if marker in text:\n return True\n # -- OTHERWISE:\n return False", "def test_INFO_sym_type_with_anytext_struc(self, mock_print):\n Plotter.from_smiles(self.data_LOGS[\"smiles\"], target_type=\"R\", sim_type=\"anytext\")\n 
mock_print.assert_called_with('sim_type indicates the similarity type by which the plots are constructed.\\n' +\n 'The supported similarity types are structural and tailored.\\n' +\n 'Because no target list has been provided \\'structural\\' as been selected as sym_type.')", "def getInputSpecification(cls):\n spec = super().getInputSpecification()\n # TODO this is waaaaay to much to convert right now\n # For now, accept a blank plotting check and sort it out later.\n spec.strictMode = False\n return spec\n ###################################################################\n # TODO here's a good start, but skipping for now:\n # spec.addParam('interactive', param_type=InputTypes.BoolType)\n # spec.addParam('overwrite', param_type=InputTypes.BoolType)\n # spec.addSub(PIF('filename', contentType=InputTypes.StringType))\n\n # xyz = InputTypes.makeEnumType('PlotXYZ', 'PlotXYZ', ['x', 'y', 'z'])\n\n # action = PIF('actions')\n # hows = InputTypes.makeEnumType('GeneralPlotHow', 'GeneralPlotHow',\n # ['screen', 'pdf', 'png', 'eps', 'pgf', 'ps', 'gif', 'svg', 'jpeg', 'raw', 'bmp', 'tiff', 'svgz'])\n # action.addSub(PIF('how', contentType=hows))\n\n # title = PIF('title')\n # title.addSub(PIF('text', contentType=InputTypes.StringType))\n # # kwargs can be anything, so just turn strict mode off for it\n # title.addSub(PIF('kwargs', strictMode=False))\n # action.addSub(title)\n\n # labelFormat = PIF('labelFormat')\n # labelFormat.addSub(PIF('axis', contentType=xyz))\n # sciPlain = InputTypes.makeEnumType('SciNot', 'SciNot', ['sci', 'scientific', 'plain'])\n # labelFormat.addSub(PIF('style', contentType=sciPlain))\n # labelFormat.addSub(PIF('scilimits', contentType=InputTypes.StringType))\n # labelFormat.addSub(PIF('useOffset', contentType=InputTypes.FloatType))\n # action.addSub(labelFormat)\n\n # figProp TODO WORKING XXX\n # TODO\n # spec.addSub(action)\n\n # settings = parameterInputFactory('plotSettings')\n # TODO\n # spec.addSub(settings)\n # return spec\n #################################### END draft", "def test_spec(self):\n graph = graphviz.Graph(comment='The Round Table')\n graph.node('A', 'King Arthur')\n graph.node('B', 'Sir Bedevere the Wise')\n graph.edges(['AB'])\n\n st.graphviz_chart(graph)\n\n c = self.get_delta_from_queue().new_element.graphviz_chart\n self.assertEqual(hasattr(c, 'spec'), True)", "def plot_pat_fit(x_data, y_data, z_data, pp, trans='one_ele', fig=400, title='Fitted model', label='model'):\n if z_data is not None:\n plt.figure(fig)\n plt.clf()\n plt.pcolormesh(x_data, y_data, z_data, shading='auto')\n plt.title(title)\n plt.xlabel('Detuning (mV)')\n plt.ylabel('Frequency (Hz)')\n\n if trans == 'one_ele':\n model = one_ele_pat_model\n yfit = model(x_data, pp)\n plt.plot(x_data, yfit, '-g', label=label)\n yfit_t0 = model(x_data, np.array([pp[0], pp[1], 0]))\n plt.plot(x_data, yfit_t0, '--g')\n elif trans == 'two_ele':\n model = two_ele_pat_model\n ylfit, ymfit, yrfit = model(x_data, pp)\n plt.plot(x_data, ylfit, '-g', label='S-T')\n plt.plot(x_data, ymfit, '-r', label='S-S')\n plt.plot(x_data, yrfit, '-b', label='T-S')\n\n plt.ylim([np.min(y_data), np.max(y_data)])", "def plot_spectrum(spec, config):\n for v in config.getpars():\n plot.plot_projection(spec, v)", "def test_INFO_sym_type_with_anytext_struc(self, mock_print):\n Plotter.from_smiles(self.data_BBBP[\"smiles\"], target_type=\"C\", sim_type=\"anytext\")\n mock_print.assert_called_with('sim_type indicates the similarity type by which the plots are constructed.\\n' +\n 'The supported similarity types are structural 
and tailored.\\n' +\n 'Because no target list has been provided \\'structural\\' as been selected as sym_type.')", "def matches(self):\n pass", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def is_subset_of(self, uspec):\n \n if self.is_power_onoff() or uspec.is_power_onoff():\n return False\n \n if (uspec.is_bias() or not uspec.is_calib()) and self['speed'] != uspec['speed']:\n return False\n\n if int(self['x_bin']) % int(uspec['x_bin']) != 0 or int(self['y_bin']) % int(uspec['y_bin']) != 0:\n return False\n\n if self.number_windows() > 0:\n\n if not uspec.contains_window(self['x1_start'], self['y1_start'], self['x1_size'], self['y1_size'], self['x_bin'], self['y_bin']):\n return False\n\n if self.number_windows() > 1:\n\n if not uspec.contains_window(self['x2_start'], self['y2_start'], self['x2_size'], self['y2_size'], self['x_bin'], self['y_bin']):\n return False\n\n return True", "def test_select_two(self):\n setups = self.get_setup().decompress(\n [\"dimensions.time\", \"dimensions.species_id\"]\n )\n assert len(setups) == 6\n assert isinstance(setups, PlotSetupGroup)\n assert all(isinstance(setup, PlotSetup) for setup in setups)\n res = {\n (\n s.panels.collect_equal(\"dimensions\").variable,\n s.panels.collect_equal(\"dimensions\").species_id,\n s.panels.collect_equal(\"dimensions\").time,\n )\n for s in setups\n }\n sol = {\n ((\"dry_deposition\", \"wet_deposition\"), 1, 1),\n ((\"dry_deposition\", \"wet_deposition\"), 1, 2),\n ((\"dry_deposition\", \"wet_deposition\"), 1, 3),\n ((\"dry_deposition\", \"wet_deposition\"), 2, 1),\n ((\"dry_deposition\", \"wet_deposition\"), 2, 2),\n ((\"dry_deposition\", \"wet_deposition\"), 2, 3),\n }\n assert res == sol", "def match_data(self, datasets):\n raise NotImplementedError", "def specifies(self, key, value=None, path=None):\n try:\n if path != None and isDict(multiIndex(self.current_state, path)):\n target = multiIndex(self.current_state, path)\n logging.debug(\"Specification found: \")\n logging.debug(\"Key : \" + key)\n logging.debug(\"path : \" + str(path))\n logging.debug(\"value: \" + str(target))\n return key in target.keys()\n else:\n target = self.current_state\n logging.debug(\"Specification found: \")\n logging.debug(\"Key : \" + key)\n logging.debug(\"equals value: \" + str(value))\n logging.debug(\"at spec path: \" + str(path))\n return key in target.keys() and (\n target[key] == value if value != None else True)\n except KeyError:\n logging.debug(\"WARNING: Key error when requesting path \" + \\\n str(path) + \" for widget \" + self.name)", "def match(self, data_instance: Dict[str, Any]) -> bool:", "def __eq__(*args, **kwargs):\n return _gdi_.Pen___eq__(*args, **kwargs)", "def matching_function(self):\n return self.matching", "def plot_spectrumxichange(self):\n countgood = 0 ; countbad = 0\n for idata in self.datarg:\n if idata[-1, 0] == 1.: \n self.fig.axes[0].plot(idata[0:,0], idata[0: ,1] ,'b') \n countgood += 1\n print countgood , 'good solution'\n else: \n self.fig.axes[0].plot(idata[0:,0], idata[0: ,1] ,'r') \n print countbad, 'bad solution'\n countbad += 1\n print 'We found %g good solutions and %g tda startdistributions that broke down before xi = 1, we hope that\\'s what you expected' %(countgood,countbad)\n #Create custom artistsr[goodline,badline],['solution','breakdown']\n goodline = pl.Line2D((0,1),(0,0), color='b') \n badline = pl.Line2D((0,1),(0,0), color='r')\n self.layout(self.reader.depvar['depvar'] , r'energy spectrum (a.u.)' , tit = r'All tda start distributions $\\xi$' , legendhand = [goodline , 
badline] , legendlab = ['solution', 'breakdown'] )\n self.savefig('xispec')", "def testPlots(self):\n\t\tself.watcher.analyze(layers=[67], plot=True, randomize=True)", "def evaluate(self, problem):\n sequence = self.location.extract_sequence(problem.sequence)\n reverse = reverse_complement(sequence)\n locations = []\n for i in range(len(sequence) - self.stem_size):\n word = sequence[i : i + self.stem_size]\n rest = reverse[-(i + self.hairpin_window) : -(i + self.stem_size)]\n if word in rest:\n index = rest.index(word)\n locations.append((i, i + self.hairpin_window - index - 1))\n score = -len(locations)\n locations = group_nearby_segments(locations, max_start_spread=10)\n locations = sorted([Location(l[0][0], l[-1][1]) for l in locations])\n\n return SpecEvaluation(self, problem, score, locations=locations)", "def inferSpecification(inputExamples, outputExamples, components):", "def ownedPlots( self, tCoords, result, argsList ):\n\t\tbPaint = True\n\t\tbContinue = True\n\t\tpCurrent = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif pCurrent.getOwner() == argsList:\n\t\t\t# this is a good plot, so paint it and continue search\n\t\t\treturn (None, bPaint, bContinue)\n\t\t# not a good plot, so don't paint it but continue search\n\t\treturn (None, not bPaint, bContinue)", "def spec(self, spec):\n if spec is None:\n raise ValueError(\"Invalid value for `spec`, must not be `None`\") # noqa: E501\n\n self._spec = spec", "def matches(self, actual: Any) -> MatchResult:\n raise NotImplementedError()", "def matches(self, aModel):\n\n params_bis = list(map(aModel.string_to_param,self.grid_params))\n for param1, param2 in zip(self.params, params_bis):\n if (abs(param1/param2 - 1.0) > eps): return False\n return True", "def test_signature(self, method):\n fig = plt.figure()\n ax_test = fig.add_subplot(projection=\"ternary\")\n ax_ref = fig.add_subplot()\n signature_test = inspect.signature(getattr(ax_test, method))\n signature_ref = inspect.signature(getattr(ax_ref, method))\n assert signature_test == signature_ref", "def plotListSearch(self, plotList, function, argsList):\n\t\ttPaintedList = []\n\t\tresult = None\n\t\tfor i in range(len(plotList)):\n\t\t\tresult, bPaintPlot, bContinueSearch = function((plotList[i][0], plotList[i][1]), result, argsList)\n\t\t\tif bPaintPlot: # paint plot\n\t\t\t\ttPaintedList.append((plotList[i][0], plotList[i][1]))\n\t\t\tif not bContinueSearch: # goal reached, so stop\n\t\t\t\treturn result, tPaintedList\n\t\treturn result, tPaintedList", "def goodPlots( self, tCoords, result, argsList ):\n\t\tbPaint = True\n\t\tbContinue = True\n\t\tpCurrent = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif pCurrent.isHills() or pCurrent.isFlatlands():\n\t\t\tif not pCurrent.isImpassable():\n\t\t\t\tif not pCurrent.isUnit():\n\t\t\t\t\tif pCurrent.getTerrainType() not in [con.iDesert, con.iSemidesert, con.iTundra, con.iWetland]:\n\t\t\t\t\t\tif pCurrent.calculateCulturalOwner() == -1: # edead: bugfix\n\t\t\t\t\t\t\t# this is a good plot, so paint it and continue search\n\t\t\t\t\t\t\treturn (None, bPaint, bContinue)\n\t\t# not a good plot, so don't paint it but continue search\n\t\treturn (None, not bPaint, bContinue)", "def testMatch(self):\n\n self.inv._literals_filter['fruit'] = ['pear', 'apple']\n self.inv._literals_filter['xfruit'] = None\n self.inv._compiled_filter['shape'] = None\n self.inv._compiled_filter['xshape'] = None\n self.assertTrue(self.inv._Match('fruit', 'apple'))\n\n self.inv._literals_filter['fruit'] = None\n self.inv._compiled_filter['fruit'] = 
[re.compile('^apple$')]\n self.assertTrue(self.inv._Match('fruit', 'apple'))", "def isvalid(self, plotid):\n return plotid in self._plots", "def fits(self, ctx: EvaluationContext) -> bool:\n return self.rules.is_satisfied(ctx) and self.targets.match(ctx)", "def capabilities_matching_schema(self, schema_capability):\n # FIXME write this, maybe refactor part back into model.\n pass", "def test_plots(self):\n args = [\"L1\", 1126259446, 1126259478]\n pesummary_data = StrainData.fetch_open_data(*args)\n fig = pesummary_data.plot(type=\"td\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(type=\"fd\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(1126259446 + 20., type=\"omegascan\")\n assert isinstance(fig, matplotlib.figure.Figure)\n fig = pesummary_data.plot(type=\"spectrogram\")\n assert isinstance(fig, matplotlib.figure.Figure)", "def _restricted_plots(val: str):\n plots = [\"bar\", \"line\", \"pie\"]\n try:\n val = str(val).lower()\n except ValueError:\n raise argparse.ArgumentTypeError(f\"{val} could not be parsed to a string\")\n\n if val not in plots:\n raise argparse.ArgumentTypeError(f\"{val} is not a valid plot type. Possible values: {', '.join(plots)}\")\n return val", "def qc_prop_matching(self, rel_cols, label):\n\n cols = rel_cols[::]\n\n # create reduced copies of the dataframes for propensity score quality control\n qc_dfs = []\n for df in self:\n qc_dfs.append(df[cols])\n\n # exclude label if included into columns\n if label in cols:\n cols.remove(label)\n\n # construct formula\n formula = construct_formula(label, cols)\n\n # create Matcher\n m = Matcher(*qc_dfs, yvar=label, formula=formula)\n # train classifier to asses predictability\n m.fit_scores(balance=True, nmodels=10)\n # calculate and visualize propensity scores\n m.predict_scores()\n m.plot_scores()", "def test_plot_ess_local_quantile(models, kind, kwargs):\n idata = models.model_1\n ax = plot_ess(idata, kind=kind, **kwargs)\n assert np.all(ax)", "def isMeasureTypeMatch(self):\n return self._measure_type_match", "def plottable(self):\n if self.model_or_sim.type == \"Simulation\":\n return False\n else:\n return True", "def satisfies(self, spec):\n if spec.is_abs and self.is_abs and self.path != spec.path:\n return False\n if spec.implementation is not None and spec.implementation.lower() != self.implementation.lower():\n return False\n if spec.architecture is not None and spec.architecture != self.architecture:\n return False\n\n for our, req in zip((self.major, self.minor, self.micro), (spec.major, spec.minor, spec.micro)):\n if req is not None and our is not None and our != req:\n return False\n return True", "async def on_symbol_specification_updated(self, specification: MetatraderSymbolSpecification):\n for i in range(len(self._specifications)):\n if self._specifications[i]['symbol'] == specification['symbol']:\n self._specifications[i] = specification\n break\n else:\n self._specifications.append(specification)\n self._specificationsBySymbol[specification['symbol']] = specification", "def onselect(xmin, xmax): \n # convert matplotlib float dates to a datetime format\n date_min = mdates.num2date(xmin)\n date_max = mdates.num2date(xmax) \n \n # put the xmin and xmax in datetime format to compare\n date_min = datetime.datetime(date_min.year, date_min.month, date_min.day, date_min.hour, date_min.minute) \n date_max = datetime.datetime(date_max.year, date_max.month, date_max.day, date_max.hour, date_max.minute)\n \n # find the indices that were 
selected \n indices = np.where((comp_data['dates'] >= date_min) & (comp_data['dates'] <= date_max))\n indices = indices[0]\n \n # set the data in ax2 plot\n plot2a.set_data(comp_data['dates'][indices], comp_data['observed_parameter'][indices])\n plot2b.set_data(comp_data['dates'][indices], comp_data['modeled_parameter'][indices])\n \n # calculate updated stats \n updated_r_squared_coeff = statistics.r_squared(modeled = comp_data['modeled_parameter'][indices], observed = comp_data['observed_parameter'][indices])\n updated_nash_sutcliffe_coeff = statistics.nash_sutcliffe(modeled = comp_data['modeled_parameter'][indices], observed = comp_data['observed_parameter'][indices])\n \n ax2.set_xlim(comp_data['dates'][indices][0], comp_data['dates'][indices][-1])\n param_max = np.max((comp_data['observed_parameter'][indices], comp_data['modeled_parameter'][indices]))\n param_min = np.min((comp_data['observed_parameter'][indices], comp_data['modeled_parameter'][indices]))\n ax2.set_ylim(param_min, param_max)\n \n # show text of mean, max, min values on graph; use matplotlib.patch.Patch properies and bbox\n text2 = 'R_squared = %.2f\\nNash sutcliffe = %.2f' % (updated_r_squared_coeff, updated_nash_sutcliffe_coeff)\n \n ax2_text.set_text(text2)\n \n # set the data in ax4 plot\n plot4a.set_data(comp_data['dates'][indices], comp_data['stats']['relative_error'][indices])\n plot4b.set_data(comp_data['dates'][indices], comp_data['stats']['relative_error'][indices])\n \n # calculate updated mean, max, min for stats data\n stat_mean = np.mean(comp_data['stats']['relative_error'][indices])\n stat_max = np.max(comp_data['stats']['relative_error'][indices])\n stat_min = np.min(comp_data['stats']['relative_error'][indices])\n \n ax4.set_xlim(comp_data['dates'][indices][0], comp_data['dates'][indices][-1])\n ax4.set_ylim(stat_min, stat_max)\n \n # show text of mean, max, min values on graph; use matplotlib.patch.Patch properies and bbox\n text4 = 'Mean = %.2f\\nMax = %.2f\\nMin = %.2f' % (stat_mean, stat_max, stat_min)\n \n ax4_text.set_text(text4) \n \n fig.canvas.draw()", "def _contains_op(spec):", "def test_init_SEGYPlotter(self):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n splt = SEGYPlotter(ax, self.segy)\n # should inherit from SEGYPlotManager\n for member in inspect.getmembers(SEGYPlotManager):\n self.assertTrue(hasattr(splt, member[0]))\n # should *not* build header lookup table\n self.assertFalse(hasattr(splt, 'sdb'))\n # should attach axes\n self.assertTrue(isinstance(splt.ax, matplotlib.axes.Axes))", "def find_chart():\r\n ###############################################\r\n # Read values of S/N\r\n sn = np.loadtxt(outtable, usecols=(14,))\r\n xs, ys = np.loadtxt(outtable, usecols=(1, 2)).T\r\n specs = np.loadtxt(outtable, usecols=(0,), dtype=str)\r\n ###############################################\r\n # Find good (and bad) regions according to S/N\r\n good = np.where(((~np.isnan(sn)) & (sn >= sn_cut)))[0]\r\n bad = np.where((sn < sn_cut))[0]\r\n ###############################################\r\n # Filter arrays for S/N\r\n sn = sn[good]\r\n xs = xs[good]\r\n ys = ys[good]\r\n specs = specs[good].tolist()\r\n specs = [x.replace(\".fits\", \"\")[1:] for x in specs]\r\n ###############################################\r\n # Set limits for the plot\r\n norm = Normalize(0, 1)\r\n ###############################################\r\n # Set colormap\r\n # cmap = brewer2mpl.get_map('YlGnBu', 'sequential', 5).mpl_colormap\r\n # Produces a collection of polygons with colors according to S/N values\r\n coll = 
PolyCollection(polygons_bins[good], array=np.ones_like(sn),\r\n cmap=\"gray\", edgecolors='0.5', norm=norm)\r\n ###############################################\r\n # Initiate figure and axis for matplotlib\r\n fig = plt.figure(figsize=(6.25, 6))\r\n gs = gridspec.GridSpec(1, 1)\r\n gs.update(left=0.08, right=0.985, bottom=0.08, top=0.985, hspace=0.05,\r\n wspace=0.06)\r\n ax = plt.subplot(gs[0])\r\n ###############################################\r\n # Draw the polygons\r\n draw_map(fig, ax, coll)\r\n ###############################################\r\n # Add contours according to V-band image\r\n draw_contours(\"vband\", fig, ax)\r\n ###############################################\r\n for x, y, spec in zip(xs, ys, specs):\r\n ax.text(x, y, spec, fontsize=10)\r\n # Write labels\r\n xylabels(ax)\r\n ##############################################\r\n # Save the figure\r\n plt.show()\r\n plt.savefig(\"figs/find_chart.pdf\")\r\n return", "def plot_do(cls, plot_func, *args, **kwargs):\n pf_too=kwargs.pop(\"pf_too\", False)\n plotter=kwargs.pop(\"plotter\", None)\n if plotter is None:\n plotter=kwargs.pop(\"pl\", None)\n if plotter is None:\n plotter=Plotter()\n elif isinstance(plotter, basestring):\n if plotter in cls.agent_dict:\n plotter=cls.agent_dict[plotter]\n else:\n plotter=Plotter(name=plotter)\n pf=getattr(plotter, plot_func)(*args, **kwargs)\n if pf_too:\n return plotter, pf\n return plotter", "def getSpecRules(self, rhs):\n if rhs not in self.itemSet:\n print('Please input a term contain in the term-set !')\n return None\n \n rules = dict()\n for key, value in self.freqSet.items():\n for item in value:\n if rhs.issubset(item) and len(item) > 1:\n item_supp = self.getSupport(item)\n item = item.difference(rhs)\n conf = item_supp / self.getSupport(item)\n if conf >= self.minConf:\n rules[item] = conf\n return rules", "def match(self, other: 'layout.Encoding') -> bool:\n return (\n '*' not in other.kind\n and fnmatch.fnmatch(other.kind, self.kind)\n and all(other.options.get(k) == v for k, v in self.options.items())\n )", "def test_plotting():\n specs = _read_schrodinger('tests/test_data/{}.inp'.format(PROBLEM))\n xint, yint = _interpolate(specs['interpolxydecs'][:, 0],\n specs['interpolxydecs'][:, 1], specs['xopt'])\n\n energies = np.loadtxt('tests/test_data/energies_{}.ref'.format(PROBLEM))\n wfuncsdata = np.loadtxt('tests/test_data/wfuncs_{}.ref'.format(PROBLEM))\n potdata = np.vstack((xint, yint)).T\n\n expval = calculate_expval(xint, wfuncsdata[:, 1:].T)\n uncval = calculate_uncertainty(xint, wfuncsdata[:, 1:].T)\n\n expvaldata = np.vstack((expval, uncval)).T\n _write_data('tests/test_data', potdata, energies, wfuncsdata, expvaldata)\n\n # qm_plottings('tests/test_data', scale=0.5)\n # assert True", "def handleSpecDragEvent(self, obj, val):\n # d = [self.ui.gSpectrum.plotItem.curves[1].xData,\n # self.ui.gSpectrum.plotItem.curves[1].yData]\n if self.dataObj is None: return\n self.createCompWindow(data = self.dataObj.proc_data, p = val)", "def test_plot_qual_report(self):\r\n\r\n output_dir = self.output_dir\r\n\r\n score_min = 20\r\n\r\n ave_bins = [3, 2, 3, 4]\r\n std_dev_bins = [2.16, 0.816, 1.0, 0]\r\n total_bases_bins = [3, 3, 2, 1]\r\n\r\n plot_qual_report(ave_bins, std_dev_bins, total_bases_bins, score_min,\r\n output_dir)", "def matches(self, request_info: RequestInfo):\n modifier_aliases = {\n \"frame\": \"subdocument\",\n \"ghide\": \"generichide\",\n \"~third-party\": \"1p\",\n \"third-party\": \"3p\",\n \"stylesheet\": \"css\",\n \"xmlhttprequest\": \"xhr\",\n }\n\n name 
= modifier_aliases.get(self.name, self.name)\n invert_modifier = False\n if name.startswith(\"~\"):\n invert_modifier = True\n name = name[1:]\n # Check again for aliases\n name = modifier_aliases.get(name, name)\n\n name = name.replace(\"-\", \"\")\n func = getattr(self, f\"_{type(self).__name__}__matches_{name}\")\n\n result = func(request_info, self.value)\n if invert_modifier:\n return MatchResult.invert(result)\n\n return result", "def handleSpecDragEvent(self, obj, val):\n # d = [self.ui.gSpectrum.plotItem.curves[1].xData,\n # self.ui.gSpectrum.plotItem.curves[1].yData]\n if self.dataObj is None: return\n self.createCompWindow(data = self.convertDataForPlot(self.dataObj.proc_data), p = val)", "def _plot(\n data: ResonatorSpectroscopyAttenuationData,\n fit: ResonatorSpectroscopyAttenuationResults,\n qubit,\n):\n return spectroscopy_plot(data, fit, qubit)", "def display_comparison(self, X_val, y_val):\n import matplotlib.pyplot as plt\n x = []\n y = []\n for model_tuple in self.model_list:\n x.append(model_tuple[1])\n y.append(model_tuple[0].score(X_val, y_val))\n plt.scatter(x, y)\n plt.show()", "def test_plot_ppc_ax(models, kind, fig_ax):\n _, ax = fig_ax\n axes = plot_ppc(models.model_1, kind=kind, ax=ax)\n assert np.asarray(axes).item(0) is ax", "def match(self) -> bool:", "def match_info(info_dict):\n try:\n return info_dict['name']=='Horizon'\n except KeyError:\n return False", "def match(self, filter_text):\n\n return filter_text.lower() in self.artist.lower() or \\\n super().match(filter_text)", "def arc_fit_qa(slf, fit, outfile, ids_only=False, title=None):\n\n plt.rcdefaults()\n plt.rcParams['font.family']= 'times new roman'\n\n arc_spec = fit['spec']\n\n # Begin\n if not ids_only:\n plt.figure(figsize=(8, 4.0))\n plt.clf()\n gs = gridspec.GridSpec(2, 2)\n idfont = 'xx-small'\n else:\n plt.figure(figsize=(11, 8.5))\n plt.clf()\n gs = gridspec.GridSpec(1, 1)\n idfont = 'small'\n\n # Simple spectrum plot\n ax_spec = plt.subplot(gs[:,0])\n ax_spec.plot(np.arange(len(arc_spec)), arc_spec)\n ymin, ymax = 0., np.max(arc_spec)\n ysep = ymax*0.03\n for kk, x in enumerate(fit['xfit']*fit['xnorm']):\n yline = np.max(arc_spec[int(x)-2:int(x)+2])\n # Tick mark\n ax_spec.plot([x,x], [yline+ysep*0.25, yline+ysep], 'g-')\n # label\n ax_spec.text(x, yline+ysep*1.3,\n '{:s} {:g}'.format(fit['ions'][kk], fit['yfit'][kk]), ha='center', va='bottom',\n size=idfont, rotation=90., color='green')\n ax_spec.set_xlim(0., len(arc_spec))\n ax_spec.set_ylim(ymin, ymax*1.2)\n ax_spec.set_xlabel('Pixel')\n ax_spec.set_ylabel('Flux')\n if title is not None:\n ax_spec.text(0.04, 0.93, title, transform=ax_spec.transAxes,\n size='x-large', ha='left')#, bbox={'facecolor':'white'})\n if ids_only:\n plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)\n plt.savefig(outfile, dpi=800)\n plt.close()\n return\n\n # Arc Fit\n ax_fit = plt.subplot(gs[0, 1])\n # Points\n ax_fit.scatter(fit['xfit']*fit['xnorm'], fit['yfit'], marker='x')\n if len(fit['xrej']) > 0:\n ax_fit.scatter(fit['xrej']*fit['xnorm'], fit['yrej'], marker='o',\n edgecolor='gray', facecolor='none')\n # Solution\n xval = np.arange(len(arc_spec))\n wave = func_val(fit['fitc'], xval/fit['xnorm'], 'legendre',\n minv=fit['fmin'], maxv=fit['fmax'])\n ax_fit.plot(xval, wave, 'r-')\n xmin, xmax = 0., len(arc_spec)\n ax_fit.set_xlim(xmin, xmax)\n ymin,ymax = np.min(wave)*.95, np.max(wave)*1.05\n ax_fit.set_ylim(np.min(wave)*.95, np.max(wave)*1.05)\n ax_fit.set_ylabel('Wavelength')\n ax_fit.get_xaxis().set_ticks([]) # Suppress labeling\n # Stats\n wave_fit = 
func_val(fit['fitc'], fit['xfit'], 'legendre',\n minv=fit['fmin'], maxv=fit['fmax'])\n rms = np.sqrt(np.sum((fit['yfit']-wave_fit)**2)/len(fit['xfit'])) # Ang\n dwv_pix = np.median(np.abs(wave-np.roll(wave,1)))\n ax_fit.text(0.1*len(arc_spec), 0.90*ymin+(ymax-ymin),\n r'$\\Delta\\lambda$={:.3f}$\\AA$ (per pix)'.format(dwv_pix), size='small')\n ax_fit.text(0.1*len(arc_spec), 0.80*ymin+(ymax-ymin),\n 'RMS={:.3f} (pixels)'.format(rms/dwv_pix), size='small')\n # Arc Residuals\n ax_res = plt.subplot(gs[1,1])\n res = fit['yfit']-wave_fit\n ax_res.scatter(fit['xfit']*fit['xnorm'], res/dwv_pix, marker='x')\n ax_res.plot([xmin,xmax], [0.,0], 'k--')\n ax_res.set_xlim(xmin, xmax)\n ax_res.set_xlabel('Pixel')\n ax_res.set_ylabel('Residuals (Pix)')\n\n # Finish\n plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)\n plt.savefig(outfile, dpi=800)\n plt.close()\n\n plt.rcdefaults()\n\n return", "def __init__(self, Y, X, cat=None, match=None, sub=None, ds=None,\n ylabel=True, alpha=.2, legend=True,\n c=['#009CFF', '#FF7D26', '#54AF3A', '#FE58C6', '#20F2C3'],\n *args, **kwargs):\n sub = assub(sub, ds)\n Y = asvar(Y, sub, ds)\n X = asvar(X, sub, ds)\n if cat is not None:\n cat = ascategorial(cat, sub, ds)\n if match is not None:\n raise NotImplementedError(\"match kwarg\")\n\n if ylabel is True:\n ylabel = Y.name\n\n # figure\n frame_title_ = frame_title(\"Regression\", Y, X, cat)\n _EelFigure.__init__(self, frame_title_, 1, 5, 1, *args, **kwargs)\n ax = self._axes[0]\n\n # labels\n if ylabel:\n ax.set_ylabel(ylabel)\n ax.set_xlabel(X.name)\n # regplot\n scatter_kwargs = {'s': 100,\n 'alpha': alpha,\n 'marker': 'o',\n 'label': '_nolegend_'}\n if cat is None:\n if type(c) in [list, tuple]:\n color = c[0]\n else:\n color = c\n y = Y.x\n reg = X.x\n ax.scatter(reg, y, edgecolor=color, facecolor=color, **scatter_kwargs)\n x, y = _reg_line(y, reg)\n ax.plot(x, y, c=color)\n else:\n for i, cell in enumerate(cat.cells):\n idx = (cat == cell)\n # scatter\n y = Y.x[idx]\n reg = X.x[idx]\n color = c[i % len(c)]\n ax.scatter(reg, y, edgecolor=color, facecolor=color, **scatter_kwargs)\n # regression line\n x, y = _reg_line(y, reg)\n ax.plot(x, y, c=color, label=cellname(cell))\n if legend is True:\n ax.legend()\n elif legend is not False:\n ax.legend(loc=legend)\n\n self._show()", "def is_satisfied_by(self, val):", "def test_plot_lm(models, kwargs):\n idata = models.model_1\n if \"constant_data\" not in idata.groups():\n y = idata.observed_data[\"y\"]\n x1data = y.coords[y.dims[0]]\n idata.add_groups({\"constant_data\": {\"_\": x1data}})\n idata.constant_data[\"x1\"] = x1data\n idata.constant_data[\"x2\"] = x1data\n\n axes = plot_lm(idata=idata, y=\"y\", y_model=\"eta\", xjitter=True, **kwargs)\n assert np.all(axes)", "def test_filters_by_dataset_description_if_requested(self):\n dataset = factories.SourceDatasetFactory.create(i_dbgap_description='a dataset about demographic measurements')\n trait = factories.SourceTraitFactory.create(i_description='lorem ipsum', source_dataset=dataset)\n other_dataset = factories.SourceDatasetFactory.create(i_dbgap_description='foo')\n factories.SourceTraitFactory.create(i_description='lorem ipsum', source_dataset=other_dataset)\n input = {'description': 'lorem', 'dataset_description': 'demographic', 'dataset_name': ''}\n response = self.client.get(self.get_url(), input)\n context = response.context\n self.assertIn('form', context)\n self.assertTrue(context['has_results'])\n self.assertIsInstance(context['results_table'], tables.SourceTraitTableFull)\n 
self.assertQuerysetEqual(context['results_table'].data, [repr(trait)])", "def test_analysis_match(self):\n self.api.observable_add('test.com')\n results = self.api.analysis_match(['test.com', 'unknown.com'])\n known = [o['value'] for o in results['known']]\n self.assertIn('test.com', known)\n self.assertIn('unknown.com', results['unknown'])", "def test_INFO_sym_type_with_anytext_tail(self, mock_stdout):\n Plotter.from_smiles(self.data_LOGS[\"smiles\"], target=self.data_LOGS[\"target\"], target_type=\"R\", sim_type=\"anytext\")\n assert ('sim_type indicates the similarity type by which the plots are constructed.\\n' +\n 'The supported similarity types are structural and tailored.\\n' +\n 'Because a target list has been provided \\'tailored\\' as been selected as sym_type.') in mock_stdout.getvalue()", "def set_spectral(self, alpha, is_certain):\n self.alpha = float(alpha)\n if is_certain == \"?\":\n self.is_spectral_certain = False\n else:\n self.is_spectral_certain = True", "def update(cls, event, curve, preferred_idx):\n cont, ind = curve.contains(event)\n if cont:\n idx = ind['ind'][preferred_idx]\n cls.update_annot(idx)\n cls.update_plot(True)\n elif gs.fitness_plot.floating_annot.get_visible():\n cls.update_plot(False)", "def test_plot_ess_bad_kind(models):\n idata = models.model_1\n with pytest.raises(ValueError, match=\"Invalid kind\"):\n plot_ess(idata, kind=\"bad kind\")", "def test_spector_init_autochoose_survey_spec(obj_dirobj):\n\tobj = obj_dirobj\n\n\ts = spector.Spector(obj=obj, survey='hsc')\n\n\tassert s.survey_spec == 'boss'", "def Search(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_Search(self, *args)", "def axes_contains(ax, obj_list):\n # Get plot elements\n elems = ax.get_children()\n\n # Loop over list of objects that should be in the plot\n contains_all = False\n for obj in obj_list:\n objtype, num_expected = obj\n num = 0\n for elem in elems:\n if isinstance(elem, objtype): num += 1\n if num != num_expected:\n return False\n\n # Return True if no problems found\n return True", "def test_no_arguments(self):\n fig = plt.figure()\n ax = fig.add_subplot(projection='ternary')\n lines = ax.plot()\n assert lines == []", "def plotBatch(inp,output='spectra_plot.pdf',comparisons=None,classify=False,normalize=False,normrange=[0.9,1.4],layout=[2,2],basecolors=['k','m'],legend=[],fontscale=0.7,classify_kwargs={},plot_kwargs={},**kwargs):\n\n# alt keyword check\n for k in ['file','filename']: \n if kwargs.get(k,'') != '': output = kwargs[k]\n for k in ['legends','labels']: \n if kwargs.get(k,'') != '': legend = kwargs[k]\n\n# force input into a list\n if isinstance(inp,list): inputlist = copy.deepcopy(inp)\n else: inputlist = [inp]\n\n# if input is a string of filenames, read in each file to a spectrum object\n if isinstance(inputlist[0],str):\n# try a glob search string \n files = glob.glob(os.path.normpath(inputlist[0]))\n if len(files) > 1 or (len(files) == 1 and inputlist[0].find('*') != -1):\n inputlist = files\n# try reading in files into Spectrum object\n try:\n splist = [splat.Spectrum(file = f) for f in inputlist]\n except:\n raise ValueError('\\nCould not read in list of files {} - make sure the full path is specified and the files are correctly formatted'.format(inputlist))\n\n# if filenames, read in each file to a spectrum object\n elif isinstance(inputlist[0],splat.Spectrum):\n splist = copy.deepcopy(inputlist)\n else:\n raise ValueError('\\nInput should be list of splat.Spectrum objects or filenames')\n\n# normalize if desired\n if normalize==True:\n tmp = 
[sp.normalize(normrange) for sp in splist]\n\n# comparison files are present\n complist = []\n if comparisons != None:\n comp = copy.deepcopy(comparisons)\n if not isinstance(comp,list): comp = [comp]\n if isinstance(comp[0],str):\n try:\n complist = [splat.Spectrum(file = f) for f in comp]\n except:\n print('\\nCould not read in comparison files: ignoring comparisons')\n if isinstance(comp[0],splat.Spectrum):\n complist = comp\n if len(complist) < len(splist):\n while len(complist) < len(splist):\n complist.append(complist[-1])\n# normalize\n if normalize==True:\n tmp = [sp.normalize(normrange) for sp in complist]\n\n# set comparison files to be standards for spectral classification\n# overrules input comparison sample\n if classify == True:\n complist = []\n base_kwargs={\n 'return_standard': True,\n 'method': 'kirkpatrick',\n }\n base_kwargs.update(classify_kwargs)\n for sp in splist:\n complist.append(splat.classifyByStandard(sp,**base_kwargs))\n\n# prep for plotting\n plotlist = []\n clist = []\n for i,sp in enumerate(splist):\n if len(complist) == len(splist):\n plotlist.append([sp,complist[i]])\n clist.extend(basecolors)\n else:\n plotlist.append([sp])\n clist.extend(basecolors[0])\n\n# manage legends\n if len(legend) != 0:\n if not isinstance(legend,list): legend = [legend]\n if len(legend) < (len(splist)+len(complist)):\n# guess: just left out the comparison legends \n if len(complist) > 0 and len(legend) == len(splist):\n legtmp = []\n for i,l in enumerate(legend):\n legtmp.extend([l,'{}'.format(complist[i].name)])\n legend = legtmp\n else:\n# otherwise: pad the remaining legends with the last legend (pairs) \n while len(legend) < (len(splist)+len(complist)):\n if len(complist)>0:\n legend.extend([legend[-2],legend[-1]])\n else:\n legend.extend([legend[-1]])\n if len(legend) > (len(splist)+len(complist)):\n legend = legend[0:(len(splist)+len(complist))]\n else:\n legend = []\n for i,sp in enumerate(splist):\n l = []\n if 'name' in list(sp.__dict__.keys()): l.append(sp.name)\n else: l.append(os.path.basename(sp.filename))\n if len(complist)>0:\n if 'name' in list(complist[i].__dict__.keys()): l.append(complist[i].name)\n else: l.append(os.path.basename(complist[i].filename))\n legend.extend(l)\n\n# generate plot\n base_kwargs={\n 'multiplot': True,\n 'multipage': True,\n 'legends': legend,\n 'colors': clist,\n 'layout': layout,\n 'fontscale': fontscale,\n 'output': output,\n }\n base_kwargs.update(plot_kwargs)\n fig = plotSpectrum(plotlist,**base_kwargs)\n\n return fig", "def vsone_matching(metadata, cfgdict={}, verbose=None):\n # import vtool as vt\n #assert isinstance(metadata, ut.LazyDict), 'type(metadata)=%r' % (type(metadata),)\n\n annot1 = metadata['annot1']\n annot2 = metadata['annot2']\n\n ensure_metadata_feats(annot1, cfgdict=cfgdict)\n ensure_metadata_feats(annot2, cfgdict=cfgdict)\n\n if 'dlen_sqrd' not in annot2:\n def eval_dlen_sqrd(annot):\n rchip = annot['rchip']\n dlen_sqrd = rchip.shape[0] ** 2 + rchip.shape[1] ** 2\n return dlen_sqrd\n annot2.set_lazy_func('dlen_sqrd', lambda: eval_dlen_sqrd(annot2))\n\n # Exceute relevant dependencies\n kpts1 = annot1['kpts']\n vecs1 = annot1['vecs']\n kpts2 = annot2['kpts']\n vecs2 = annot2['vecs']\n dlen_sqrd2 = annot2['dlen_sqrd']\n flann1 = annot1.get('flann', None)\n flann2 = annot2.get('flann', None)\n\n matches, output_metdata = vsone_feature_matching(\n kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict=cfgdict,\n flann1=flann1, flann2=flann2, verbose=verbose)\n metadata.update(output_metdata)\n return matches, metadata", 
"def contains(self, Union, QPointF=None, QPoint=None): # real signature unknown; restored from __doc__\n return False", "def horsePlots( self, tCoords, result, argsList ): # plot search for Gokturk UP\n\t\tbPaint = True\n\t\tbContinue = True\n\t\tpCurrent = gc.getMap().plot(tCoords[0], tCoords[1])\n\t\tif pCurrent.isFlatlands():\n\t\t\tif not pCurrent.isImpassable():\n\t\t\t\tif pCurrent.getTerrainType() not in [con.iTundra, con.iWetland]:\n\t\t\t\t\tif pCurrent.getFeatureType() not in [con.iMarsh, con.iJungle, con.iForest, con.iWoodland, con.iTropicalForest, con.iOasis, con.iDenseForest, con.iIce]:\n\t\t\t\t\t\tif pCurrent.getBonusType(-1) == -1:\n\t\t\t\t\t\t\t# this is a good plot, so paint it and continue search\n\t\t\t\t\t\t\treturn (None, bPaint, bContinue)\n\t\t# not a good plot, so don't paint it but continue search\n\t\treturn (None, not bPaint, bContinue)" ]
[ "0.5498152", "0.54748654", "0.54442275", "0.51404667", "0.5081998", "0.5065666", "0.50126404", "0.50075436", "0.4972846", "0.49207228", "0.49176952", "0.4876025", "0.48672763", "0.48624754", "0.48509756", "0.48303744", "0.48123005", "0.4783596", "0.47810036", "0.47768372", "0.4770321", "0.47618595", "0.47492415", "0.47385836", "0.4729871", "0.47077745", "0.47026816", "0.46857792", "0.4681561", "0.46574336", "0.4652359", "0.46448305", "0.46431655", "0.46407428", "0.46303064", "0.46288064", "0.4616456", "0.46049172", "0.46049014", "0.46014145", "0.4590342", "0.45874006", "0.45819524", "0.45806426", "0.4574988", "0.45575988", "0.4554809", "0.4532772", "0.45216894", "0.45201427", "0.45187756", "0.4512237", "0.44963062", "0.4495169", "0.44944066", "0.4492455", "0.44904462", "0.44793802", "0.4463944", "0.44637945", "0.44607925", "0.44600725", "0.44574583", "0.4456199", "0.44553617", "0.44552237", "0.44274884", "0.44254678", "0.44197038", "0.4413541", "0.44118005", "0.4403457", "0.4398125", "0.438621", "0.438451", "0.4384483", "0.43807846", "0.43737563", "0.43734962", "0.43683603", "0.43634155", "0.43611965", "0.43600303", "0.43562707", "0.43556583", "0.43555987", "0.43551707", "0.43549526", "0.43442848", "0.43397233", "0.43346798", "0.43332106", "0.43289846", "0.43273622", "0.43260184", "0.4324096", "0.43236592", "0.43143445", "0.4309612", "0.43070704" ]
0.51842225
3
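A hedged usage sketch for the matches document above: it exercises both spec forms the method accepts, a Plot type checked via isinstance and a callable applied to the plot instance. The Plot base class, the RasterPlot subclass, and the instance below are illustrative assumptions, not values taken from this dataset row.

# Minimal sketch, assuming a HoloViews-style plot hierarchy; the class
# names are hypothetical stand-ins used only to make matches() runnable.
class Plot:
    def matches(self, spec):
        if callable(spec) and not isinstance(spec, type):
            return spec(self)
        elif isinstance(spec, type):
            return isinstance(self, spec)
        else:
            raise ValueError("Matching specs have to be either a type or a callable.")

class RasterPlot(Plot):
    pass

plot = RasterPlot()
print(plot.matches(RasterPlot))                     # True: type spec resolved via isinstance
print(plot.matches(lambda p: isinstance(p, Plot)))  # True: callable spec applied to the instance
# plot.matches("RasterPlot") would raise ValueError: neither a type nor a callable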
Traverses any nested DimensionedPlot, returning a list of all plots that match the specs. The specs should be supplied as a list of either Plot types or callables, which should return a boolean given the plot class.
def traverse(self, fn=None, specs=None, full_breadth=True):
    accumulator = []
    matches = specs is None
    if not matches:
        for spec in specs:
            matches = self.matches(spec)
            if matches:
                break
    if matches:
        accumulator.append(fn(self) if fn else self)
    # Assumes composite objects are iterables
    if hasattr(self, 'subplots') and self.subplots:
        for el in self.subplots.values():
            accumulator += el.traverse(fn, specs, full_breadth)
            if not full_breadth:
                break
    return accumulator
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_available_figures(self):\n return sorted((method[5:], func) \\\n for method, func in self.__class__.__dict__.iteritems() \\\n if method.startswith(\"plot_\") and callable(func))", "def get_plots(self):\n return list(self.plots.values())", "def _specs_for_flat_tensors(element_spec):\n if isinstance(element_spec, StructuredTensor.Spec):\n specs = []\n for _, field_spec in sorted(\n element_spec._field_specs.items(), key=lambda t: t[0]): # pylint: disable=protected-access\n specs.extend(_specs_for_flat_tensors(field_spec))\n elif isinstance(element_spec, type_spec.BatchableTypeSpec) and (\n element_spec.__class__._flat_tensor_specs is # pylint: disable=protected-access\n type_spec.BatchableTypeSpec._flat_tensor_specs): # pylint: disable=protected-access\n # Classes which use the default `_flat_tensor_specs` from\n # `BatchableTypeSpec` case (i.e. a derived class does not override\n # `_flat_tensor_specs`.) are encoded using `component_specs`.\n specs = nest.flatten(\n element_spec._component_specs, # pylint: disable=protected-access\n expand_composites=False)\n else:\n # In addition flatting any nesting in Python,\n # this default case covers things that are encoded by one tensor,\n # such as dense tensors which are unchanged by encoding and\n # ragged tensors and sparse tensors which are encoded by a variant tensor.\n specs = nest.flatten(element_spec, expand_composites=False)\n return specs", "def latex_figure_list(spec, includes, outfile, **kwargs):\n # Enable subsetting of, e.g. figures and tables...\n type = kwargs.pop(\"type\",'figure')\n for cfg, (id,item) in zip(spec,includes):\n if type != cfg.get('type','figure'):\n continue\n print(item, file=outfile)", "def plot_power_spectrum_fits(self, figsize=(20, 10)):\n\n debug_figs = []\n debug_fig_names = []\n # individual power spectra\n for ii in range(self.nangles):\n fig = plot_power_spectrum_fit(self.separated_components_ft[ii, 0], self.otf,\n {'pixel_size': self.dx, 'wavelength': self.wavelength, 'na': self.na},\n self.power_spectrum_params[ii, 0], frq_sim=(0, 0), mask=self.pspec_masks[ii, 0],\n figsize=figsize, ttl_str=\"Unshifted component, angle %d\" % ii)\n debug_figs.append(fig)\n debug_fig_names.append(\"power_spectrum_unshifted_component_angle=%d\" % ii)\n\n fig = plot_power_spectrum_fit(self.separated_components_ft[ii, 1], self.otf,\n {'pixel_size': self.dx, 'wavelength': self.wavelength, 'na': self.na},\n self.power_spectrum_params[ii, 1], frq_sim=self.frqs[ii], mask=self.pspec_masks[ii, 1],\n figsize=figsize, ttl_str=\"Shifted component, angle %d\" % ii)\n\n debug_figs.append(fig)\n debug_fig_names.append(\"power_spectrum_shifted_component_angle=%d\" % ii)\n\n return debug_figs, debug_fig_names", "def available_plots(self):\n return self.visualizer.available_plots()", "def test_plot_graphs(self):\n\n # Graphs who are not embedded, i.e., have no coordinates.\n COORDS_NO = {\n 'Graph',\n 'BarabasiAlbert',\n 'ErdosRenyi',\n 'FullConnected',\n 'RandomRegular',\n 'StochasticBlockModel',\n }\n\n # Coordinates are not in 2D or 3D.\n COORDS_WRONG_DIM = {'ImgPatches'}\n\n Gs = []\n for classname in set(graphs.__all__) - COORDS_NO - COORDS_WRONG_DIM:\n Graph = getattr(graphs, classname)\n\n # Classes who require parameters.\n if classname == 'NNGraph':\n Xin = np.arange(90).reshape(30, 3)\n Gs.append(Graph(Xin))\n elif classname in ['ImgPatches', 'Grid2dImgPatches']:\n Gs.append(Graph(img=self._img, patch_shape=(3, 3)))\n elif classname == 'LineGraph':\n Gs.append(Graph(graphs.Sensor(20, seed=42)))\n else:\n 
Gs.append(Graph())\n\n # Add more test cases.\n if classname == 'TwoMoons':\n Gs.append(Graph(moontype='standard'))\n Gs.append(Graph(moontype='synthesized'))\n elif classname == 'Cube':\n Gs.append(Graph(nb_dim=2))\n Gs.append(Graph(nb_dim=3))\n elif classname == 'DavidSensorNet':\n Gs.append(Graph(N=64))\n Gs.append(Graph(N=500))\n Gs.append(Graph(N=128))\n\n for G in Gs:\n self.assertTrue(hasattr(G, 'coords'))\n self.assertEqual(G.N, G.coords.shape[0])\n\n signal = np.arange(G.N) + 0.3\n\n G.plot(backend='pyqtgraph')\n G.plot(backend='matplotlib')\n G.plot(signal, backend='pyqtgraph')\n G.plot(signal, backend='matplotlib')\n plotting.close_all()", "def show_all_elements(ds, shapes_only=True):\n if shapes_only:\n log_print = lambda i, e: logger.info(\n \"Element %d:\\nshapes:\\n %s\",\n i, _dict_to_logstring(_element_shapes_dict(e)))\n else:\n log_print = lambda i, e: logger.info(\n \"Element %d:\\nshapes:\\n %s\\ncontents:\\n %s\",\n i, _dict_to_logstring(_element_shapes_dict(e)), _dict_to_logstring(e))\n\n logger.info(\"Showing all elements.\")\n i = 0\n for i, element in ds.enumerate(start=1).as_numpy_iterator():\n if not isinstance(element, dict):\n element = dict(enumerate(element))\n log_print(i, element)\n\n logger.info(\"All %d elements shown.\", i)\n return ds", "def plots():\n out = interactive_output(generate_plots, {'gsize':gridSlider, 'ra':RABox, 'ra':RASlider, 'dec':DECBox, 'dec':DECSlider, 'ang':radBox, 'ang':radSlider, 'style':hexDrop})\n return display(widgrid, out)", "def all(folder, mt=False):\n handles = []\n experiments = get_experiment_series(folder, mT=mt)\n for ex in experiments:\n if mt:\n handles.append(\n plt.plot(\n ex.distance,\n ex.weight,\n label='{}mm {}mT'.format(ex.height, ex.magnet))[0])\n else:\n handles.append(\n plt.plot(\n ex.distance,\n ex.weight,\n label='{}mm'.format(ex.height))[0])\n plt.legend()\n plt.show()", "def plot_list(self):\n wrapper = TextWrapper(subsequent_indent = \" \" * 22,\n width = 78)\n for method, func in self.get_available_figures():\n if method != \"list\":\n wrapper.initial_indent = (\"%-20s \" % method).ljust(22)\n print wrapper.fill(func.figure_name)", "def _iter_panels(self, sides='lrbt'):\n axs = [self] if self.get_visible() else []\n if not ({*sides} <= {*'lrbt'}):\n raise ValueError(f'Invalid sides {sides!r}.')\n for s in sides:\n for ax in getattr(self, '_' + s + 'panels'):\n if not ax or not ax.get_visible():\n continue\n axs.append(ax)\n return axs", "def plot_figs(self):\n tstart = time.process_time()\n\n saving = self.save_dir is not None\n\n # todo: populate these\n figs = []\n fig_names = []\n\n # plot images\n figh = self.plot_sim_imgs(self.frqs_guess, figsize=self.figsize)\n\n if saving:\n figh.savefig(os.path.join(self.save_dir, \"raw_images.png\"))\n if not self.hold_figs_open:\n plt.close(figh)\n\n # plot frequency fits\n fighs, fig_names = self.plot_frequency_fits(figsize=self.figsize)\n for fh, fn in zip(fighs, fig_names):\n if saving:\n fh.savefig(os.path.join(self.save_dir, \"%s.png\" % fn))\n if not self.hold_figs_open:\n plt.close(fh)\n\n # plot power spectrum fits\n fighs, fig_names = self.plot_power_spectrum_fits(figsize=self.figsize)\n for fh, fn in zip(fighs, fig_names):\n if saving:\n fh.savefig(os.path.join(self.save_dir, \"%s.png\" % fn))\n if not self.hold_figs_open:\n plt.close(fh)\n\n # widefield power spectrum fit\n figh = plot_power_spectrum_fit(self.widefield_ft, self.otf,\n {'pixel_size': self.dx, 'wavelength': self.wavelength, 'na': self.na},\n self.pspec_params_wf, mask=self.mask_wf, 
figsize=self.figsize,\n ttl_str=\"Widefield power spectrum\")\n if saving:\n figh.savefig(os.path.join(self.save_dir, \"power_spectrum_widefield.png\"))\n if not self.hold_figs_open:\n plt.close(figh)\n\n # plot filters used in reconstruction\n fighs, fig_names = self.plot_reconstruction_diagnostics(figsize=self.figsize)\n for fh, fn in zip(fighs, fig_names):\n if saving:\n fh.savefig(os.path.join(self.save_dir, \"%s.png\" % fn))\n if not self.hold_figs_open:\n plt.close(fh)\n\n # plot reconstruction results\n fig = self.plot_reconstruction(figsize=self.figsize)\n if saving:\n fig.savefig(os.path.join(self.save_dir, \"sim_reconstruction.png\"), dpi=400)\n if not self.hold_figs_open:\n plt.close(fig)\n\n # plot otf\n fig = self.plot_otf(figsize=self.figsize)\n if saving:\n fig.savefig(os.path.join(self.save_dir, \"otf.png\"))\n if not self.hold_figs_open:\n plt.close(fig)\n\n tend = time.process_time()\n # print_tee(\"plotting results took %0.2fs\" % (tend - tstart), self.log_file)\n print(\"plotting results took %0.2fs\" % (tend - tstart))\n\n return figs, fig_names", "def getPlottableReactions(reactionSuite, observable='crossSection'):\n result = []\n for reactionList in reactionSuite.reactions, reactionSuite.sums.crossSectionSums, \\\n reactionSuite.sums.multiplicitySums, \\\n reactionSuite.fissionComponents, reactionSuite.productions:\n for r in reactionList:\n if hasattr(r, observable):\n result.append(r)\n return result", "def axes_contains(ax, obj_list):\n # Get plot elements\n elems = ax.get_children()\n\n # Loop over list of objects that should be in the plot\n contains_all = False\n for obj in obj_list:\n objtype, num_expected = obj\n num = 0\n for elem in elems:\n if isinstance(elem, objtype): num += 1\n if num != num_expected:\n return False\n\n # Return True if no problems found\n return True", "def which_patches(extent):\n # TODO check input\n ramin, ramax, decmin, decmax = extent\n p1 = which_patch(ramin, decmin) # lower left\n p2 = which_patch(ramax, decmin) # lower right\n p3 = which_patch(ramin, decmax) # upper left\n if not ((p1 >= 0) & (p2 >= 0) & (p3 >= 0)):\n patch_ids = []\n else:\n patch_ids = [range(y, y + p2 - p1 + 1) for y in range(p1, p3 + 9, 9)]\n return np.array(patch_ids).flatten()", "def print_parsed(specs):\n observed_types = set()\n for i in specs.values():\n observed_types.update(i['types'])\n observed_types = sorted(observed_types)\n\n s = ['# Observed types from the parsed document']\n s.append('TRACKTYPES = [')\n for i in observed_types:\n s.append(\" '{}',\".format(i))\n s.append(']')\n print('\\n'.join(s) + '\\n')\n\n data_types = specs['bigDataUrl']['types']\n\n s = ['# Tracks for which the definition specifies bigDataUrl']\n s.append('DATA_TRACKTYPES = [')\n for i in data_types:\n s.append(\" '{}',\".format(i))\n s.append(']')\n print('\\n'.join(s) + '\\n')\n print('param_defs = [')\n print()\n for k, v in sorted(specs.items()):\n print(\n (\n '''\n Param(\n name=\"{k}\",\n fmt={v[format]},\n types={v[types]},\n required={v[required]},\n validator=str),'''.format(**locals())\n )\n )", "def plot_all(best_results: BestResults,\n *args,\n **kwargs) -> plt.Figure:\n if isinstance(best_results, BestResultsOne):\n return plot_all_one(best_results, *args, **kwargs)\n elif isinstance(best_results, BestResultsTwo):\n return plot_all_two(best_results, *args, **kwargs)\n else:\n raise ValueError('best_results argument is of unknown type')", "def test_get_axes():\n fig, axs = plt.subplots()\n assert all(\n isinstance(ax, mpl.axes.Axes)\n for ax in 
prettypyplot.tools.get_axes(axs)\n )\n assert all(\n isinstance(ax, mpl.axes.Axes)\n for ax in prettypyplot.tools.get_axes(None)\n )\n with pytest.raises(TypeError):\n prettypyplot.tools.get_axes(fig)", "def get_renderables(y_hat, args):\n # len(y_hat.fit) == 1 means that we do not have hierarhcy\n if len(y_hat.fit) == 1:\n if args.from_flat_partition:\n return _renderables_from_flat_partition(y_hat, args)\n else:\n return _renderables_from_flat_primitives(y_hat, args)\n\n if args.from_fit:\n return _renderables_from_fit(y_hat, args)\n else:\n return _renderables_from_partition(y_hat, args)", "def plot(self, **kwargs):\n from ..plot.plotutil import PlotUtilities\n\n if not self.plottable:\n raise TypeError(\"Simulation level packages are not plottable\")\n\n axes = PlotUtilities._plot_package_helper(self, **kwargs)\n return axes", "def plot(self, ax: Axes):\n\n plotted_objects = Element.plot(self, ax)\n plotted_objects += plotting.plot_aperture(ax, self)\n\n if plot_blockers:\n plotted_objects += plotting.plot_blocker(ax, self, self.blocker_diameter)\n\n return plotted_objects", "def convert_plots(self, workdir, imgFormat):\n plotList = list()\n for (label, plot) in self._expectedPlots_globalAvg:\n plotList.append(plot)\n for (label, plot) in self._expectedPlots_Nino:\n plotList.append(plot)\n for (label, plot) in self._expectedPlots_transportDiags:\n plotList.append(plot)\n\n self._convert_plots(workdir, imgFormat, plotList )", "def scree_plots(t, ndim = []):\n total_dim = len(t.shape)\n if not ndim: # case with no input ndim\n for i in range(total_dim):\n ndim.append(t.shape[i])\n elif len(ndim) != total_dim: # case that input ndim does not agree with number of dimensions of the input tensor\n for i in range(total_dim):\n ndim.append(t.shape[i])\n else: # check whether the number in ndim is less than the size of that dimension\n for i in range(total_dim):\n if ndim[i] > t.shape[i]:\n ndim[i] = t.shape[i]\n \n scree = []\n for i in range(total_dim):\n t_unfold = unfold_axis(t, i)\n [ _, e, _ ] = fast_svd(np.matmul(t_unfold,np.transpose(t_unfold)),ndim[i],n_iter=15)\n e = np.sqrt(e)\n e = np.real(e)\n scree.append(e)\n\n return scree", "def spectators(self):\n return self._return_if('_spectators')", "def plot_all_subplot_trends(resolutions, temporal_resolution, temporal_decomposition, detrend, imagefolder = 'images/timeseries/SIC/',seaice_source='nsidc'):\n for n, temp_res, temp_decomp, dt in itertools.product(resolutions, temporal_resolution, temporal_decomposition, detrend):\n plot_subplot_trend(anomlous = 'anomalous' == temp_decomp, temporal_resolution = temp_res, spatial_resolution = n, detrend = dt == 'detrended',seaice_source=seaice_source)", "def seemlike_dimension(vols):\n count = 0\n params = VolCaps.PARAMS_MAPPER.values()\n sorted_vols_caps = [VolCaps.basic_cap_by_method\n (vols, param, sorted, reverse=True) for param in\n params]\n for sorted_dimension in sorted_vols_caps:\n max_cap = sorted_dimension[0]\n min_cap = sorted_dimension[1]\n curr_rsd = get_rsd(max_cap[0], min_cap[0])\n if curr_rsd < 0.1:\n objective = sum([max_cap[0], min_cap[0]]) / 2\n yield (objective, max_cap[1], min_cap[1])\n count += 1\n else:\n yield False\n assert not count, \"There aren't seem like dimensions at all\"", "def getAllWidgets(self):\n \n visualisations = Visualisation.objects.filter(dataSource=self)\n widgets = []\n for vis in visualisations:\n widgets.append(vis.getWidget())\n return widgets", "def plot_explorer_panels(self, param_val, photonnumber, initial_index, final_index, qbt_index, 
osc_index):\n def fig_ax(index):\n return fig, axes_list_flattened[index]\n\n param_index = np.searchsorted(self.param_vals, param_val)\n param_val = self.param_vals[param_index]\n\n initial_bare = self.sweep.lookup.bare_index(initial_index, param_index)\n final_bare = self.sweep.lookup.bare_index(final_index, param_index)\n energy_ground = self.sweep.lookup.energy_dressed_index(0, param_index)\n energy_initial = self.sweep.lookup.energy_dressed_index(initial_index, param_index) - energy_ground\n energy_final = self.sweep.lookup.energy_dressed_index(final_index, param_index) - energy_ground\n qbt_subsys = self.sweep.hilbertspace[qbt_index]\n\n nrows = 3\n ncols = 2\n fig, axs = plt.subplots(ncols=ncols, nrows=nrows, figsize=self.figsize)\n axes_list_flattened = [elem for sublist in axs for elem in sublist]\n\n # Panel 1 ----------------------------------\n panels.display_bare_spectrum(self.sweep, qbt_subsys, param_val, fig_ax(0))\n\n # Panels 2 and 6----------------------------\n if type(qbt_subsys).__name__ in ['Transmon', 'Fluxonium']: # do not plot wavefunctions if multi-dimensional\n panels.display_bare_wavefunctions(self.sweep, qbt_subsys, param_val, fig_ax(1))\n panels.display_charge_matrixelems(self.sweep, initial_bare, qbt_subsys, param_val, fig_ax(5))\n\n # Panel 3 ----------------------------------\n panels.display_dressed_spectrum(self.sweep, initial_bare, final_bare, energy_initial, energy_final, param_val,\n fig_ax(2))\n\n # Panel 4 ----------------------------------\n panels.display_n_photon_qubit_transitions(self.sweep, photonnumber, initial_bare, param_val, fig_ax(3))\n\n # Panel 5 ----------------------------------\n panels.display_chi_01(self.sweep, qbt_index, osc_index, param_index, fig_ax(4))\n\n fig.tight_layout()\n return fig, axs", "def dims_list(self):\n return [n for n in self.schema.names if n in self.dims]", "def _get_extent_axes(self, x):\n if not hasattr(self, 'get_subplotspec'):\n return [self]\n y = ('y' if x == 'x' else 'x')\n idx = (0 if x == 'x' else 1)\n argfunc = (np.argmax if x == 'x' else np.argmin)\n irange = self._range_gridspec(x)\n axs = [ax for ax in self.figure._axes_main\n if ax._range_gridspec(x) == irange]\n if not axs:\n return [self]\n else:\n pax = axs.pop(argfunc([ax._range_gridspec(y)[idx] for ax in axs]))\n return [pax, *axs]", "def custom_graphs(self) -> List[Component]:\n graphs = []\n # TODO: Figure this out\n for i, go_data in enumerate(self.config.overview_graphs):\n groupby = go_data.pop('groupby', None)\n agg = go_data.pop('agg', None)\n if groupby and agg:\n data = getattr(self.summary.groupby(groupby), agg)()\n else:\n data = self.summary\n graphs.append(\n dbc.Row(\n dbc.Col(\n dcc.Graph(\n id=f'graph_{i}',\n figure=self.graph(data, go_data.pop('graph_type'), **go_data)\n )\n )\n )\n )\n return graphs", "def plot(self):\n fig, axes = plt.subplots(math.ceil(len(self.plots) / self.col_wrap), self.col_wrap)\n\n for ps, ax in zip(self.plots, axes.flatten()):\n for p in ps:\n if p.x is not None and p.y is not None:\n p.method(x=p.x, y=p.y, *p.args, ax=ax, **p.kwargs)\n else:\n p.method(*p.args, ax=ax, **p.kwargs)\n\n return fig, axes", "def find_groups(self, mesh):\n grps = []\n dim = mesh.give_dim()\n if dim:\n ctypes = self._dct[dim]\n grps = self._exp.find_groups_from_ctypes(mesh, ctypes)\n log.debug(\"GroupExplorator.find_groups for mesh %s returns %s with dim %s and dct %s\", mesh, grps, dim, self._dct)\n return grps", "def nested(self, format_callback=None):\n seen = set()\n roots = []\n for root in self.edges.get(None, ()):\n 
roots.extend(self._nested(root, seen, format_callback))\n return roots", "def get_prominent_figures():\n result = []\n handles = get_all_handles()\n\n for handle in handles:\n result.append(handle)\n\n return jsonify({\n 'handles': result\n })", "def save_figures(self):\n cls = self.__class__\n plotted_canvas = []\n\n for result in self._get_output_objects_info():\n result_objects_list = self._get_results_from_all_files(result)\n\n canvas = cls.plotting_function(\n result_objects_list,\n labels=self.labels,\n plot_config=self.json_config.get(result.name),\n **self.plotting_kwargs,\n )\n\n plotted_canvas.append(canvas)\n\n plot.save_canvas(\n result.with_input(self.input_arguments),\n canvas,\n self.output,\n self.suffix,\n )\n return plotted_canvas", "def get_plot(self, plot_format):\n return subplots(plot_format=plot_format)", "def convert_plots(self, workdir, imgFormat):\n my_plot_list = list()\n for i in range(len(self._suffix)):\n my_plot_list.extend(eval('self._expectedPlots_{0}'.format(self._suffix[i])))\n\n self._convert_plots(workdir, imgFormat, my_plot_list)", "def convert_plots(self, workdir, imgFormat):\n my_plot_list = list()\n for i in range(len(self._labels)):\n my_plot_list.extend(eval('self._expectedPlots_{0}'.format(self._labels[i])))\n\n self._convert_plots(workdir, imgFormat, my_plot_list)", "def get_standard_plots(\n experiment: Experiment, generation_strategy: Optional[GenerationStrategy]\n) -> List[go.Figure]:\n\n objective = not_none(experiment.optimization_config).objective\n if isinstance(objective, MultiObjective):\n logger.warning(\n \"get_standard_plots does not currently support MultiObjective \"\n \"optimization experiments. Returning an empty list.\"\n )\n return []\n if isinstance(objective, ScalarizedObjective):\n logger.warning(\n \"get_standard_plots does not currently support ScalarizedObjective \"\n \"optimization experiments. Returning an empty list.\"\n )\n return []\n\n if experiment.fetch_data().df.empty:\n logger.info(f\"Experiment {experiment} does not yet have data, nothing to plot.\")\n return []\n\n output_plot_list = []\n output_plot_list.append(\n _get_objective_trace_plot(\n experiment=experiment,\n metric_name=not_none(experiment.optimization_config).objective.metric.name,\n # TODO: Adjust `model_transitions` to case where custom trials are present\n # and generation strategy does not start right away.\n model_transitions=not_none(generation_strategy).model_transitions\n if generation_strategy is not None\n else [],\n optimization_direction=(\n \"minimize\"\n if not_none(experiment.optimization_config).objective.minimize\n else \"maximize\"\n ),\n )\n )\n\n # Objective vs. parameter plot requires a `Model`, so add it only if model\n # is alrady available. 
In cases where initially custom trials are attached,\n # model might not yet be set on the generation strategy.\n if generation_strategy and generation_strategy.model:\n model = not_none(not_none(generation_strategy).model)\n try:\n output_plot_list.append(\n _get_objective_v_param_plot(\n search_space=experiment.search_space,\n model=model,\n metric_name=not_none(\n experiment.optimization_config\n ).objective.metric.name,\n trials=experiment.trials,\n )\n )\n output_plot_list.append(_get_cross_validation_plot(model))\n except NotImplementedError:\n # Model does not implement `predict` method.\n pass\n\n return [plot for plot in output_plot_list if plot is not None]", "def test_partially_one(self):\n setups = self.get_setup().decompress([\"dimensions.species_id\"])\n assert isinstance(setups, PlotSetupGroup)\n assert all(isinstance(setup, PlotSetup) for setup in setups)\n assert len(setups) == 2\n res = {\n (\n s.panels.collect_equal(\"dimensions\").variable,\n s.panels.collect_equal(\"dimensions\").species_id,\n s.panels.collect_equal(\"dimensions\").time,\n )\n for s in setups\n }\n sol = {\n ((\"dry_deposition\", \"wet_deposition\"), 1, (1, 2, 3)),\n ((\"dry_deposition\", \"wet_deposition\"), 2, (1, 2, 3)),\n }\n assert res == sol", "def plot_spectrum(spec, config):\n for v in config.getpars():\n plot.plot_projection(spec, v)", "def _GetSpecsForSchema(self, schema):\n specs = []\n settings = Gio.Settings.new(schema)\n for action in settings.keys():\n # Handle custom keybindings specially.\n if action == 'custom-list':\n custom_keys = settings.get_strv(action)\n if custom_keys:\n specs += self._GetSpecsForCustomBindingSchema(schema,\n custom_keys)\n elif settings.get_value(action).is_of_type(self._keybinding_type):\n keybindings = settings.get_strv(action)\n if keybindings:\n specs += [self._BuildSpec(schema, action, binding)\n for binding in keybindings if binding]\n return specs", "def plot(self) -> List[matplotlib.figure.Figure]:\n figs = []\n # Figure 1: Position\n fig = self.plot_kine_var(1, self.trial_name, ('X (mm)', 'Y (mm)', 'Z (mm)'), self.prev_filled[0],\n self.smoothed[0], self.filled[0], self.sfs[0])\n figs.append(fig)\n\n # Figure 2: Orientation\n fig = self.plot_kine_var(2, self.trial_name, ('Flex/Ext (deg)', 'Lat Flex (deg)', 'Axial (deg)'),\n self.prev_filled[1], self.smoothed[1], self.filled[1], self.sfs[1])\n figs.append(fig)\n\n return figs", "def get_data(self):\n for i, plot in enumerate(self.plots):\n for j, trace in enumerate(plot):\n self.plots[i][j][\"y\"] = [\n self.function_mapper[x](i) if x in self.function_mapper else x\n for x in trace[\"y\"]\n ]\n return self.plots", "def extract_figures(self, plot_dir, remove=False):\r\n if not self._engine:\r\n msg = \"Session is not open\"\r\n raise Oct2PyError(msg)\r\n figures = self._engine.extract_figures(plot_dir, remove)\r\n return figures", "def report_plots(logger, iteration=0):\n # type: (Logger, int) -> ()\n\n # report 3d surface\n surface = np.random.randint(10, size=(10, 10))\n logger.report_surface(\n \"example_surface\",\n \"series1\",\n iteration=iteration,\n matrix=surface,\n xaxis=\"title X\",\n yaxis=\"title Y\",\n zaxis=\"title Z\",\n )\n\n # report 3d scatter plot\n scatter3d = np.random.randint(10, size=(10, 3))\n logger.report_scatter3d(\n \"example_scatter_3d\",\n \"series_xyz\",\n iteration=iteration,\n scatter=scatter3d,\n xaxis=\"title x\",\n yaxis=\"title y\",\n zaxis=\"title z\",\n )", "def get_components_drawables(self):\n # print self.component_list\n print len(self.component_list)\n for c 
in self.component_list:\n return c.get_drawables()", "def matched_sub_graph_instances(self, graph: Graph):\n if self.replacement_desc.match_kind == 'points': # instance is specified with lists of start/end nodes\n match = self._match_sub_graph_for_points(graph)\n if match is not None:\n yield match\n elif self.replacement_desc.match_kind == 'scope': # instance is specified with a node name pattern\n for instance in self.replacement_desc.sub_graph_instances():\n match = self._match_sub_graph_for_scope(graph, instance)\n if match is not None:\n yield match\n else:\n raise Error('Unsupported match kind \"{}\". Match kinds \"points\" or \"scope\" are supported only. '.format(\n self.replacement_desc.match_kind) +\n refer_to_faq_msg(35))", "def expand(cls, specs, **kwargs):\n specs = Search.list_wrap(specs)\n\n params = []\n for spec in specs:\n params.extend(cls._expand_spec(spec, **kwargs))\n return params", "def generate_plots(rspecs, datroot, pltroot, labels,\n verbose=False, subplot=False):\n\n if subplot:\n n = len(labels)\n plt.figure(figsize=(4*n,4))\n else:\n plt.figure(figsize=(6,4))\n\n if verbose:\n print 'Generating {} plots...'.format(len(rspecs))\n print 'First plot will hang a little longer'\n\n # Make a plot for each region\n for i in xrange(len(rspecs)):\n # Plot line/points for each label\n for flab, j in zip(labels, xrange(n)):\n dat_fname = '{0}_{1:02d}_band_{2}.dat'.format(datroot, i+1, flab)\n a = np.loadtxt(dat_fname)\n if subplot:\n plt.subplot(1, n, j+1) # j indexes labels\n plt.plot(a[:,0], a[:,1], '-o') # Use default color cycle\n plt.title('Region {:g}, band {}'.format(i+1, flab))\n fplot('Radial dist. (?)', 'Intensity? (?)')\n else:\n plt.plot(a[:,0], a[:,1], '-o') # Use default color cycle\n # Format overall plot\n if not subplot:\n fplot('Radial dist. (?)', 'Intensity? 
(?)')\n plt.title('Region %g' % (i+1))\n plt.legend(labels, loc='best')\n plt.tight_layout()\n plt.savefig('{0}_{1:02d}.png'.format(pltroot, i+1), dpi=150)\n plt.clf()\n if verbose:\n print 'Saved: {0}_{1:02d}.png'.format(pltroot, i+1)", "def all_spectra(self) -> List[Spectrum]:\n all_spec = []\n for ann_i in range(self._num_ann):\n # If there is only one spectrum per annulus then get_spectra will just return an object\n ann_spec = self.get_spectra(ann_i)\n if isinstance(ann_spec, Spectrum):\n ann_spec = [ann_spec]\n\n all_spec += ann_spec\n\n return all_spec", "def test_list_figures_success(\n self,\n mock_list_figures,\n ):\n project_id = util.MOCK_UUID_1\n deployment_id = util.MOCK_UUID_1\n monitoring_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.get(\n f\"/projects/{project_id}/deployments/{deployment_id}/monitorings/{monitoring_id}/figures\"\n )\n result = rv.json()\n self.assertIsInstance(result, list)\n self.assertEqual(rv.status_code, 200)\n\n mock_list_figures.assert_any_call(\n deployment_id=deployment_id, monitoring_id=monitoring_id\n )", "def make_plots(self,indices=None,hardcopy=False,hardcopydir='.',hardcopyprefix='',hardcopytype='png'):\n\t\tfor (i,E) in enumerate(self.experiments):\n\t\t\tif(indices==None) or (i in indices):\n\t\t\t\tE.show_plot(hardcopy,hardcopydir,hardcopyprefix,hardcopytype)", "def _generate_all_charts(spec, input_data):\n\n def _generate_chart(_, data_q, graph):\n \"\"\"Generates the chart.\n \"\"\"\n\n logs = list()\n\n logging.info(\" Generating the chart '{0}' ...\".\n format(graph.get(\"title\", \"\")))\n logs.append((\"INFO\", \" Generating the chart '{0}' ...\".\n format(graph.get(\"title\", \"\"))))\n\n job_name = graph[\"data\"].keys()[0]\n\n csv_tbl = list()\n res = list()\n\n # Transform the data\n logs.append((\"INFO\", \" Creating the data set for the {0} '{1}'.\".\n format(graph.get(\"type\", \"\"), graph.get(\"title\", \"\"))))\n data = input_data.filter_data(graph, continue_on_error=True)\n if data is None:\n logging.error(\"No data.\")\n return\n\n chart_data = dict()\n chart_tags = dict()\n for job, job_data in data.iteritems():\n if job != job_name:\n continue\n for index, bld in job_data.items():\n for test_name, test in bld.items():\n if chart_data.get(test_name, None) is None:\n chart_data[test_name] = OrderedDict()\n try:\n chart_data[test_name][int(index)] = \\\n test[\"result\"][\"receive-rate\"]\n chart_tags[test_name] = test.get(\"tags\", None)\n except (KeyError, TypeError):\n pass\n\n # Add items to the csv table:\n for tst_name, tst_data in chart_data.items():\n tst_lst = list()\n for bld in builds_dict[job_name]:\n itm = tst_data.get(int(bld), '')\n if not isinstance(itm, str):\n itm = itm.avg\n tst_lst.append(str(itm))\n csv_tbl.append(\"{0},\".format(tst_name) + \",\".join(tst_lst) + '\\n')\n\n # Generate traces:\n traces = list()\n index = 0\n groups = graph.get(\"groups\", None)\n visibility = list()\n\n if groups:\n for group in groups:\n visible = list()\n for tag in group:\n for test_name, test_data in chart_data.items():\n if not test_data:\n logs.append((\"WARNING\",\n \"No data for the test '{0}'\".\n format(test_name)))\n continue\n if tag in chart_tags[test_name]:\n message = \"index: {index}, test: {test}\".format(\n index=index, test=test_name)\n test_name = test_name.split('.')[-1]\n try:\n trace, rslt = _generate_trending_traces(\n test_data,\n job_name=job_name,\n build_info=build_info,\n name='-'.join(test_name.split('-')[2:-1]),\n color=COLORS[index])\n except IndexError:\n message = \"Out of colors: 
{}\".format(message)\n logs.append((\"ERROR\", message))\n logging.error(message)\n index += 1\n continue\n traces.extend(trace)\n visible.extend([True for _ in range(len(trace))])\n res.append(rslt)\n index += 1\n break\n visibility.append(visible)\n else:\n for test_name, test_data in chart_data.items():\n if not test_data:\n logs.append((\"WARNING\", \"No data for the test '{0}'\".\n format(test_name)))\n continue\n message = \"index: {index}, test: {test}\".format(\n index=index, test=test_name)\n test_name = test_name.split('.')[-1]\n try:\n trace, rslt = _generate_trending_traces(\n test_data,\n job_name=job_name,\n build_info=build_info,\n name='-'.join(test_name.split('-')[2:-1]),\n color=COLORS[index])\n except IndexError:\n message = \"Out of colors: {}\".format(message)\n logs.append((\"ERROR\", message))\n logging.error(message)\n index += 1\n continue\n traces.extend(trace)\n res.append(rslt)\n index += 1\n\n if traces:\n # Generate the chart:\n try:\n layout = deepcopy(graph[\"layout\"])\n except KeyError as err:\n logging.error(\"Finished with error: No layout defined\")\n logging.error(repr(err))\n return\n if groups:\n show = list()\n for i in range(len(visibility)):\n visible = list()\n for r in range(len(visibility)):\n for _ in range(len(visibility[r])):\n visible.append(i == r)\n show.append(visible)\n\n buttons = list()\n buttons.append(dict(\n label=\"All\",\n method=\"update\",\n args=[{\"visible\": [True for _ in range(len(show[0]))]}, ]\n ))\n for i in range(len(groups)):\n try:\n label = graph[\"group-names\"][i]\n except (IndexError, KeyError):\n label = \"Group {num}\".format(num=i + 1)\n buttons.append(dict(\n label=label,\n method=\"update\",\n args=[{\"visible\": show[i]}, ]\n ))\n\n layout['updatemenus'] = list([\n dict(\n active=0,\n type=\"dropdown\",\n direction=\"down\",\n xanchor=\"left\",\n yanchor=\"bottom\",\n x=-0.12,\n y=1.0,\n buttons=buttons\n )\n ])\n\n name_file = \"{0}-{1}{2}\".format(spec.cpta[\"output-file\"],\n graph[\"output-file-name\"],\n spec.cpta[\"output-file-type\"])\n\n logs.append((\"INFO\", \" Writing the file '{0}' ...\".\n format(name_file)))\n plpl = plgo.Figure(data=traces, layout=layout)\n try:\n ploff.plot(plpl, show_link=False, auto_open=False,\n filename=name_file)\n except plerr.PlotlyEmptyDataError:\n logs.append((\"WARNING\", \"No data for the plot. 
Skipped.\"))\n\n data_out = {\n \"job_name\": job_name,\n \"csv_table\": csv_tbl,\n \"results\": res,\n \"logs\": logs\n }\n data_q.put(data_out)\n\n builds_dict = dict()\n for job in spec.input[\"builds\"].keys():\n if builds_dict.get(job, None) is None:\n builds_dict[job] = list()\n for build in spec.input[\"builds\"][job]:\n status = build[\"status\"]\n if status != \"failed\" and status != \"not found\" and \\\n status != \"removed\":\n builds_dict[job].append(str(build[\"build\"]))\n\n # Create \"build ID\": \"date\" dict:\n build_info = dict()\n tb_tbl = spec.environment.get(\"testbeds\", None)\n for job_name, job_data in builds_dict.items():\n if build_info.get(job_name, None) is None:\n build_info[job_name] = OrderedDict()\n for build in job_data:\n testbed = \"\"\n tb_ip = input_data.metadata(job_name, build).get(\"testbed\", \"\")\n if tb_ip and tb_tbl:\n testbed = tb_tbl.get(tb_ip, \"\")\n build_info[job_name][build] = (\n input_data.metadata(job_name, build).get(\"generated\", \"\"),\n input_data.metadata(job_name, build).get(\"version\", \"\"),\n testbed\n )\n\n work_queue = multiprocessing.JoinableQueue()\n manager = multiprocessing.Manager()\n data_queue = manager.Queue()\n cpus = multiprocessing.cpu_count()\n\n workers = list()\n for cpu in range(cpus):\n worker = Worker(work_queue,\n data_queue,\n _generate_chart)\n worker.daemon = True\n worker.start()\n workers.append(worker)\n os.system(\"taskset -p -c {0} {1} > /dev/null 2>&1\".\n format(cpu, worker.pid))\n\n for chart in spec.cpta[\"plots\"]:\n work_queue.put((chart, ))\n work_queue.join()\n\n anomaly_classifications = list()\n\n # Create the header:\n csv_tables = dict()\n for job_name in builds_dict.keys():\n if csv_tables.get(job_name, None) is None:\n csv_tables[job_name] = list()\n header = \"Build Number:,\" + \",\".join(builds_dict[job_name]) + '\\n'\n csv_tables[job_name].append(header)\n build_dates = [x[0] for x in build_info[job_name].values()]\n header = \"Build Date:,\" + \",\".join(build_dates) + '\\n'\n csv_tables[job_name].append(header)\n versions = [x[1] for x in build_info[job_name].values()]\n header = \"Version:,\" + \",\".join(versions) + '\\n'\n csv_tables[job_name].append(header)\n\n while not data_queue.empty():\n result = data_queue.get()\n\n anomaly_classifications.extend(result[\"results\"])\n csv_tables[result[\"job_name\"]].extend(result[\"csv_table\"])\n\n for item in result[\"logs\"]:\n if item[0] == \"INFO\":\n logging.info(item[1])\n elif item[0] == \"ERROR\":\n logging.error(item[1])\n elif item[0] == \"DEBUG\":\n logging.debug(item[1])\n elif item[0] == \"CRITICAL\":\n logging.critical(item[1])\n elif item[0] == \"WARNING\":\n logging.warning(item[1])\n\n del data_queue\n\n # Terminate all workers\n for worker in workers:\n worker.terminate()\n worker.join()\n\n # Write the tables:\n for job_name, csv_table in csv_tables.items():\n file_name = spec.cpta[\"output-file\"] + \"-\" + job_name + \"-trending\"\n with open(\"{0}.csv\".format(file_name), 'w') as file_handler:\n file_handler.writelines(csv_table)\n\n txt_table = None\n with open(\"{0}.csv\".format(file_name), 'rb') as csv_file:\n csv_content = csv.reader(csv_file, delimiter=',', quotechar='\"')\n line_nr = 0\n for row in csv_content:\n if txt_table is None:\n txt_table = prettytable.PrettyTable(row)\n else:\n if line_nr > 1:\n for idx, item in enumerate(row):\n try:\n row[idx] = str(round(float(item) / 1000000, 2))\n except ValueError:\n pass\n try:\n txt_table.add_row(row)\n except Exception as err:\n 
logging.warning(\"Error occurred while generating TXT \"\n \"table:\\n{0}\".format(err))\n line_nr += 1\n txt_table.align[\"Build Number:\"] = \"l\"\n with open(\"{0}.txt\".format(file_name), \"w\") as txt_file:\n txt_file.write(str(txt_table))\n\n # Evaluate result:\n if anomaly_classifications:\n result = \"PASS\"\n for classification in anomaly_classifications:\n if classification == \"regression\" or classification == \"outlier\":\n result = \"FAIL\"\n break\n else:\n result = \"FAIL\"\n\n logging.info(\"Partial results: {0}\".format(anomaly_classifications))\n logging.info(\"Result: {0}\".format(result))\n\n return result", "def getSetup(figsize, gridd, multz=None, empts=None):\n sns.set(style=\"whitegrid\", font_scale=0.7, color_codes=True, palette=\"colorblind\", rc={\"grid.linestyle\": \"dotted\", \"axes.linewidth\": 0.6})\n\n # create empty list if empts isn't specified\n if empts is None:\n empts = []\n\n if multz is None:\n multz = dict()\n\n # Setup plotting space and grid\n f = plt.figure(figsize=figsize, constrained_layout=True)\n gs1 = gridspec.GridSpec(*gridd, figure=f)\n\n # Get list of axis objects\n x = 0\n ax = list()\n while x < gridd[0] * gridd[1]:\n if x not in empts and x not in multz.keys(): # If this is just a normal subplot\n ax.append(f.add_subplot(gs1[x]))\n elif x in multz.keys(): # If this is a subplot that spans grid elements\n ax.append(f.add_subplot(gs1[x: x + multz[x] + 1]))\n x += multz[x]\n x += 1\n\n return (ax, f)", "def descendants(self) -> List[Plugin]:\n raise NotImplementedError()", "def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline,\n units, scalings, titles, axes, plot_type, cmap=None,\n gfp=False, window_title=None, spatial_colors=False,\n selectable=True, zorder='unsorted',\n noise_cov=None, colorbar=True, mask=None, mask_style=None,\n mask_cmap=None, mask_alpha=.25, time_unit='s',\n show_names=False, group_by=None, sphere=None):\n import matplotlib.pyplot as plt\n\n # For evoked.plot_image ...\n # First input checks for group_by and axes if any of them is not None.\n # Either both must be dicts, or neither.\n # If the former, the two dicts provide picks and axes to plot them to.\n # Then, we call this function recursively for each entry in `group_by`.\n if plot_type == \"image\" and isinstance(group_by, dict):\n if axes is None:\n axes = dict()\n for sel in group_by:\n plt.figure()\n axes[sel] = plt.axes()\n if not isinstance(axes, dict):\n raise ValueError(\"If `group_by` is a dict, `axes` must be \"\n \"a dict of axes or None.\")\n _validate_if_list_of_axes(list(axes.values()))\n remove_xlabels = any([_is_last_row(ax) for ax in axes.values()])\n for sel in group_by: # ... 
we loop over selections\n if sel not in axes:\n raise ValueError(sel + \" present in `group_by`, but not \"\n \"found in `axes`\")\n ax = axes[sel]\n # the unwieldy dict comp below defaults the title to the sel\n titles = ({channel_type(evoked.info, idx): sel\n for idx in group_by[sel]} if titles is None else titles)\n _plot_evoked(evoked, group_by[sel], exclude, unit, show, ylim,\n proj, xlim, hline, units, scalings, titles,\n ax, plot_type, cmap=cmap, gfp=gfp,\n window_title=window_title,\n selectable=selectable, noise_cov=noise_cov,\n colorbar=colorbar, mask=mask,\n mask_style=mask_style, mask_cmap=mask_cmap,\n mask_alpha=mask_alpha, time_unit=time_unit,\n show_names=show_names,\n sphere=sphere)\n if remove_xlabels and not _is_last_row(ax):\n ax.set_xticklabels([])\n ax.set_xlabel(\"\")\n ims = [ax.images[0] for ax in axes.values()]\n clims = np.array([im.get_clim() for im in ims])\n min, max = clims.min(), clims.max()\n for im in ims:\n im.set_clim(min, max)\n figs = [ax.get_figure() for ax in axes.values()]\n if len(set(figs)) == 1:\n return figs[0]\n else:\n return figs\n elif isinstance(axes, dict):\n raise ValueError(\"If `group_by` is not a dict, \"\n \"`axes` must not be a dict either.\")\n\n time_unit, times = _check_time_unit(time_unit, evoked.times)\n evoked = evoked.copy() # we modify info\n info = evoked.info\n if axes is not None and proj == 'interactive':\n raise RuntimeError('Currently only single axis figures are supported'\n ' for interactive SSP selection.')\n if isinstance(gfp, str) and gfp != 'only':\n raise ValueError('gfp must be boolean or \"only\". Got %s' % gfp)\n\n scalings = _handle_default('scalings', scalings)\n titles = _handle_default('titles', titles)\n units = _handle_default('units', units)\n\n picks = _picks_to_idx(info, picks, none='all', exclude=())\n if len(picks) != len(set(picks)):\n raise ValueError(\"`picks` are not unique. 
Please remove duplicates.\")\n\n bad_ch_idx = [info['ch_names'].index(ch) for ch in info['bads']\n if ch in info['ch_names']]\n if len(exclude) > 0:\n if isinstance(exclude, str) and exclude == 'bads':\n exclude = bad_ch_idx\n elif (isinstance(exclude, list) and\n all(isinstance(ch, str) for ch in exclude)):\n exclude = [info['ch_names'].index(ch) for ch in exclude]\n else:\n raise ValueError(\n 'exclude has to be a list of channel names or \"bads\"')\n\n picks = np.array([pick for pick in picks if pick not in exclude])\n\n types = np.array(_get_channel_types(info, picks), str)\n ch_types_used = list()\n for this_type in _VALID_CHANNEL_TYPES:\n if this_type in types:\n ch_types_used.append(this_type)\n\n fig = None\n if axes is None:\n fig, axes = plt.subplots(len(ch_types_used), 1)\n fig.subplots_adjust(left=0.125, bottom=0.1, right=0.975, top=0.92,\n hspace=0.63)\n if isinstance(axes, plt.Axes):\n axes = [axes]\n fig.set_size_inches(6.4, 2 + len(axes))\n\n if isinstance(axes, plt.Axes):\n axes = [axes]\n elif isinstance(axes, np.ndarray):\n axes = list(axes)\n\n if fig is None:\n fig = axes[0].get_figure()\n\n if window_title is not None:\n _set_window_title(fig, window_title)\n\n if len(axes) != len(ch_types_used):\n raise ValueError('Number of axes (%g) must match number of channel '\n 'types (%d: %s)' % (len(axes), len(ch_types_used),\n sorted(ch_types_used)))\n _check_option('proj', proj, (True, False, 'interactive', 'reconstruct'))\n noise_cov = _check_cov(noise_cov, info)\n if proj == 'reconstruct' and noise_cov is not None:\n raise ValueError('Cannot use proj=\"reconstruct\" when noise_cov is not '\n 'None')\n projector, whitened_ch_names = _setup_plot_projector(\n info, noise_cov, proj=proj is True, nave=evoked.nave)\n if len(whitened_ch_names) > 0:\n unit = False\n if projector is not None:\n evoked.data[:] = np.dot(projector, evoked.data)\n if proj == 'reconstruct':\n evoked = evoked._reconstruct_proj()\n\n if plot_type == 'butterfly':\n _plot_lines(evoked.data, info, picks, fig, axes, spatial_colors, unit,\n units, scalings, hline, gfp, types, zorder, xlim, ylim,\n times, bad_ch_idx, titles, ch_types_used, selectable,\n False, line_alpha=1., nave=evoked.nave,\n time_unit=time_unit, sphere=sphere)\n plt.setp(axes, xlabel='Time (%s)' % time_unit)\n\n elif plot_type == 'image':\n for ai, (ax, this_type) in enumerate(zip(axes, ch_types_used)):\n use_nave = evoked.nave if ai == 0 else None\n this_picks = list(picks[types == this_type])\n _plot_image(evoked.data, ax, this_type, this_picks, cmap, unit,\n units, scalings, times, xlim, ylim, titles,\n colorbar=colorbar, mask=mask, mask_style=mask_style,\n mask_cmap=mask_cmap, mask_alpha=mask_alpha,\n nave=use_nave, time_unit=time_unit,\n show_names=show_names, ch_names=evoked.ch_names)\n if proj == 'interactive':\n _check_delayed_ssp(evoked)\n params = dict(evoked=evoked, fig=fig, projs=info['projs'], axes=axes,\n types=types, units=units, scalings=scalings, unit=unit,\n ch_types_used=ch_types_used, picks=picks,\n plot_update_proj_callback=_plot_update_evoked,\n plot_type=plot_type)\n _draw_proj_checkbox(None, params)\n\n plt.setp(fig.axes[:len(ch_types_used) - 1], xlabel='')\n fig.canvas.draw() # for axes plots update axes.\n plt_show(show)\n return fig", "def list_spectrographs(self) -> None:\n for key, item in self.spectrographs.items():\n item.summary()\n print(\"\\n\")", "def stack_plot(spec_list, offset = False, alpha=1.):\r\n\r\n import matplotlib.pyplot as plt\r\n\r\n offset_val = 0.\r\n for spec in spec_list:\r\n dat, errdat = 
read_spectrum(spec)\r\n plt.plot(dat['wave'], dat['flux']+offset_val, label = spec, alpha=alpha)\r\n if offset:\r\n offset_val -= np.median(dat['flux'])\r\n print spec\r\n plt.legend()\r\n plt.show()", "def plot(self) -> List[matplotlib.figure.Figure]:\n figs = []\n\n title_prefix = self.trial_name + ' ' + self.segment_name + ' '\n # Figure 1, position in 3 subplots\n pos_fig_sub = self.plot_subplots(self.fig_num_start, title_prefix + 'Position (mm)', self.pos_raw,\n self.pos_smooth, self.pos_legend)\n figs.append(pos_fig_sub)\n\n # Figure 2, orientation in 3 subplots\n eul_fig_sub = self.plot_subplots(self.fig_num_start + 1, title_prefix + 'Euler Angles (deg)', self.eul_raw,\n self.eul_smooth, self.euler_legend)\n figs.append(eul_fig_sub)\n\n # Figure 3, velocity in 3 subplots\n vel_fig_sub = self.plot_subplots_vel(self.fig_num_start + 2, title_prefix + 'Velocity', 'Velocity (mm/s)',\n self.vel)\n figs.append(vel_fig_sub)\n\n # Figure 4, angular velocity in 3 subplots\n ang_vel_fig_sub = self.plot_subplots_vel(self.fig_num_start + 3, title_prefix + 'Angular Velocity',\n 'Angular Velocity (deg/s)', self.ang_vel)\n figs.append(ang_vel_fig_sub)\n\n # Figure 5, position in one axes\n pos_fig_one = self.plot_one_axes(self.fig_num_start + 4, title_prefix + 'Position', 'Position (mm)',\n self.pos_raw, self.pos_smooth, self.pos_legend)\n figs.append(pos_fig_one)\n\n # Figure 6, orientation in one axes\n eul_fig_one = self.plot_one_axes(self.fig_num_start + 5, title_prefix + 'Euler Angles', 'Angle (deg)',\n self.eul_raw, self.eul_smooth, self.euler_legend)\n figs.append(eul_fig_one)\n\n # Figure 7, velocity in one axes\n vel_fig_one = self.plot_one_axes_vel(self.fig_num_start + 6, title_prefix + 'Velocity', 'Velocity (mm/s)',\n self.vel, self.pos_legend)\n figs.append(vel_fig_one)\n\n # Figure 8, angular velocity in one axes\n ang_vel_fig_one = self.plot_one_axes_vel(self.fig_num_start + 7, title_prefix + 'Angular Velocity',\n 'Angular Velocity (deg/s)', self.ang_vel, self.pos_legend)\n figs.append(ang_vel_fig_one)\n\n return figs", "def _get_all_spectra(self):\n pass", "def get_plot_options(self):\n plot_options = []\n # Get pumping rate plot options\n op = self.pumprate.get_plot_options()\n if op['visible']:\n plot_options.append(op)\n # Get associated data options\n for i in range(self.well_count()):\n well_options = self.wells[i].get_plot_options()\n plot_options.extend(well_options)\n return(plot_options)", "def plot_sgls(\n mask_exp,\n depths,\n mask_tag_filt,\n sgls,\n mask_sgls_filt,\n Az_g_hf,\n idx_start=None,\n idx_end=None,\n path_plot=None,\n linewidth=0.5,\n leg_bbox=(1.23, 1),\n clip_x=False,\n):\n import matplotlib.pyplot as plt\n from matplotlib.ticker import FuncFormatter, ScalarFormatter\n import numpy\n\n from . 
import plotutils\n\n # Create experiment mask from specified start/end indices if passed\n if idx_start or idx_end:\n mask_exp = numpy.zeros(len(depths), dtype=bool)\n ind_exp = numpy.where(mask_exp)[0]\n if idx_start and idx_end:\n mask_exp[idx_start:idx_end] = True\n elif idx_start:\n mask_exp[idx_start : ind_exp[-1]] = True\n elif idx_end:\n mask_exp[ind_exp[0] : idx_end] = True\n\n # Filter passed data to experimental period\n depths = depths[mask_exp]\n Az_g_hf = Az_g_hf[mask_exp]\n\n # Create subglide indice groups for plotting\n sgl_ind = numpy.where(mask_tag_filt & mask_exp)[0]\n notsgl_ind = numpy.where((~mask_tag_filt) & mask_exp)[0]\n # Create experiment indices from `mask_exp`\n ind_exp = numpy.where(mask_exp)[0]\n offset = 0\n plt_offset = ind_exp[0]\n\n # Clip values to within experimental period\n if clip_x:\n offset = ind_exp[0]\n ind_exp = ind_exp - offset\n sgl_ind = sgl_ind - offset\n notsgl_ind = notsgl_ind - offset\n plt_offset = 0\n\n fig, (ax1, ax2) = plt.subplots(2, 1)\n\n # Plot glides\n c0, c1 = _colors[0:2]\n ax1 = plotutils.plot_noncontiguous(\n ax1, depths, sgl_ind, c0, \"Glides\", offset=plt_offset, linewidth=linewidth * 2\n )\n ax1 = plotutils.plot_noncontiguous(\n ax1,\n depths,\n notsgl_ind,\n c1,\n \"Stroking\",\n offset=plt_offset,\n linewidth=linewidth,\n linestyle=\"--\",\n )\n\n # Plot HF Z-axis\n c0 = _colors[2]\n ax2.plot(ind_exp, Az_g_hf, color=c0, label=\"Z-axis HF Acc.\", linewidth=linewidth)\n\n # Get dives within mask\n gg = sgls[mask_sgls_filt]\n\n # Get midpoint of dive occurance\n x = gg[\"start_idx\"] + (gg[\"stop_idx\"] - gg[\"start_idx\"]) / 2\n x = x.values.astype(float)\n x_mask = (x - offset > ind_exp[0]) & (x - offset < ind_exp[-1])\n x = x[x_mask]\n\n # Get depth at midpoint\n if clip_x:\n x = x - offset\n ind_x = numpy.round(x).astype(int)\n else:\n ind_x = numpy.round(x - plt_offset).astype(int)\n y = depths[ind_x]\n\n # For each dive_id, sgl_id pair, create annotation string, apply\n dids = gg[\"dive_id\"].values.astype(int)\n sids = numpy.array(gg.index)\n dids = dids[x_mask]\n sids = sids[x_mask]\n n = [\"Dive:{}, SGL:{}\".format(did, sid) for did, sid in zip(dids, sids)]\n\n diff = ind_exp[1] - ind_exp[0]\n for i, txt in enumerate(n):\n # TODO semi-hardcoded dist for annotation\n ax1.annotate(txt, (x[i] + int(diff * 16), y[i]))\n\n # Plot shaded areas where not sub-glides\n ax1 = plotutils.plot_shade_mask(\n ax1, ind_exp, ~mask_tag_filt[mask_exp], facecolor=\"#d9d9d9\"\n )\n ax2 = plotutils.plot_shade_mask(\n ax2, ind_exp, ~mask_tag_filt[mask_exp], facecolor=\"#d9d9d9\"\n )\n\n # Set x-axes limits\n for ax in [ax1, ax2]:\n ticks = ax.get_yticks()\n ax.set_ylim((ticks[0], ticks[-1]))\n if idx_start:\n xmin = idx_start\n else:\n xmin = ax.get_xlim()[0]\n if idx_end:\n xmax = idx_end\n else:\n xmax = ax.get_xlim()[1]\n if clip_x:\n xmin, xmax = xmin - offset, xmax - offset\n ax.set_xlim(xmin, xmax)\n for tick in ax.get_xticklabels():\n tick.set_rotation(45)\n tick.set_ha(\"right\")\n\n # Update Depth subplot y-axis labels, limits, invert depth\n ax1.set_ylabel(\"Depth ($m$)\")\n ymin = depths.min() - (depths.max() * 0.01)\n ymax = depths.max() + (depths.max() * 0.01)\n ax1.set_ylim((ymin, ymax))\n ax1.invert_yaxis()\n ax1.get_yaxis().set_label_coords(-0.09, 0.5)\n\n # Update PRH subplot y labels, limits\n ax2.set_ylabel(\"Z-axis acceleration ($g$)\")\n ax2.set_ylim((Az_g_hf.min(), Az_g_hf.max()))\n ax2.get_yaxis().set_label_coords(-0.09, 0.5)\n\n # Scientific notation for ax1 `n_samples`\n ax1.set_xlabel(\"No. 
sensor samples\")\n mf1 = ScalarFormatter(useMathText=True)\n mf1.set_powerlimits((-2, 2))\n ax1.xaxis.set_major_formatter(mf1)\n\n # Convert n_samples to hourmin labels\n ax2.set_xlabel(r\"Experiment duration ($min \\, sec$)\")\n mf2 = FuncFormatter(plotutils.nsamples_to_minsec)\n ax2.xaxis.set_major_formatter(mf2)\n\n # Create legends outside plot area\n ax1.legend(bbox_to_anchor=leg_bbox)\n plt.tight_layout(rect=[0, 0, 0.8, 1])\n\n # Save plot if `path_plot` passed\n if path_plot:\n import os\n\n fname = \"subglide_highlight\"\n if idx_start:\n fname += \"_start{}\".format(idx_start)\n if idx_end:\n fname += \"_stop{}\".format(idx_end)\n ext = \".eps\"\n file_fig = os.path.join(path_plot, fname + ext)\n plt.savefig(file_fig, box=\"tight\")\n\n plt.show()\n\n return None", "def output_shapes(self):\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access\n self._element_spec)", "def output_shapes(self):\n return nest.map_structure(\n lambda component_spec: component_spec._to_legacy_output_shapes(), # pylint: disable=protected-access\n self._element_spec)", "def _save_all_basesimulator_plots(simulator, dir_name):\n plt.ioff()\n # Overall latency and throughput\n _fig = simulator.summary(format='plot')\n plt.close(_fig)\n save_plot(fig=_fig, fname='latency_throughput', dir_name=dir_name)\n # Transfer and queueing delay\n _fig = simulator.summary(format='plot', summary_type='l')\n plt.close(_fig)\n save_plot(fig=_fig, fname='transfer_delay', dir_name=dir_name)\n _fig = simulator.summary(format='plot', summary_type='l', latency_type='qd')\n plt.close(_fig)\n save_plot(fig=_fig, fname='queueing_delay', dir_name=dir_name)\n # Packet count\n _fig = simulator.summary(format='plot', summary_type='c')\n plt.close(_fig)\n save_plot(fig=_fig, fname='packet_count', dir_name=dir_name)\n # Batch throughput\n _fig = plot_batch_throughput(simulator=simulator)\n plt.close(_fig)\n save_plot(fig=_fig, fname='batch_throughput', dir_name=dir_name)\n del _fig", "def get_child_descriptors(descriptor, depth, descriptor_filter):\r\n if descriptor_filter(descriptor):\r\n descriptors = [descriptor]\r\n else:\r\n descriptors = []\r\n\r\n if depth is None or depth > 0:\r\n new_depth = depth - 1 if depth is not None else depth\r\n\r\n for child in descriptor.get_children() + descriptor.get_required_module_descriptors():\r\n descriptors.extend(get_child_descriptors(child, new_depth, descriptor_filter))\r\n\r\n return descriptors", "def plot(self, scene=None, **kwargs):\r\n result = []\r\n for tile in self.tiles():\r\n result.append(tile.raw_plot(scene, self.z.min(), self.z.max(),\r\n scalarbar=(result==[]), **kwargs))\r\n if scene is None: scene = result[0].scene\r\n return result", "def show(self, outdir=None, ext=\".pdf\", **kwargs):\n axes = []\n for name, step in self.visual_steps.items():\n if outdir is not None:\n outpath = path.join(outdir, slugify(name) + ext)\n else:\n outpath = None\n\n ax = step.show(outpath=outpath, **kwargs)\n axes.append(ax)\n\n # Return axes array to ensure figures are shown in notebook\n return axes", "def load_plots(seriesname):\n LOG.debug(\"Calling load_plots() with the following arguments:\")\n LOG.debug(\"seriesname = %s\"%seriesname)\n\n plots = []\n return plots", "def import_suitable_visualizations(suitable_visualization_list):\n\n mods = []\n for item in suitable_visualization_list:\n mod = '.%s' % item\n mods.append(mod)\n\n modules = []\n\n for item in mods:\n 
modules.append(importlib.import_module(item,\n package=default.module_path))\n\n return modules", "def get_spec_calls(self):\n return get_call_nodes(self.ast)", "def tests(self):\n return [t for t in self._collection if t.parent is self]", "def config_dropdowns(dropdown_specs):\n return [dd.dropdown for dd in dropdown_specs]", "def targets(cls, spec):\r\n return set(target for target, _ in SpecParser(cls.build_root).parse(spec) if target)", "def plotVisualCoverage(self):\r\n\r\n if hasattr(self, 'finalPatchesMarked'):\r\n finalPatches = self.finalPatchesMarked\r\n elif hasattr(self, 'finalPatches'):\r\n finalPatches = self.finalPatches\r\n else:\r\n self.processTrial()\r\n finalPatches = self.finalPatches\r\n\r\n visualFieldOrigin = self.getVisualFieldOrigin()\r\n figList, axList = pt.grid_axis(3, 4, len(finalPatches.keys()), figsize=(12, 10))\r\n\r\n i = 0\r\n\r\n pixelSize = self.params['visualSpacePixelSize']\r\n closeIter = self.params['visualSpaceCloseIter']\r\n\r\n for key, patch in finalPatches.items():\r\n currAx = axList[i]\r\n visualSpace, _, _, _ = patch.getVisualSpace(\r\n self.altPosMapf,\r\n self.aziPosMapf,\r\n visualFieldOrigin=visualFieldOrigin,\r\n pixelSize=pixelSize,\r\n closeIter=closeIter,\r\n isplot=False)\r\n\r\n plotVisualCoverage(visualSpace,\r\n pixelSize=pixelSize,\r\n plotAxis=currAx)\r\n\r\n currAx.set_title(key)\r\n\r\n i = i + 1\r\n\r\n return figList, axList", "def has_datapoint_with_all_dims(fake_ingest, dims):\n for datapoint in fake_ingest.datapoints:\n if has_all_dims(datapoint, dims):\n return True\n return False", "def create_plots(self):\n shutil.rmtree(self.param.path, ignore_errors=True)\n os.makedirs(self.param.path)\n\n ids = list(range(len(self.signs)))\n\n \"\"\"True positives\"\"\"\n values, kinds = self.get_evaluations(ids)\n plots.create_plot(\n kinds,\n [e[0] for e in values], # True positives\n save_dir=self.param.path,\n y_label=\"number_tp\",\n file_name=\"number_tp\",\n title=\"Amount of true positives\",\n )\n\n # Only signs with at least one detection!\n ids = [i for i, _ in enumerate(self.signs) if self.signs[i].evaluate()[2] > 0]\n values, kinds = self.get_evaluations(ids)\n\n \"\"\"Distance\"\"\"\n plots.create_plot(\n kinds,\n values=[e[2] for e in values], # Distances\n save_dir=self.param.path,\n y_label=\"distance\",\n file_name=\"distance\",\n title=\"Distance\",\n )\n \"\"\"Precision\"\"\"\n plots.create_plot(\n kinds,\n # Precision signs with at least one detection are used, e[0]+e[1] > 0)\n values=[e[0] / (e[0] + e[1]) for e in values],\n save_dir=self.param.path,\n y_label=\"precision\",\n file_name=\"precision\",\n title=\"Precision\",\n )", "def _2d_plot_samples(self, **kwargs):\n\n from pesummary.core.plots.bounded_2d_kde import Bounded_2d_kde\n\n # get bounds\n lows = []\n highs = []\n methods = []\n for param in self.parameters[0:2]:\n if param in DEFAULT_BOUNDS:\n lows.append(\n DEFAULT_BOUNDS[param][\"low\"]\n if \"low\" in DEFAULT_BOUNDS[param]\n else None\n )\n highs.append(\n DEFAULT_BOUNDS[param][\"high\"]\n if \"high\" in DEFAULT_BOUNDS[param]\n else None\n )\n methods.append(\n DEFAULT_BOUNDS[param][\"method\"]\n if \"method\" in DEFAULT_BOUNDS[param]\n else \"Reflection\"\n )\n\n if self.plottype == \"triangle\":\n from pesummary.core.plots.publication import triangle_plot as plotfunc\n elif self.plottype == \"reverse_triangle\":\n from pesummary.core.plots.publication import (\n reverse_triangle_plot as plotfunc,\n )\n else:\n # contour plot\n from pesummary.core.plots.publication import (\n 
comparison_twod_contour_plot as plotfunc,\n )\n\n # set KDE information\n kwargs.update(\n {\n \"kde\": Bounded_2d_kde,\n \"kde_kwargs\": {\n \"xlow\": lows[0],\n \"xhigh\": highs[0],\n \"ylow\": lows[1],\n \"yhigh\": highs[1],\n },\n }\n )\n\n # default to not showing data points\n if \"plot_datapoints\" not in kwargs:\n kwargs[\"plot_datapoints\"] = False\n\n if \"triangle\" in self.plottype:\n from pesummary.core.plots.bounded_1d_kde import bounded_1d_kde\n\n # set KDE informaiton\n kwargs.update(\n {\n \"kde_2d\": Bounded_2d_kde,\n \"kde_2d_kwargs\": {\n \"xlow\": lows[0],\n \"xhigh\": highs[0],\n \"ylow\": lows[1],\n \"yhigh\": highs[1],\n },\n \"kde\": bounded_1d_kde,\n }\n )\n\n kwargs[\"kde_kwargs\"] = {\n \"x_axis\": {\"xlow\": lows[0], \"xhigh\": highs[0], \"method\": methods[0]},\n \"y_axis\": {\"xlow\": lows[1], \"xhigh\": highs[1], \"method\": methods[1]},\n }\n\n args = [\n [samps[self.parameters[0]].values for samps in self._samples.values()],\n [samps[self.parameters[1]].values for samps in self._samples.values()],\n ]\n\n if \"xlabel\" not in kwargs:\n kwargs[\"xlabel\"] = self.latex_labels[self.parameters[0]]\n if \"ylabel\" not in kwargs:\n kwargs[\"ylabel\"] = self.latex_labels[self.parameters[1]]\n\n if \"labels\" not in kwargs and len(self.results) > 1:\n kwargs[\"labels\"] = list(self._samples.keys())\n\n # set injection parameter values\n if self.injection_parameters is not None:\n if (\n self.injection_parameters[self.parameters[0]] is not None\n and self.injection_parameters[self.parameters[1]] is not None\n ):\n kwargname = \"truths\" if self.plottype == \"corner\" else \"truth\"\n kwargs[kwargname] = [\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]],\n self.injection_parameters[self.parameters[1]]\n - self.parameter_offsets[self.parameters[1]],\n ]\n\n # create plot\n with DisableLogger():\n fig = plotfunc(*args, **kwargs)\n\n return fig", "def pypeit_multi_plot(filenames, show_flux_err=True, show_tellurics=False,\n mask_values=False, smooth=None, ymax=None):\n\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 7), dpi=140)\n fig.subplots_adjust(left=0.09, right=0.97, top=0.89, bottom=0.16)\n\n # Plot 0-line\n ax.axhline(y=0.0, linewidth=1.5, color='k', linestyle='--')\n\n max_limits = []\n\n for idx, filename in enumerate(filenames):\n\n color = color_list[idx]\n\n spec = SpecOneD()\n spec.read_pypeit_fits(filename)\n\n if mask_values:\n mask = spec.mask\n else:\n mask = np.ones(spec.dispersion.shape, dtype=bool)\n\n if smooth is not None and type(smooth) is int:\n spec.smooth(smooth, inplace=True)\n\n label = filename\n\n\n # Add second axis to plot telluric model\n if show_tellurics is True:\n telluric = spec.telluric[mask] / spec.telluric.max() * np.median(\n spec.flux[mask]) * 2.5\n ax.plot(spec.dispersion[mask], telluric[mask],\n label='Telluric', color=color, alpha=0.5, ls='..')\n\n if show_flux_err:\n ax.plot(spec.dispersion[mask], spec.flux_err[mask], 'grey',\n lw=1, label='Flux Error', color=color, alpha=0.5)\n\n\n ax.plot(spec.dispersion[mask], spec.flux[mask], 'k',\n linewidth=1, label=label, color=color)\n\n # # Add OBJ model if it exists\n # if hasattr(spec, 'obj_model'):\n # ax.plot(spec.dispersion[mask], spec.obj_model, label='Obj '\n # 'model')\n\n lim_spec = spec.copy()\n lim_spec.restore()\n lim_spec = lim_spec.mask_sn(5)\n lim_spec = lim_spec.sigmaclip_flux(3, 3)\n\n max_limits.append(lim_spec.flux[lim_spec.mask].max())\n\n if spec.unit == 'f_lam':\n ax.set_xlabel(r'$\\rm{Wavelength}\\ 
[\\rm{\\AA}]$', fontsize=15)\n ax.set_ylabel(\n r'$\\rm{Flux}\\ f_{\\lambda}\\ [\\rm{erg}\\,\\rm{s}^{-1}\\,\\rm{cm}^{-2}\\,'\n r'\\rm{\\AA}^{-1}]$',\n fontsize=15)\n\n elif spec.unit == 'f_nu':\n ax.set_xlabel(r'$\\rm{Frequency}\\ [\\rm{Hz}]$', fontsize=15)\n ax.set_ylabel(\n r'$\\rm{Flux}\\ f_{\\nu}\\ [\\rm{erg}\\,\\rm{s}^{-1}\\,\\rm{cm}^{-2}\\,'\n r'\\rm{Hz}^{-1}]$',\n fontsize=15)\n\n elif spec.unit == 'f_loglam':\n ax.set_xlabel(r'$\\log\\rm{Wavelength}\\ [\\log\\rm{\\AA}]$',\n fontsize=15)\n ax.set_ylabel(\n r'$\\rm{Flux}\\ f_{\\lambda}\\ [\\rm{erg}\\,\\rm{s}^{-1}\\,\\rm{cm}^{-2}\\,'\n r'(\\log\\rm{\\AA})^{-1}]$',\n fontsize=15)\n\n else:\n raise ValueError(\"Unrecognized units\")\n\n\n ylim_min = 0\n if ymax == None:\n ylim_max = max(max_limits)\n else:\n ylim_max = ymax\n ax.set_ylim(ylim_min, ylim_max)\n ax.legend()\n plt.show()", "def make_all_room_inspector_graphs(df):\n\n # tags is list of column names, excluding datetime\n tags = list(df.set_index('datetime').columns)\n\n # Make a plot for each tag\n plots = [make_room_inspector_graph(df, tag) for tag in tags]\n\n # Put plots in a \"column\" layout\n return components(column(plots))", "def _update_plots(self):\n for dock in self.plotDocks:\n for widget in dock.widgets:\n if not self.dataList.findItems(dock.name(), QtCore.Qt.MatchExactly):\n # no data for this plot -> reset it\n widget.getPlotItem().clear()\n # TODO remove tab from dock and del instance\n else:\n widget.getPlotItem().clear()\n x_data = self.currentDataset[\"results\"][\"time\"]\n y_data = self._get_data_by_name(dock.name())\n widget.getPlotItem().plot(x=x_data, y=y_data)", "def get_safety_vars_plot(self):\n if 'safety_vars_stats' not in self.stats:\n raise ValueError('No safety vars statistics present in this evaluator.')\n\n safety_vars = self.stats['safety_vars_stats'][0].keys()\n n_plots = len(safety_vars)\n fig, axes = plt.subplots(n_plots, 1, figsize=(8, 6 * n_plots))\n\n for idx, var in enumerate(safety_vars):\n series = collections.defaultdict(list)\n for ep in self.stats['safety_vars_stats']:\n for stat in ep[var]:\n series[stat].append(ep[var][stat])\n ax = axes[idx]\n for stat in ['min', 'max']:\n ax.plot(np.squeeze(np.array(series[stat])), label=stat)\n x = range(len(series['mean']))\n\n mean = np.squeeze(np.array(series['mean']))\n std_dev = np.squeeze(np.array(series['std_dev']))\n ax.plot(x, mean, label='Value')\n ax.fill_between(\n range(len(series['mean'])), mean - std_dev, mean + std_dev, alpha=0.3)\n ax.set_title('Stats for {}'.format(var))\n ax.legend()\n ax.spines['top'].set_visible(False)\n\n ax.xaxis.set_ticks_position('bottom')\n ax.set_xlabel('Episode #')\n ax.set_ylabel('Magnitude')\n ax.plot()\n return fig", "def getPatches(slide, mask, numPatches=0, dims=(0,0), dirPath='', slideNum='', plot=False, plotMask=False):\n # extractPatchByXMLLabeling \n w,h = dims \n levelDims = slide.level_dimensions\n Xratio, Yratio = calculateRatio(levelDims)\n\n i = 0\n while i < numPatches:\n firstLoop = True # Boolean to ensure while loop runs at least once. \n\n while firstLoop: # or not mask[rr,cc].all(): # True if it is the first loop or if all pixels are in the mask \n firstLoop = False\n x, y = chooseRandPixel(mask) # Get random top left pixel of patch. 
\n xVertices = np.array([x, x+(w/Xratio), x+(w/Xratio), x, x])\n yVertices = np.array([y, y, y-(h/Yratio), y-(h/Yratio), y])\n rr, cc = polygon(xVertices, yVertices)\n\n image = slide.read_region((x*Xratio, y*Yratio), 0, (w,h))\n \n isWhite = checkWhiteSlide(image)\n newPath = 'other' if isWhite else dirPath\n if not isWhite: i += 1\n\n slideName = '_'.join([slideNum, 'x'.join([str(x*Xratio),str(y*Yratio)])])\n image.save(os.path.join(newPath, slideName+\".png\"))\n\n if plot: \n plotImage(image)\n if plotMask: mask[rr,cc] = 0\n\n if plotMask:\n plotImage(mask)", "def getWithImages(self):\n return [x for x in self.xeps if x.images]", "def save_all_plots(simulator, dir_name, grid=None, titles=None):\n # Soft dependencies\n from NetworkSim.simulation.simulator.base import BaseSimulator\n from NetworkSim.simulation.simulator.parallel import ParallelSimulator\n if isinstance(simulator, BaseSimulator):\n return _save_all_basesimulator_plots(simulator=simulator, dir_name=dir_name)\n elif isinstance(simulator, ParallelSimulator):\n return _save_all_parallelsimulator_plots(simulator=simulator, dir_name=dir_name, grid=grid, titles=titles)\n else:\n raise ValueError(\"A BaseSimulator or ParallelSimulator object is expected.\")", "def load_specifications(specification_dir):\n assert E(specification_dir), \"Specification directory {} does not exist\".format(specification_dir)\n\n specification_jsons = glob.glob(J(specification_dir, '*.json'))\n\n logger.info(\"Loading experiment specifications...\")\n if not specification_jsons:\n logger.warning(\"Could not find any experiment specifications in {}\".format(specification_dir))\n\n specs = []\n for spec_path in specification_jsons:\n with open(spec_path, 'r') as f:\n specs.append(json.load(f))\n logger.info(\"Found {} experiment specifications\".format(len(specs)))\n\n return specs", "def figures_layout(figures_dict: Dict[str, go.Figure]):\n return [\n html.Div(className='cardlive-figures', children=[\n single_figure_layout(title='Map',\n description=['Geographic distribution of the submitted genomic samples.'],\n id='figure-geographic-map-id',\n fig=figures_dict['map']\n ),\n single_figure_layout(title='Samples timeline',\n description=['Submission dates for genomic samples.'],\n id='figure-timeline-id',\n fig=figures_dict['timeline'],\n dropdowns=figure_menus_layout(\n id_type='timeline-type-select',\n options_type=[\n {'label': 'Cumulative counts', 'value': 'cumulative_counts'},\n {'label': 'Cumulative percent', 'value': 'cumulative_percent'},\n {'label': 'Counts', 'value': 'counts'},\n {'label': 'Percent', 'value': 'percent'},\n ],\n value_type='cumulative_counts',\n id_color='timeline-color-select',\n options_color=[\n {'label': 'Default', 'value': 'default'},\n {'label': 'Geographic region', 'value': 'geographic'},\n {'label': 'Organism', 'value': 'organism'},\n ],\n value_color='default'\n ),\n ),\n single_figure_layout(title='Samples total',\n description=['Count of samples matching selection.'],\n id='figure-totals-id',\n fig=figures_dict['totals'],\n dropdowns=figure_menus_layout(\n id_type='totals-type-select',\n options_type=[\n {'label': 'Geographic region', 'value': 'geographic'},\n {'label': 'Organism', 'value': 'organism'},\n ],\n value_type='geographic',\n id_color='totals-color-select',\n options_color=[\n {'label': 'Default', 'value': 'default'},\n {'label': 'Geographic region', 'value': 'geographic'},\n {'label': 'Organism', 'value': 'organism'},\n ],\n value_color='default'\n ),\n ),\n single_figure_layout(title='RGI results',\n 
description=['Percent of selected samples (',\n html.Span(id='sample-count-figure', children=[LOADING]),\n ') with the chosen type of RGI results.'\n ],\n id='figure-rgi-id',\n fig=figures_dict['rgi'],\n dropdowns=figure_menus_layout(\n id_type='rgi-type-select',\n options_type=[\n {'label': 'Drug class', 'value': 'drug_class'},\n {'label': 'AMR gene', 'value': 'amr_gene'},\n {'label': 'AMR gene family', 'value': 'amr_gene_family'},\n {'label': 'Resistance mechanism', 'value': 'resistance_mechanism'},\n ],\n value_type='drug_class',\n id_color='rgi-color-select',\n options_color=[\n {'label': 'Default', 'value': 'default'},\n {'label': 'Geographic region', 'value': 'geographic'},\n {'label': 'Organism', 'value': 'organism'},\n ],\n value_color='default'\n ),\n ),\n single_figure_layout(title='RGI intersections',\n description=['Patterns of co-occurrence of the selected RGI result type across genome subset'],\n id='figure-rgi-intersections',\n fig=figures_dict['rgi'],\n dropdowns=figure_menus_layout(\n id_type='rgi-intersection-type-select',\n options_type=[\n {'label': 'Drug class', 'value': 'drug_class'},\n {'label': 'AMR gene', 'value': 'amr_gene'},\n {'label': 'AMR gene family', 'value': 'amr_gene_family'},\n {'label': 'Resistance mechanism', 'value': 'resistance_mechanism'},\n ],\n value_type='drug_class',\n )\n ),\n ])\n ]", "def has_all_dims(dp_or_event, dims):\n return dims.items() <= {d.key: d.value for d in dp_or_event.dimensions}.items()", "def matches_shapes(self, shape_):\n m = []\n for i, spec in enumerate(self.shapes):\n if matches(spec, shape_):\n m.append(i)\n return m", "def zoom_spec_figure(results, zoom_region_list,\n alpha=0.3, samples=[-1],\n inlog=True,\n start=0, thin=1, layout=None,\n subplot_spec=None, xlim=None, **kwargs):\n fig = pl.figure(figsize=(10, 5))\n\n # posterior draws\n flatchain = results['chain'][:,start::thin,:]\n flatchain = flatchain.reshape(flatchain.shape[0] * flatchain.shape[1],\n flatchain.shape[2])\n thetas = [flatchain[s,:] for s in samples]\n mwave, mospec, mounc, specvecs = comp_samples(thetas, results['model'], inlog=inlog)\n \n #get the panel geometry if not given\n nobj = len(zoom_region_list)\n if layout is None:\n nx = int(np.floor(np.sqrt(nobj)))\n ny = int(np.ceil(nobj*1.0/nx))\n layout = [nx,ny]\n \n #loop over panels, one for each spectral region\n gs = gridspec.GridSpec(layout[0], layout[1])\n for i, reg in enumerate(zoom_region_list):\n x, y = i % nx, np.floor(i*1.0 / nx)\n fig, subgs = one_specregion_figure(mwave, mospec, specvecs,\n reg, fig, subplot_spec = gs[i], **kwargs)\n\n return fig", "def get_plot_options(self):\n plot_options = []\n # Get drawdown plot options\n op = self.drawdown.get_plot_options()\n if op['visible']:\n plot_options.append(op)\n # Get associated data options\n for i in range(self.data_count()):\n op = self.data[i].get_plot_options()\n if op['visible']:\n plot_options.append(op)\n return(plot_options)", "def get_multiobjective_plot(evaluator_list, stride=500):\n num_objectives = (\n evaluator_list[0].stats['multiobj_stats']['episode_totals'].shape[1])\n values = [collections.defaultdict(list) for _ in range(num_objectives)]\n for e in evaluator_list:\n for i in range(num_objectives):\n values[i][e.task_name].append(\n e.stats['multiobj_stats']['episode_totals'][:, i])\n means = [None] * num_objectives\n stds = [None] * num_objectives\n for i in range(num_objectives):\n values[i] = _map(np.vstack, values[i])\n means[i] = _map(functools.partial(np.mean, axis=0), values[i])\n stds[i] = 
_map(functools.partial(np.std, axis=0), values[i])\n\n fig, axes = plt.subplots(num_objectives, 1, figsize=(8, 6 * num_objectives))\n for objective_idx in range(num_objectives):\n ax = axes[objective_idx]\n for i, task_name in enumerate(means[objective_idx]):\n m = means[objective_idx][task_name]\n s = stds[objective_idx][task_name]\n idx = i % len(_COLORS)\n x = np.arange(len(m))\n ax.plot(x, m, lw=2, color=_COLORS[idx], alpha=.6, label=None)\n ax.plot(x[::stride], m[::stride], 'o', lw=2, marker=_MARKERS[idx],\n markersize=10, color=_COLORS[idx], label=task_name)\n ax.fill_between(x, m - s, m + s, alpha=.4, lw=2, color=_COLORS[idx])\n ax.legend()\n ax.set_ylabel('Objective {}'.format(objective_idx))\n ax.set_xlabel('Episode')\n return fig", "def plot_glide_depths(depths, mask_tag_filt):\n import numpy\n\n from . import plotutils\n\n fig, ax = plt.subplots()\n\n ax = plotutils.plot_noncontiguous(ax, depths, numpy.where(mask_tag_filt)[0])\n ax.invert_yaxis()\n\n plt.show()\n\n return None", "def get_plotting_data(each_misfit_windows_collection, iterations_list, snr_threshold, event_depth_dict):\n result = {}\n phases_zr = [\"P\", \"pP\", \"sP\", \"PP\", \"S\", \"sS\", \"SS\"]\n phases_t = [\"ScS\", \"S\", \"sS\", \"SS\"]\n conditions = {\n \"P\": {\n \"exclude_p\": False,\n \"exclude_s\": True\n },\n \"pP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"sP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"PP\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"S\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"sS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"SS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"ScS\": {\n \"exclude_p\": True,\n \"exclude_s\": True\n },\n \"surface_z\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"surface_r\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n \"surface_t\": {\n \"exclude_p\": False,\n \"exclude_s\": False\n },\n }\n # we can extract the information from the misfit_windows in the order of the pdf output.\n # order will be z,r,t[,surface_z,surface_r,surface_t]\n rep_net_sta = sorted(event_depth_dict.keys())[0]\n event_depth_this_event = event_depth_dict[rep_net_sta]\n if (event_depth_this_event > SURFACE_THRESHOLD):\n category_list = [\"z\", \"r\", \"t\"]\n category_phases = [phases_zr, phases_zr, phases_t]\n else:\n category_list = [\"z\", \"r\", \"t\", \"surface_z\", \"surface_r\", \"surface_t\"]\n category_phases = [phases_zr, phases_zr, phases_t,\n [\"surface_z\"], [\"surface_r\"], [\"surface_t\"]]\n for each_iteration in iterations_list:\n result[each_iteration] = {}\n for each_category, each_category_phases in zip(category_list, category_phases):\n result[each_iteration][each_category] = []\n for each_category_phase in each_category_phases:\n phase_condition = conditions[each_category_phase]\n cc = get_windows_cc(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n cc = cc[cc >= 0]\n deltat = get_windows_deltat(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n deltat = deltat[np.abs(deltat) <= 10]\n similarity = get_windows_similarity(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase)\n similarity = 
similarity[similarity >= 0]\n result[each_iteration][each_category].append(\n {\"net_sta\": get_windows_net_sta(\n each_misfit_windows_collection[each_iteration], phase_condition[\n \"exclude_p\"], phase_condition[\"exclude_s\"],\n each_category, snr_threshold, each_category_phase),\n \"cc\": cc,\n \"deltat\": deltat,\n \"similarity\": similarity,\n }\n )\n # result:dict->each_iteration:dict->each_category:list as the dict showed before, we should return the category_phases\n # we should combine the surface wave phases to one page\n if (len(category_phases) == 6):\n for each_iteration in iterations_list:\n category_phases = [phases_zr, phases_zr, phases_t,\n [\"surface_z\", \"surface_r\", \"surface_t\"]]\n category_list = [\"z\", \"r\", \"t\", \"surface\"]\n result[each_iteration][\"surface\"] = []\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_z\"][0])\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_r\"][0])\n result[each_iteration][\"surface\"].append(\n result[each_iteration][\"surface_t\"][0])\n del result[each_iteration][\"surface_z\"]\n del result[each_iteration][\"surface_r\"]\n del result[each_iteration][\"surface_t\"]\n\n return result, category_phases, category_list", "def test_full_internal(self):\n group = self.get_setup().decompress(internal=True)\n assert isinstance(group, PlotSetupGroup)\n res = {\n (\n s.panels.collect_equal(\"dimensions\").variable,\n s.panels.collect_equal(\"dimensions\").species_id,\n s.panels.collect_equal(\"dimensions\").time,\n )\n for s in group\n }\n sol = {\n ((\"dry_deposition\", \"wet_deposition\"), 1, 1),\n ((\"dry_deposition\", \"wet_deposition\"), 1, 2),\n ((\"dry_deposition\", \"wet_deposition\"), 1, 3),\n ((\"dry_deposition\", \"wet_deposition\"), 2, 1),\n ((\"dry_deposition\", \"wet_deposition\"), 2, 2),\n ((\"dry_deposition\", \"wet_deposition\"), 2, 3),\n }\n assert res == sol", "def do_compare_plots(cat7, cat7s, subdir,label):\n #Check the effect of the 5km/s split\n #dNdX\n cat7.plot_line_density(zmax=5)\n cat7s.plot_line_density(zmax=5, label=label)\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"dndx_\"+label))\n plt.clf()\n\n #Omega_DLA\n cat7.plot_cddf(zmax=4,color=\"blue\")\n cat7s.plot_cddf(zmax=4,color=\"red\",label=label)\n plt.xlim(1e20, 1e23)\n plt.ylim(1e-28, 5e-21)\n plt.legend(loc=0)\n save_figure(path.join(subdir, \"cddf_\"+label))\n plt.clf()\n\n #Omega_DLA\n cat7.plot_omega_dla(zmax=5)\n cat7s.plot_omega_dla(zmax=5, label=label)\n plt.legend(loc=0)\n save_figure(path.join(subdir,\"omega_\"+label))\n plt.clf()" ]
[ "0.52858", "0.52142483", "0.51448363", "0.50744987", "0.50589556", "0.48802635", "0.48739997", "0.48073155", "0.4798928", "0.4749356", "0.47428975", "0.47420156", "0.47197765", "0.46932372", "0.46918872", "0.4680866", "0.46731734", "0.46569225", "0.4638593", "0.46093962", "0.45868868", "0.45852435", "0.457482", "0.45559883", "0.4552447", "0.45511317", "0.4548975", "0.45413592", "0.45412245", "0.45367965", "0.4524808", "0.4513525", "0.45031762", "0.45015147", "0.44913638", "0.4475015", "0.44617745", "0.44524", "0.4447917", "0.4447674", "0.4447674", "0.4443774", "0.44218504", "0.44178933", "0.44177565", "0.44097245", "0.44082367", "0.4406856", "0.43911037", "0.4385509", "0.438324", "0.43825993", "0.43811795", "0.43707445", "0.43649116", "0.436127", "0.4360169", "0.4356233", "0.43371767", "0.43277508", "0.4325842", "0.43145126", "0.43136635", "0.4309695", "0.4309191", "0.43066922", "0.43041", "0.43041", "0.42942545", "0.42906272", "0.42906067", "0.42905083", "0.4287659", "0.42870206", "0.42810935", "0.42777076", "0.42762598", "0.4272396", "0.42657384", "0.42634264", "0.42621624", "0.42572725", "0.42533907", "0.42420283", "0.42399713", "0.42392367", "0.4238131", "0.42302972", "0.42298165", "0.42293742", "0.42287955", "0.42284563", "0.42227852", "0.42222354", "0.42219788", "0.42172545", "0.42106467", "0.4206024", "0.42007208", "0.41983593" ]
0.56958634
0
Returns the formatted dimension group strings for a particular frame.
def _frame_title(self, key, group_size=2, separator='\n'): if self.dynamic == 'open' and self.current_key: key = self.current_key if self.layout_dimensions is not None: dimensions, key = zip(*self.layout_dimensions.items()) elif not self.dynamic and (not self.uniform or len(self) == 1) or self.subplot: return '' else: key = key if isinstance(key, tuple) else (key,) dimensions = self.dimensions dimension_labels = [dim.pprint_value_string(k) for dim, k in zip(dimensions, key)] groups = [', '.join(dimension_labels[i*group_size:(i+1)*group_size]) for i in range(len(dimension_labels))] return util.safe_unicode(separator.join(g for g in groups if g))
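A minimal sketch of the grouping logic above, using plain strings in place of Dimension objects (the helper name and sample labels here are hypothetical, not part of the method):

    def frame_title(labels, group_size=2, separator='\n'):
        # Join labels group_size at a time per output line; empty groups are dropped.
        groups = [', '.join(labels[i * group_size:(i + 1) * group_size])
                  for i in range(len(labels))]
        return separator.join(g for g in groups if g)

    print(frame_title(['time: 0', 'phase: 0.5', 'amplitude: 1']))
    # prints "time: 0, phase: 0.5" on the first line and "amplitude: 1" on the second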
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_grp_string(self):\n\n grp = self.get_grp()\n\n if grp == -1:\n\n return \"\"\n\n return \"grp \" + str(grp)", "def format_to_string(groups: List[List]) -> str:\n\tgroups_str = \"\"\n\tcount = 1\n\tfor group in groups:\n\t\tgroups_str += f\"Group {count}: {group} \\n\"\n\t\tcount += 1\n\treturn groups_str", "def get_group_names(self):\n return [self.frame.columns[i] for i in self.group_cols]", "def _format_dimensions(dimensions):\n if not dimensions:\n return \"\"\n\n dim_pairs = [\"%s=%s\" % (k, v) for k, v in dimensions.items()]\n return \"[%s]\" % (\",\".join(dim_pairs))", "def groups(self):\n groups_text = '\\n'\n for group in self.exercise_numbers:\n txt = ' %s:\\t' % group[0]\n for exercise in group[1:]:\n if isinstance(exercise, int):\n txt += '%d. ' % exercise\n else:\n txt += '\\n\\t%s\\n\\t' % exercise\n groups_text += txt + '\\n'\n return groups_text", "def format(self):\n groups = [g + \".\" for g in self.groups]\n params = [\";\" + p.format() for p in self.params]\n groups_name_params = \"\".join(groups) + self.name + \"\".join(params)\n return groups_name_params + \":\" + self.format_value() + CRLF", "def __repr__(self) -> str:\n return f\"Group ({len(self.elements)} elements total)\"", "def format_group_name(gn):\r\n\r\n return {\r\n \"Studies in language and literature\": \"Group 1 - Studies in Language and Literature\",\r\n \"Language acquisition\": \"Group 2 - Language Acquisition\",\r\n \"Individuals and societies\": \"Group 3 - Individuals and Societies\",\r\n \"Experimental sciences\": \"Group 4 - Sciences\",\r\n \"Mathematics\": \"Group 5 - Mathematics\",\r\n \"The arts\": \"Group 6 - The Arts\"\r\n }[gn]", "def __repr__(self):\n return str(self.group)", "def group_tag(self):\n return ''.join(['[{}]'.format(x.name) for x in self.groups])", "def get_group_label(i):\n if i//4 == 0:\n return \"buildUpPlay\"\n elif i//4 == 1:\n return \"chanceCreation\"\n elif i//4 == 2:\n return \"defence\"", "def construct_groups_string(nodes):\n groups = get_groups(nodes)\n if len(groups) <= 1:\n return \"\"\n else:\n result = []\n for color in groups:\n # +1 because .tsp nodes are indexed with 1\n group = [node.nid + 1 for node in nodes if node.color == color]\n result.append(group)\n return str(result)", "def log_group_name(self) -> str:\n ...", "def __str__(self):\n\n nframes = len(self.frames)\n if nframes == 0:\n return \"\"\n elif nframes == 1:\n frame, = self.frames\n return str(frame)\n else:\n frames = sorted(self.frames)\n start = prev = frames[0] # First frame.\n step = None\n subranges = []\n for end in frames[1:]: # Frame starting from the second in the list.\n\n if step is None: # Step is still none.\n step = end - prev # Find and set step.\n\n if prev + step != end: # If the sequence is broken.\n subranges.append((start, prev, step)) # Create a subrange.\n step = None # Reset step.\n start = end # Re-start start.\n prev = end # The next previous.\n\n else:\n subranges.append((start, end, step))\n\n return \", \".join(format_subrange(start, end, step) for (start, end, step) in subranges)", "def toString(self):\r\n if self.mesgType == MULTIPLEXER_FRAME_NOT_INIT:\r\n raise AttributeError, \"Frame is not yet initialized!\"\r\n \r\n # Create header\r\n frameHeader = MULTIPLEXER_FRAME_DIVIDER + str(self.mesgType) + MULTIPLEXER_FRAME_DIVIDER + str(self.contentLength) + \\\r\n MULTIPLEXER_FRAME_DIVIDER + str(self.referenceID) + MULTIPLEXER_FRAME_DIVIDER\r\n \r\n # Determine variable header size\r\n headerSize = 
str(len(frameHeader)).rjust(MULTIPLEXER_FRAME_HEADER_DIGITS,\"0\")\r\n \r\n if len(headerSize) > MULTIPLEXER_FRAME_HEADER_DIGITS:\r\n raise AttributeError, \"Frame Header too large! Max:\"+ MULTIPLEXER_FRAME_HEADER_DIGITS+ \" Actual:\"+ len(headerSize)\r\n \r\n return headerSize + frameHeader + self.content", "def __repr__(self):\n output = ''\n for grp_id, col_list in self.data.items():\n output += grp_id + ':\\n'\n for col in col_list:\n output += ' ' * 4 + col + '\\n'\n return output", "def names(self):\n labels = [\n \"$X_{%i}$\" % i if d.name is None else d.name\n for i, d in enumerate(self.dimensions)\n ]\n return labels", "def _get_fmt_string(self):\n fmt = '>4s'\n for datatype in self.message_datatypes:\n if datatype in self.VALID_DATAYPES:\n if datatype == 'int':\n fmt += 'I'\n if datatype == 'float':\n fmt += 'f'\n if datatype == 'double':\n fmt += 'd'\n if datatype == 'char':\n fmt += 'c'\n if datatype == 'string':\n fmt += str(self.max_str_len)+'s'\n if datatype == 'bool':\n fmt += 'b'\n\n return fmt", "def dimensions_from_subgroups(s):\n letters = 'XYABCDEFGHIJKLMNOPQRSTUVWZ'\n return ' '.join(['dim{0}={1}'.format(dim, sg.name)\n for dim, sg in zip(letters, s)])", "def getGroupName(Id):\r\n return \"Group name\"", "def ranCharGroup(self):\n group = self.genCharGroup() + ' ' + self.genCharGroup() + ' ' + self.genCharGroup() + ' ' + self.genCharGroup()\n return group #Returns a string of 4 character groups", "def group(self):\n return self.tr(self.groupId())", "def group(self):\n return self.tr(self.groupId())", "def group(self):\n return self.tr(self.groupId())", "def group(self):\n return self.tr(self.groupId())", "def list(self, frame=0):\n text = []\n if not self.number_of_variations:\n return \"\"\n for group_number in range(1, len(self.varexercise_numbers)+1):\n text.extend(\n self.group_list(group_number))\n return text", "def group(self) -> str:\n return self._db_data.group", "def _cell_dimensions_string(self):\n return_str = ''\n return_str += '0.0 {:2.6f} xlo xhi\\n0.0 {:2.6f} ylo yhi\\n0.0 {:2.6f} zlo zhi\\n\\n'.format(*self.cell_lengths)\n return_str += '{:2.5f} {:2.5f} {:2.5f} xy xz yz \\n\\n'.format(*self.tilt_factors)\n \n return return_str", "def get_group_names(self):\r\n return self.groups.keys()", "def group_name(self) -> str:\n return pulumi.get(self, \"group_name\")", "def __str__(self) -> str:\n\t\treturn f\"dim {self.dimM},{self.dimN}\" +\"\\n\" \\\n\t\t\t+ \"\\n\".join(\"\".join(str(n) for n in m) for m in self.matrix)", "def log_group_name(self) -> str:\n return jsii.get(self, \"logGroupName\")", "def log_group_name(self) -> str:\n return jsii.get(self, \"logGroupName\")", "def log_group_name(self):\n return self._get_param(CW_LOGS_CFN_PARAM_NAME)", "def group(self):\n\t\treturn self.tr(\"DEM Processing\")", "def group_name(self):\n return \"device-%s\" % self.id", "def getName(self):\n return _libsbml.GroupsExtension_getName(self)", "def get_group(self):\n\t\treturn self.variables.get('group')", "def _groups_format(groups: torch.Tensor) -> torch.Tensor:\n return groups.reshape(groups.shape[0], -1)", "def group(self) -> str:\n return pulumi.get(self, \"group\")", "def get_group_name(self):\n return self.groupname", "def group_name(self):\n\t\treturn \"DocumentChat-%s\" % self.id", "def get_group_label(group):\n indices = [a.index for a in group.atoms]\n names = [a.name for a in group.atoms]\n label = []\n for i in range(len(indices)):\n label.append('%d/%s' % (indices[i], names[i]))\n return(' '.join(label))", "def grouping_crs(self):\n return 
self._get_srid_name(\n self._engine.execute(select([FOOTPRINT_SRID_EXPRESSION])).scalar()\n )", "def __repr__(self) -> str:\n\n return f\"{self.group}({self._value})\"", "def get_formatter(self, group):\n return getattr(self, \"format_\" + group + \"_standings\")", "def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))", "def __repr__(self) -> str:\n\n return f\"ZqGroup({self.q})\"", "def get_debug_strings(self):\n debug_strings = []\n debug_strings.append(f\"program counter: {self.program_counter:#06x}\")\n debug_strings.append(f\"index register: {self.index_register:#06x}\")\n debug_strings.append(f\"word: {self._current_word:#06x}\")\n debug_strings.append(f\"op: {self._current_operation.__class__.__name__}\")\n debug_strings.append(f\"sound timer: {self.sound_timer:#06x}\")\n debug_strings.append(f\"delay timer: {self.delay_timer:#06x}\")\n\n for i in range(16):\n debug_strings.append(f\"register V{i}: {self.general_purpose_registers[i]:#06x}\")\n\n return debug_strings", "def groupdn(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"groupdn\")", "def group_name(self):\n\n return self._group_name", "def GroupKind_toString(*args):\n return _libsbml.GroupKind_toString(*args)", "def _group_headers(self) -> List[str]:\n return [_[0] for _ in self._grouping_labels]", "def __gen_fmt_str__(self, fmt):\n return '=' + (self.num_pts_recv * (fmt + ' '))", "def getFormatString(self):\n return ['a0r', 'a1r', 'a2r', 'a3r',\n 'a4r', 'a5r', 'a6r', 'a7r',\n 'a8r', 'a9r', 'a10r', 'a11r',\n 'a12r', 'a13r', 'a14r', 'a15r'\n #'w0r', 'w1r', 'w2r', 'w3r',\n # 'w4r', 'w5r', 'w6r', 'w7r',\n # 'w8r', 'w9r', 'w10r', 'w11r',\n # 'w12r', 'w13r', 'w14r', 'w15r'\n ]", "def get_dimensions(self, obj):\n return str(obj.dimensions)", "def group_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"group_name\")", "def __str__(self) -> str:\n out = \"STACK: \" + str(self.da.length()) + \" elements. [\"\n out += ', '.join([str(self.da.get_at_index(_))\n for _ in range(self.da.length())])\n return out + ']'", "def mysummary(self):\n return self.sprintf(\"IGMPv3 Group Record %IGMPv3gr.type% %IGMPv3gr.maddr%\")", "def test_get_grouping_expression(self):\n\n field_name = 'i_am_a_field_name'\n dimension = models.CategoricalDimension(\n key='contains_url',\n name='Contains a url',\n description='Contains a url',\n field_name=field_name,\n )\n\n self.assertEquals(dimension.get_grouping_expression(mock.Mock()), field_name)", "def GroupPayload(self):\n\t\tif self.Group:\n\t\t\treturn \"<group-id>%s</group-id>\" % (self.Group)\n\t\telse:\n\t\t\treturn None", "def name(self):\n base_str = 'd{}{}_'.format(self.derivative_count if\n self.derivative_count > 1 else '', self.expr)\n for var, count in self.variable_count:\n base_str += 'd{}{}'.format(var, count if count > 1 else '')\n return base_str", "def __str__(self):\n return '<TuebingenMEG: %i samples, %i timepoints, %i channels>' \\\n % (self.nsamples, self.ntimepoints, len(self.channelids))", "def _group_name(cls, group=None):\n suffix = f\"{cls.__module__}.{cls.__qualname__}\"\n if group is not None:\n suffix += \"-\" + group\n\n # Wrap the suffix into SHA256 to guarantee that the length of\n # the group name is limited. 
Otherwise Channels will complain\n # about that the group name is wrong (actually is too long).\n suffix_sha256 = hashlib.sha256()\n suffix_sha256.update(suffix.encode(\"utf-8\"))\n\n return f\"{GraphqlWsConsumer.group_name_prefix}-{suffix_sha256.hexdigest()}\"", "def getGraphPointNamesString(self):\n names = []\n for gp in self.getGraphPoints():\n if hasattr(aq_base(gp), 'isBroken') and gp.isBroken():\n names.append('%s(<span style=\"color: red\">missing</span>)' %\n gp.id)\n else:\n names.append(gp.id)\n return ', '.join(names)", "def __str__(self):\n return '%i traces in the SEG Y structure.' % len(self.traces)", "def get_frame_names(self):\n\t\t# return \",\".join(self.frame_names)\n\t\treturn str(self.drives)", "def __repr__(self) -> str:\r\n\r\n saida = \"Format: \"\r\n x = self.getformat()\r\n for _ in range(len(x)):\r\n saida = f\"{saida}{x[_]}\"\r\n if _ < len(x)-1:\r\n saida += \", \"\r\n saida += \"\\n\"\r\n return saida", "def __str__(self):\n return f'<KNXIPFrame {self.header}\\n body=\"{self.body}\" />'", "def __str__(self):\n return u\"group_list={self.group_list} bits={self.bits}\"", "def world_tag_string(self):\n return_string = \"\"\n for tag in self.world_tag_ids:\n return_string += f\"{tables.PlanetTagTable[tag][0]}, \"\n return return_string[:-2]", "def string_ds(ds, verbosity: int):\n\n # initialize formatting for each of the three major facets of a dataset\n table_strings = {\n \"sample_table\": \"\"\"\nSample Table:\n-------------\\n\"\"\",\n \"peptide_table\": \"\"\"\nPeptide Table:\n--------------\\n\"\"\",\n # 'enrichments' : \"\"\"\n # Enrichment Matrices:\n # --------------------\\n\"\"\"\n }\n\n for dimension in [\"sample\", \"peptide\"]:\n\n df = get_annotation_table(ds, dim=dimension)\n num_dimensions = len(df)\n\n buffer = io.StringIO()\n df.info(buf=buffer)\n complete = buffer.getvalue()\n table_strings[f\"{dimension}_table\"] += f\"\"\"{complete}\"\"\"\n\n # initialize formatting strings for all enrichment layers\n # enr_layers = set(list(ds.data_vars)) - set([\"sample_table\", \"peptide_table\"])\n # enrichment_strings = {}\n # for enr in enr_layers:\n # mat = ds[enr].to_pandas()\n # enrichment_strings[enr] = f\"\"\"* {enr}\\n{mat.describe()}\"\"\"\n\n # complete = \"\"\"\"\"\"\n # for key, value in enrichment_strings.items():\n # complete += value\n # table_strings['enrichments'] += complete\n\n final = \"\"\"\"\"\"\n for key, value in table_strings.items():\n final += value\n\n return final", "def df_print(p_df):\n l_gr = p_df['Group']\n l_number = p_df['Number']\n l_string = p_df['String']\n\n for i in range(l_gr.count()):\n print(\"=== {} ===\".format(str(l_gr[i])))\n for j in range(len(l_number[i])):\n print('{}. 
{}'.format(l_number[i][j], l_string[i][j]))", "def GroupsExtension_getPackageName():\n return _libsbml.GroupsExtension_getPackageName()", "def get_security_group_short_name(self):\n return self.config['security_group']", "def groupId(self):\n return 'D Dinámicas socio-espaciales'", "def _get_name(self, report):\n match = re.search(\"(.*)\\:\\s*\\(groupid\", report)\n if match:\n return match.group(1)", "def log_group_name(self) -> typing.Optional[str]:\n return self._values.get('log_group_name')", "def dim_str(dims):\n return \"*\".join(\n \"%s**%g\" % (unit_abbreviation, power)\n for unit_abbreviation, power in zip(abbreviations, dims)\n if power != 0\n )", "def get_group_symbol(locale: Locale | str | None = LC_NUMERIC) -> str:\n return Locale.parse(locale).number_symbols.get('group', ',')", "def get_data_group_name(listener_id):\n return \"{}{}\".format(get_name(listener_id), f5_const.SUFFIX_ALLOWED_CIDRS)", "def dim_col(d: int) -> str:\n return f\"dim_{d}\"", "def get_group_names(self, include = ['*'], exclude = []):\n \n raise NotImplementedError('get_group_names')", "async def str(self, sequences=None):\n if sequences is None: \n sequences = self.sequences\n \n lines = []\n def str_k(k):\n if k is NoDim: return \"NoDim\"\n elif k is None: return \"None\"\n else: return str(k)\n\n max_len_key = max([len(str_k(k)) for k in sequences.keys()])\n for k in sorted(sequences.keys()):\n dimension_lines = [await s.str() for s in self.sequences[k]]\n for i,l in enumerate(dimension_lines):\n if i == 0:\n lines.append(str_k(k) + (max_len_key - len(str_k(k)) + 1) * \" \" + l)\n else:\n lines.append(\" \" * max_len_key + \" \" + l)\n return \"\\n\".join(lines)", "def set_groupname(diagram, p, g):\n if diagram.startswith('C2'):\n groupname = diagram + '_uu_p%1i%1i%1i.d000.g%i' % \\\n (p[0][0], p[0][1], p[0][2], g[0][0]) \\\n + '_p%1i%1i%1i.d000.g%i' % (p[1][0], p[1][1], p[1][2], g[1][0])\n elif diagram.startswith('C3'):\n groupname = diagram + '_uuu_p%1i%1i%1i.d000.g5' % \\\n (p[0][0][0], p[0][0][1], p[0][0][2]) \\\n + '_p%1i%1i%1i.d000.g%1i' % \\\n (p[1][0], p[1][1], p[1][2], g[1][0]) \\\n + '_p%1i%1i%1i.d000.g5' % (p[0][1][0], p[0][1][1], p[0][1][2])\n elif diagram == 'C4+D' or diagram == 'C4+C':\n groupname = diagram + '_uuuu_p%1i%1i%1i.d000.g5' % (p[0][0][0], p[0][0][1], p[0][0][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[1][0][0], p[1][0][1], p[1][0][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[0][1][0], p[0][1][1], p[0][1][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[1][1][0], p[1][1][1], p[1][1][2])\n elif diagram == 'C4+B':\n groupname = diagram + '_uuuu_p%1i%1i%1i.d000.g5' % (p[0][0][0], p[0][0][1], p[0][0][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[1][0][0], p[1][0][1], p[1][0][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[1][1][0], p[1][1][1], p[1][1][2]) + \\\n '_p%1i%1i%1i.d000.g5' % (p[0][1][0], p[0][1][1], p[0][1][2]) \n\n else:\n print 'in set_groupname: diagram unknown! 
Quantum numbers corrupted.'\n return\n\n return groupname", "def groupname():\n return jsonify(name=getRandomLine(groupNamesFile))", "def __format__(self, format_spec: str = \"\") -> str:\n if not format_spec:\n format_spec = \".4G\"\n array_string = np.array2string(\n self.data_in_display_units,\n formatter={\"float_kind\": lambda x: format(x, format_spec)},\n )\n return f\"{array_string} {self.display_unit}\"", "def getSpaceGroup(self):\n sg = self.stru.space_group()\n t = sg.type()\n return t.lookup_symbol()", "def __makeFormatString(self):\n self.__formatString = \"\"\n for f in self.__columns:\n self.__formatString += \"%(\"+ f + \")-\" + str(self.__widths[f]) + \\\n \"s \"", "def name(self):\n return self._dimensions", "def log_group_arn(self) -> str:\n ...", "def getName(self):\n return _libsbml.Group_getName(self)", "def to_string(self):\n string = (\n f\"r{self.num_repeat}_k{self.kernel_size}_s{self.stride}{self.stride}\"\n f\"_e{self.expand_ratio}_i{self.input_filters}_o{self.output_filters}\"\n f\"_se{self.se_ratio}\"\n )\n\n if not self.id_skip:\n string += \"_noskip\"\n return string", "def __str__(self):\n #Get an ordered list of the elements strings so it outputs always the same\n #string given a mass function.\n elements = []\n for element in self.focals:\n elements.append((element, str(element)))\n sortedList = sorted(elements, key=lambda x:x[1])\n \n result = \"\"\n first = True\n for t in sortedList:\n if first:\n result += t[1] + \":\" + \"{:.4f}\".format(self.focals[t[0]])\n first = False\n else:\n result += \", \" + t[1] + \":\" + \"{:.4f}\".format(self.focals[t[0]])\n return \"{\" + result + \"}\"", "def get_stack_name_stack_group(stacks) -> list:\n stack_names = []\n for stack in stacks:\n _stack = {\"stack\": stack[\"StackName\"]}\n for tag in stack[\"Tags\"]:\n if tag[\"Key\"] == \"stack-finder\":\n _stack[\"group\"] = tag[\"Value\"]\n stack_names.append(_stack)\n return stack_names", "def _deepvariant_channel_names(num_channels):\n # Add additional empty labels if there are more channels than expected.\n filler_labels = [\n 'channel {}'.format(i + 1)\n for i in range(len(DEEPVARIANT_CHANNEL_NAMES), num_channels)\n ]\n labels = DEEPVARIANT_CHANNEL_NAMES + filler_labels\n # Trim off any extra labels.\n return labels[0:num_channels]", "def label(self):\n G = self.__f.group()\n if is_Gamma0(G):\n group = ''\n elif is_Gamma1(G):\n group = 'G1'\n elif is_GammaH(G):\n group = 'GH[' + ','.join([str(z) for z in G._generators_for_H()]) + ']'\n return '%s%s%s'%(self.level(), cremona_letter_code(self.factor_number()), group)", "def __str__(self) -> str:\n out = \"BAG: \" + str(self.da.length()) + \" elements. [\"\n out += ', '.join([str(self.da.get_at_index(_))\n for _ in range(self.da.length())])\n return out + ']'", "def __str__(self) -> str:\n out = \"BAG: \" + str(self.da.length()) + \" elements. [\"\n out += ', '.join([str(self.da.get_at_index(_))\n for _ in range(self.da.length())])\n return out + ']'", "def __str__(self):\n outstr = [\"\\n<%s: %s>\" % (self.__class__, self.name)]\n outstr.append(\"%d graphs\" % len(self._graphs))\n outstr = \"\\n\".join(outstr)\n return outstr" ]
[ "0.64216906", "0.63535005", "0.6112604", "0.59159833", "0.58164614", "0.5747988", "0.5726756", "0.57234955", "0.56927526", "0.56833076", "0.565327", "0.5643954", "0.5607467", "0.55895174", "0.55715007", "0.5488538", "0.5466888", "0.5411597", "0.5394599", "0.53788805", "0.53649765", "0.5339156", "0.5339156", "0.5339156", "0.5339156", "0.5329281", "0.5310379", "0.53020257", "0.5295315", "0.52805567", "0.5279393", "0.5259291", "0.5259291", "0.5253572", "0.5247298", "0.52442753", "0.5216865", "0.52146554", "0.518328", "0.51807284", "0.5176605", "0.51569635", "0.5152471", "0.5141411", "0.51371247", "0.51283", "0.51281154", "0.5123417", "0.5122639", "0.5116327", "0.51097935", "0.51083183", "0.5102558", "0.5096387", "0.50942916", "0.50915647", "0.50764656", "0.5070701", "0.50630164", "0.5060636", "0.50574553", "0.50559473", "0.5049085", "0.50413156", "0.50383466", "0.50377405", "0.5036641", "0.50349873", "0.50296277", "0.4999298", "0.49931368", "0.49919456", "0.49821615", "0.49768424", "0.49767536", "0.49744248", "0.49686942", "0.4954179", "0.49391204", "0.4933305", "0.49277306", "0.49142116", "0.49138597", "0.49109742", "0.49068072", "0.49045265", "0.49007317", "0.48999038", "0.4899249", "0.48901507", "0.48832646", "0.48728293", "0.48728165", "0.4866635", "0.48313695", "0.48290855", "0.48258257", "0.48143145", "0.48143145", "0.4810965" ]
0.5254912
33
Given an object, a specific key, and the normalization options, this method will find the specified normalization options on the appropriate OptionTree, group the elements according to the selected normalization option (i.e. either per frame or over the whole animation), and finally compute the dimension ranges in each group. The new set of ranges is returned.
def compute_ranges(self, obj, key, ranges): all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element])) if obj is None or not self.normalize or all_table: return OrderedDict() # Get inherited ranges ranges = self.ranges if ranges is None else dict(ranges) # Get element identifiers from current object and resolve # with selected normalization options norm_opts = self._get_norm_opts(obj) # Traverse displayed object if normalization applies # at this level, and ranges for the group have not # been supplied from a composite plot return_fn = lambda x: x if isinstance(x, Element) else None for group, (axiswise, framewise) in norm_opts.items(): elements = [] # Skip if ranges are cached or already computed by a # higher-level container object. framewise = framewise or self.dynamic if group in ranges and (not framewise or ranges is not self.ranges): continue elif not framewise: # Traverse to get all elements elements = obj.traverse(return_fn, [group]) elif key is not None: # Traverse to get elements for each frame frame = self._get_frame(key) elements = [] if frame is None else frame.traverse(return_fn, [group]) if not axiswise or ((not framewise or len(elements) == 1) and isinstance(obj, HoloMap)): # Compute new ranges self._compute_group_range(group, elements, ranges) self.ranges.update(ranges) return ranges
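For intuition, a stripped-down sketch of the per-group range accumulation this method delegates to, assuming hypothetical element objects with a group label and a range() method returning (min, max); this is not the actual HoloViews API:

    from collections import defaultdict

    def compute_group_ranges(elements):
        # Merge each element's (lo, hi) into its group's running range.
        ranges = defaultdict(lambda: (float('inf'), float('-inf')))
        for el in elements:
            lo, hi = el.range()
            cur_lo, cur_hi = ranges[el.group]
            ranges[el.group] = (min(cur_lo, lo), max(cur_hi, hi))
        return dict(ranges)

Framewise normalization would rebuild this mapping for each frame, while axiswise/global normalization accumulates it once across all frames.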
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_norm_opts(self, obj):\n norm_opts = {}\n\n # Get all elements' type.group.label specs and ids\n type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False),\n util.label_sanitizer(x.label, escape=False))) \\\n if isinstance(x, Element) else None\n element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn)\n if idspec is not None}\n\n # Group elements specs by ID and override normalization\n # options sequentially\n key_fn = lambda x: -1 if x[0] is None else x[0]\n id_groups = groupby(sorted(element_specs, key=key_fn), key_fn)\n for gid, element_spec_group in id_groups:\n gid = None if gid == -1 else gid\n group_specs = [el for _, el in element_spec_group]\n\n backend = self.renderer.backend\n optstree = Store.custom_options(\n backend=backend).get(gid, Store.options(backend=backend))\n # Get the normalization options for the current id\n # and match against customizable elements\n for opts in optstree:\n path = tuple(opts.path.split('.')[1:])\n applies = any(path == spec[:i] for spec in group_specs\n for i in range(1, 4))\n if applies and 'norm' in opts.groups:\n nopts = opts['norm'].options\n if 'axiswise' in nopts or 'framewise' in nopts:\n norm_opts.update({path: (nopts.get('axiswise', False),\n nopts.get('framewise', False))})\n element_specs = [spec for _, spec in element_specs]\n norm_opts.update({spec: (False, False) for spec in element_specs\n if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))})\n return norm_opts", "def normalize_all_data_in_dict(data: Data_dict_type, normalizers: Tuple[object, ...]) -> Data_dict_type:\n for key, item in data.items():\n values, sample_rate = item\n # save old shape and reshape data to supported format for normalizer\n old_shape = values.shape\n values = values.reshape((-1, values.shape[-1]))\n # normalize data\n for normalizer in normalizers:\n values = normalizer.transform(values)\n # Reshape data back to old shape\n values = values.reshape(old_shape)\n data[key] = (values, sample_rate)\n return data", "def _apply_compositor(self, holomap, ranges=None, keys=None, dimensions=None):\n # Compute framewise normalization\n defaultdim = holomap.ndims == 1 and holomap.kdims[0].name != 'Frame'\n\n if keys and ranges and dimensions and not defaultdim:\n dim_inds = [dimensions.index(d) for d in holomap.kdims]\n sliced_keys = [tuple(k[i] for i in dim_inds) for k in keys]\n frame_ranges = OrderedDict([(slckey, self.compute_ranges(holomap, key, ranges[key]))\n for key, slckey in zip(keys, sliced_keys) if slckey in holomap.data.keys()])\n else:\n mapwise_ranges = self.compute_ranges(holomap, None, None)\n frame_ranges = OrderedDict([(key, self.compute_ranges(holomap, key, mapwise_ranges))\n for key in holomap.keys()])\n ranges = frame_ranges.values()\n\n return Compositor.collapse(holomap, (ranges, frame_ranges.keys()), mode='display')", "def all_gizmo_to_group():\n\n for n in nuke.allNodes():\n # Avoid scripted gizmo.\n if nuke.knobChangeds.get(n.Class()):\n continue\n\n gizmo_to_group(n)", "def scan_range(self, obj):\n detect_minmax = []\n for item in self._category:\n cat = item.replace(' ', '')\n has_minmax = False\n for k, v in obj.items():\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(v.items())[-1]\n while not isinstance(in_v, str):\n has_minmax = has_minmax or isinstance(v.get(cat), dict)\n in_k, in_v = list(in_v.items())[-1]\n \n if has_minmax:\n detect_minmax.append('Min ' + item)\n detect_minmax.append('Max ' + item)\n else:\n 
detect_minmax.append(item)\n \n self._category_aux = detect_minmax\n for c in self._category_aux:\n self._data[c] = []", "def _init_group_dicts(self):\n\n all_groups = set()\n\n for detection in config['detections'].values():\n if 'action' in detection and detection['action'] == 'buy':\n if 'groups' in detection:\n for group in detection['groups']:\n all_groups.add(group)\n\n for group in all_groups:\n self.trade_sizes[group] = config['trade_min_size']\n self.trade_proceeds[group] = {}\n\n self.trade_sizes['default'] = config['trade_min_size']\n self.trade_proceeds['default'] = {}", "def recalculate_groups(dynamic_list):\n new_final_list = []\n for page in dynamic_list:\n page_list = []\n for group in page:\n new_dict = {}\n lowest_x = 100\n highest_x = 0\n for line in group:\n # find if x is the lowest or highest\n if line['bbox'][0] < lowest_x:\n lowest_x = line['bbox'][0]\n if line['bbox'][2] > highest_x:\n highest_x = line['bbox'][2]\n x_one = lowest_x\n x_two = highest_x\n y_one = group[0]['bbox'][1]\n y_two = h.get_biggest_y(group)\n new_bbox = [x_one, y_one, x_two, y_two]\n new_dict['bbox'] = new_bbox\n new_dict['font'] = h.find_font_in_group(group)\n new_dict['size'] = h.find_font_size_in_group(group)\n new_dict['page'] = group[0]['page']\n new_dict['lines'] = group\n\n page_list.append(new_dict)\n\n new_final_list.append(page_list)\n return new_final_list", "def group_normalize(strokes):\n\n long_stroke = concat(strokes)\n x_min = min(long_stroke.x)\n x_max = max(long_stroke.x)\n y_min = min(long_stroke.y)\n y_max = max(long_stroke.y)\n x_range = float(x_max - x_min)\n y_range = float(y_max - y_min)\n normalized_strokes = []\n for stroke in strokes:\n x = ((np.array(stroke.x) - x_min) / x_range).tolist()\n y = ((np.array(stroke.y) - y_min) / y_range).tolist()\n normalized_strokes.append(Stroke(x, y))\n return normalized_strokes", "def get_entityset_ranges(my_core, meshset, geom_dim):\n\n entityset_ranges = {}\n entityset_types = ['Nodes', 'Curves', 'Surfaces', 'Volumes']\n for dimension, set_type in enumerate(entityset_types):\n entityset_ranges[set_type] = my_core.get_entities_by_type_and_tag(meshset, types.MBENTITYSET, geom_dim,\n [dimension])\n return entityset_ranges", "def group_normalize_wrt_max(strokes):\n\n long_stroke = concat(strokes)\n x_min = min(long_stroke.x)\n x_max = max(long_stroke.x)\n y_min = min(long_stroke.y)\n y_max = max(long_stroke.y)\n x_range = float(x_max - x_min)\n y_range = float(y_max - y_min)\n max_range = max(x_range, y_range)\n normalized_strokes = []\n for stroke in strokes:\n x = ((np.array(stroke.x) - x_min) / max_range).tolist()\n y = ((np.array(stroke.y) - y_min) / max_range).tolist()\n normalized_strokes.append(Stroke(x, y))\n return normalized_strokes", "def _normalize(self, value_dict):\n median = np.median([value_dict[i] for i in list(value_dict.keys())])\n n = len(value_dict.keys())\n if median < 1.0 / float(n):\n divisor = 1.0 / float(n)\n else:\n divisor = median\n return_dict = {}\n for i in list(value_dict.keys()):\n return_dict[i] = float(value_dict[i]) / float(divisor)\n return return_dict", "def __call__(self, results):\n\n for key in results.get('seg_fields', []):\n if self.scale_factor != 1:\n results[key] = general_ocr.imrescale(\n results[key],\n self.scale_factor,\n interpolation='nearest',\n backend=self.backend)\n return results", "def normalize_other_inputs(X, Args):\n other_keys = list(X.keys())\n other_keys.remove(\"blend_image\")\n for key in other_keys:\n X[key] = (X[key] - np.mean(X[key])) / np.std(X[key])\n if Args.model == \"orchid\":\n loc_im = 
np.zeros_like(X[other_keys[0]])\n for i, key in enumerate(other_keys):\n im = X.pop(key)\n maximum = np.min((im.max(axis=2).max(axis=1)))\n im[im < maximum / 1.5] = 0\n im[im >= maximum / 1.5] = i + 1\n loc_im += im\n X['loc_im'] = loc_im\n return X", "def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True):\n def lookup(x):\n \"\"\"\n Looks up options for object, including plot defaults,\n keyfn determines returned key otherwise None key is used.\n \"\"\"\n options = cls.lookup_options(x, opt_type)\n selected = {o: options.options[o]\n for o in opts if o in options.options}\n if opt_type == 'plot' and defaults:\n plot = Store.registry[cls.backend].get(type(x))\n selected['defaults'] = {o: getattr(plot, o) for o in opts\n if o not in selected and hasattr(plot, o)}\n key = keyfn(x) if keyfn else None\n return (key, selected)\n\n # Traverse object and accumulate options by key\n traversed = obj.traverse(lookup, specs)\n options = defaultdict(lambda: defaultdict(list))\n default_opts = defaultdict(lambda: defaultdict(list)) \n for key, opts in traversed:\n defaults = opts.pop('defaults', {})\n for opt, v in opts.items():\n options[key][opt].append(v)\n for opt, v in defaults.items():\n default_opts[key][opt].append(v)\n\n # Merge defaults into dictionary if not explicitly specified\n for key, opts in default_opts.items():\n for opt, v in opts.items():\n if opt not in options[key]:\n options[key][opt] = v\n return options if keyfn else options[None]", "def _get_cmap_normalisation(self):\n\n # Non-normalisable data should return cmap = None\n if not self.normalisable:\n return\n\n # Get min/max based upon ZScale with contrast parameter\n contrast = self.options.get('contrast', 0.2)\n vmin, vmax = ZScaleInterval(contrast=contrast).get_limits(self.data)\n\n # Make this symmetric if using Stokes V\n if self.stokes == 'v':\n v = max(abs(vmin), abs(vmax))\n vmin = -v\n vmax = v\n\n # Override with user-supplied values if present\n if self.options.get('vmin') or self.options.get('vmax'):\n vmin = self.options.get('vmin', -2)\n vmax = self.options.get('vmax', 1)\n\n # Normalise with maximum value in data\n if self.options.get('maxnorm'):\n vmax = np.nanmax(self.data)\n vmin = None\n\n norm = ImageNormalize(\n self.data,\n interval=ZScaleInterval(),\n vmin=vmin,\n vmax=vmax,\n clip=True\n )\n\n return norm", "def __iter__(self):\n for key in self._group._opts.keys():\n yield key", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def transform(self, applyfunc):\n result = self.obj.copy()\n\n for name, group in self:\n # XXX\n group.groupName = name\n res = applyfunc(group)\n\n indexer, _ = common.get_indexer(self.obj.index, group.index, None)\n np.put(result, indexer, res)\n\n return result", "def preprocess_data(self):\n\n selected_data = []\n selected_name = []\n quant_norm_applied = []\n\n rgb_color_to_keys = self.get_rgb_items_for_plot()\n for data_key in rgb_color_to_keys.values():\n if data_key in self.dict_to_plot:\n selected_name.append(data_key)\n\n if 
self.scaler_data is not None:\n if np.count_nonzero(self.scaler_data) == 0:\n logger.warning(\"scaler is zero - scaling was not applied\")\n elif len(self.scaler_data[self.scaler_data == 0]) > 0:\n logger.warning(\"scaler data has zero values\")\n\n for i, k in enumerate(selected_name):\n q_norm_applied = False\n if self.quantitative_normalization:\n # Quantitative normalization\n (\n data_arr,\n q_norm_applied,\n ) = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(\n data_in=self.dict_to_plot[k],\n scaler_dict=self.scaler_norm_dict,\n scaler_name_default=self.get_selected_scaler_name(),\n data_name=k,\n ref_name=self.quantitative_ref_eline,\n name_not_scalable=self.name_not_scalable,\n )\n else:\n # Normalize by the selected scaler in a regular way\n data_arr = normalize_data_by_scaler(\n data_in=self.dict_to_plot[k],\n scaler=self.scaler_data,\n data_name=k,\n name_not_scalable=self.name_not_scalable,\n )\n\n selected_data.append(data_arr)\n quant_norm_applied.append(q_norm_applied)\n\n return selected_data, selected_name, rgb_color_to_keys, quant_norm_applied", "def normalize(self, argumentMap):\r\n method = moduleName + '.' + self.className + '.' + 'normalize'\r\n global graphAPI\r\n \r\n #Create a list of indices that we'll use later to control a for loop. flowControlList governs the value j\r\n # in the inner loop. It contains a list of bucket indices. flowControlList will be destructively evaluated\r\n # by removing i at each iteration of the outer loop, so we want to start a copy of the key list. \r\n flowControlList = list(self.buckets.keys())\r\n \r\n for indexKey in sorted(self.buckets.keys()):\r\n #Outer loop,from i to k\r\n try:\r\n flowControlList.remove(indexKey)\r\n if len(flowControlList) >= 1:\r\n #Take the agents from the current bucket.\r\n # Iterate over the rest to make sure that the agents from the current bucket don't appear in any\r\n stimulusProfile = self.buckets[indexKey]\r\n try:\r\n if len(stimulusProfile.agentSet) > 0:\r\n for flowControlListKey in flowControlList:\r\n #inner loop,from j to k\r\n nextStimulusProfile = self.buckets[flowControlListKey]\r\n nextStimulusProfile.agentSet.difference_update(stimulusProfile.agentSet)\r\n except Exception as e:\r\n stimulusMeme = graphAPI.getEntityMemeType(stimulusProfile.stimulusID)\r\n errorMsg = \"Can't disentangle lower prio conditional stimulus %s agent set from higher prio agent set. Traceback = %s\" %(stimulusMeme,e)\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n except Exception as e:\r\n errorMsg = \"\"\r\n try:\r\n remaining = len(flowControlList)\r\n stimulusProfile = self.buckets[indexKey]\r\n stimulusMeme = graphAPI.getEntityMemeType(stimulusProfile.stimulusID)\r\n errorMsg = \"Can't normalize conditional stimulus %s agent set with regard to lower prio stimuli. %s lower prio stimuli unnormalized. Traceback = %s\" %(stimulusMeme, remaining, e)\r\n except Exception as ee:\r\n errorMsg = \"Unexpected error %s occurred while trying to normalized conditional stimulus set. 
Traceback = %s\" %(ee, e)\r\n finally:\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])", "def normalization(obj):\n dic = obj.mainfield.para_dict.copy()\n for item in obj.forfield: dic.update(item.para_dict)\n for item in obj.existfield: dic.update(item.para_dict)\n\n global_dic = number_type(dic)\n obj.normal_guards = norm_rep(global_dic, obj.all_sentence)\n\n main_dic = number_type(obj.mainfield.para_dict)\n obj.mainfield.content = norm_rep(main_dic, obj.mainfield.content)\n\n for index in range(len(obj.forfield)):\n obj.forfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # for_dic = number_type(temp_dic)\n obj.forfield[index].content = norm_rep(global_dic, obj.forfield[index].content)\n print(global_dic, obj.forfield[index].para_dict)\n obj.forfield[index].para_dict = pair_2_dict(global_dic, obj.forfield[index].para_dict)\n\n for index in range(len(obj.existfield)):\n obj.existfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # exist_dic = number_type(temp_dic)\n obj.existfield[index].content = norm_rep(global_dic, obj.existfield[index].content)\n obj.existfield[index].para_dict = pair_2_dict(global_dic, obj.existfield[index].para_dict)\n\n # change para_dict: {'i':'NODE} -> {'NODE_1', 'NODE'}\n obj.mainfield.para_dict = pair_2_dict(global_dic, obj.mainfield.para_dict)", "def rf_extents(rf_dict):\n x_min, y_min, x_max, y_max = np.inf, np.inf, -np.inf, -np.inf\n for rf in rf_dict:\n x_min = np.min([rf['on_center_x'], x_min])\n x_max = np.max([rf['on_center_x'], x_max])\n y_min = np.min([rf['on_center_y'], y_min])\n y_max = np.max([rf['on_center_y'], y_max])\n if x_min == x_max:\n x_max += 1\n if y_min == y_max:\n y_max += 1\n return {\n 'x_min': x_min,\n 'x_max': x_max,\n 'y_min': y_min,\n 'y_max': y_max\n }", "def groups(self):\n\n\t\tprint \"completed minimization\"\n\t\tcopy(self.rootdir+'counterions-minimized.gro',self.rootdir+'system.gro')\n\t\tcopy(self.rootdir+'counterions.top',self.rootdir+'system.top')\n\t\tif self.simscale == 'aamd': grouptype = 'standard'\n\t\tif self.simscale == 'cgmd': grouptype = 'cgmd_water'\n\t\tself.grouping(grouptype=grouptype)", "def __call__(self, results):\n for key in results.get('img_fields', ['img']):\n results[key] = general_ocr.imnormalize(results[key], self.mean, self.std,\n self.to_rgb)\n results['img_norm_cfg'] = dict(\n mean=self.mean, std=self.std, to_rgb=self.to_rgb)\n return results", "def normalize(self, redraw=True):\n if self.screen_rect is not None:\n self.relative_sizes = []\n\n height = self.screen_rect.height\n left, right = self._get_columns()\n\n if left.count > 0:\n self.relative_sizes += self._split_integer(height, left.count)\n if right.count > 0:\n self.relative_sizes += self._split_integer(height, right.count)\n\n if redraw:\n self.group.layout_all()\n self.do_normalize = False", "def _group_objects(list_, attr=None, key=None, default=None,\n minimum=MIN_GROUPED):\n if not bool(attr) ^ bool(key):\n raise AttributeError(\"Either an attribute or a key must be specified.\")\n\n name = \"A-Z\" if default is None else default\n groups = collections.defaultdict(list)\n\n if list_ and (minimum is None or len(list_) > minimum):\n for item in list_:\n value = getattr(item, attr) if attr is not None else item[key]\n letter = value[0].upper()\n if letter not in string.ascii_uppercase:\n groups[\"#\"].append(item)\n else:\n groups[letter].append(item)\n elif list_:\n groups[name] = list_\n\n return groups", "def 
group_normalize_wrt_x(strokes):\n\n long_stroke = concat(strokes)\n x_min = min(long_stroke.x)\n x_max = max(long_stroke.x)\n y_min = min(long_stroke.y)\n x_range = float(x_max - x_min)\n normalized_strokes = []\n for stroke in strokes:\n x = ((np.array(stroke.x) - x_min) / x_range).tolist()\n y = ((np.array(stroke.y) - y_min) / x_range).tolist()\n normalized_strokes.append(Stroke(x, y))\n return normalized_strokes", "def _resize_seg(self, results):\n for key in results.get('seg_fields', []):\n if self.keep_ratio:\n gt_seg = mmcv.imrescale(\n results[key],\n results['scale'],\n interpolation='nearest',\n backend=self.backend)\n else:\n gt_seg = mmcv.imresize(\n results[key],\n results['scale'],\n interpolation='nearest',\n backend=self.backend)\n results[key] = gt_seg", "def groupCtrl (sel=None):\n if sel==None:\n sel = pm.ls(sl =1)\n listGrp = []\n for obj in sel:\n rotOrder = pm.getAttr (obj+'.rotateOrder')\n names = obj.split('_')\n\n if len(names) > 3:\n auto = obj.replace( names[-1], '%s_AUTO'%( pm.util.capitalize( names[-1] ) ) )\n zero = obj.replace( names[-1], '%s_ZERO'%( pm.util.capitalize( names[-1] ) ) )\n\n elif len(names)==3:\n cap = pm.util.capitalize( names[2] )\n auto = names[0]+'_'+names[1]+cap+'_AUTO'\n zero = names[0]+'_'+names[1]+cap+'_ZERO'\n\n elif len(names)<=2:\n auto = obj+'_AUTO'\n zero = obj+'_ZERO'\n\n auto = pm.group (em=1, n=auto )\n pm.setAttr ( auto+'.rotateOrder', rotOrder)\n\n zero = pm.group (em=1, n=zero )\n pm.setAttr (zero+'.rotateOrder', rotOrder)\n # create attr Mem\n if pm.objExists(obj+'.AUTO')==0:\n pm.addAttr (obj,ln ='AUTO', dt ='string')\n\n pm.setAttr (obj+'.AUTO' ,auto ,k=0, l=0, type ='string')\n\n if pm.objExists(obj+'.ZERO')==0:\n pm.addAttr (obj,ln ='ZERO', dt ='string')\n\n pm.setAttr (obj+'.ZERO' ,zero ,k=0, l=0, type ='string')\n # ZERO obj\n pm.addAttr (zero,ln ='obj', dt ='string')\n pm.setAttr (zero+'.obj' ,obj ,k=0, l=0, type ='string')\n # AUTO obj\n pm.addAttr (auto,ln ='obj', dt ='string')\n pm.setAttr (auto+'.obj' ,obj ,k=0, l=0, type ='string')\n #check parent of selectObj\n listParent = pm.listRelatives (obj ,p=1,typ='transform')\n #if num==0, do nothing or if num==1 do parent\n if len(listParent)==1 :\n #print 'yes'\n pm.parent (zero , listParent[0] )\n # match positionand rotation\n pm.delete (pm.pointConstraint ( obj , auto))\n pm.delete (pm.orientConstraint ( obj , auto))\n pm.delete (pm.pointConstraint ( obj , zero))\n pm.delete (pm.orientConstraint ( obj , zero))\n pm.parent ( auto , zero)\n pm.parent ( obj , auto )\n # set nonKeyAble\n grp = [zero , auto , obj]\n listGrp.append(grp)\n #print grp\n return listGrp", "def _parse_groupped_data(self):\n for i, val in enumerate(self.values.keys()):\n xy = self.values[val]\n self._set_and_get(\"x_\", val, xy[:, 0])\n self._set_and_get(\"y_\", val, xy[:, 1])", "def _resize_seg(self, results):\n for key in results.get('seg_fields', []):\n if self.keep_ratio:\n gt_seg = general_ocr.imrescale(\n results[key],\n results['scale'],\n interpolation='nearest',\n backend=self.backend)\n else:\n gt_seg = general_ocr.imresize(\n results[key],\n results['scale'],\n interpolation='nearest',\n backend=self.backend)\n results[key] = gt_seg", "def __iter__(self):\n for key in itertools.chain(list(self._opts.keys()),\n list(self._groups.keys())):\n yield key", "def normalise(self):\n total = 0\n for feat_set in self.values():\n for value in feat_set.values():\n total += value\n norm = 1/total\n for feat_set in self.values():\n for feat in feat_set:\n feat_set[feat] *= norm\n return self", "def 
normalize(self, normalizationLevel=\"minute\", fusionMethod=\"mean\", interpolationMethod=\"linear\"):\n # do not normalize the TimeSeries if it is already normalized, either by\n # definition or a prior call of normalize(*)\n if self._normalizationLevel == normalizationLevel:\n if self._normalized: # pragma: no cover\n return\n\n # check if all parameters are defined correctly\n if normalizationLevel not in NormalizationLevels:\n raise ValueError(\"Normalization level %s is unknown.\" % normalizationLevel)\n if fusionMethod not in FusionMethods:\n raise ValueError(\"Fusion method %s is unknown.\" % fusionMethod)\n if interpolationMethod not in InterpolationMethods:\n raise ValueError(\"Interpolation method %s is unknown.\" % interpolationMethod)\n\n # (nearly) empty TimeSeries instances do not require normalization\n if len(self) < 2:\n self._normalized = True\n return\n\n # get the defined methods and parameter\n self._normalizationLevel = normalizationLevel\n normalizationLevel = NormalizationLevels[normalizationLevel]\n fusionMethod = FusionMethods[fusionMethod]\n interpolationMethod = InterpolationMethods[interpolationMethod]\n\n # sort the TimeSeries\n self.sort_timeseries()\n\n # prepare the required buckets\n start = self._timeseriesData[0][0]\n end = self._timeseriesData[-1][0]\n span = end - start\n bucketcnt = int(span / normalizationLevel) + 1\n\n buckethalfwidth = normalizationLevel / 2.0\n bucketstart = start + buckethalfwidth\n buckets = [[bucketstart + idx * normalizationLevel] for idx in xrange(bucketcnt)]\n\n # Step One: Populate buckets\n # Initialize the timeseries data iterators\n tsdStartIdx = 0\n tsdEndIdx = 0\n tsdlength = len(self)\n\n for idx in xrange(bucketcnt):\n # get the bucket to avoid multiple calls of buckets.__getitem__()\n bucket = buckets[idx]\n\n # get the range for the given bucket\n bucketend = bucket[0] + buckethalfwidth\n\n while tsdEndIdx < tsdlength and self._timeseriesData[tsdEndIdx][0] < bucketend:\n tsdEndIdx += 1\n\n # continue, if no valid data entries exist\n if tsdStartIdx == tsdEndIdx:\n continue\n\n # use the given fusion method to calculate the fusioned value\n values = [i[1] for i in self._timeseriesData[tsdStartIdx:tsdEndIdx]]\n bucket.append(fusionMethod(values))\n\n # set the new timeseries data index\n tsdStartIdx = tsdEndIdx\n\n # Step Two: Fill missing buckets\n missingCount = 0\n lastIdx = 0\n for idx in xrange(bucketcnt):\n # bucket is empty\n if 1 == len(buckets[idx]):\n missingCount += 1\n continue\n\n # This is the first bucket. 
The first bucket is not empty by definition!\n if idx == 0:\n lastIdx = idx\n continue\n\n # update the lastIdx, if none was missing\n if 0 == missingCount:\n lastIdx = idx\n continue\n\n # calculate and fill in missing values\n missingValues = interpolationMethod(buckets[lastIdx][1], buckets[idx][1], missingCount)\n for idx2 in xrange(1, missingCount + 1):\n buckets[lastIdx + idx2].append(missingValues[idx2 - 1])\n\n lastIdx = idx\n missingCount = 0\n\n self._timeseriesData = buckets\n\n # at the end set self._normalized to True\n self._normalized = True", "def determinePlotLimits(self):\n max_str = \"up99\"\n min_str = \"dn99\"\n if self.keywords.get(\"limit_type\",\"99per\") == \"minmax\":\n max_str = \"max\"\n min_str = \"min\"\n \n # Determine the min/max of variables over all models\n limits = {}\n prune = False\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with Dataset(fname) as dataset:\n if \"MeanState\" not in dataset.groups: continue\n group = dataset.groups[\"MeanState\"]\n variables = [v for v in group.variables.keys() if v not in group.dimensions.keys()]\n for vname in variables:\n var = group.variables[vname]\n pname = vname.split(\"_\")[0]\n region = vname.split(\"_\")[-1]\n if var[...].size <= 1: continue\n if space_opts.has_key(pname):\n if not limits.has_key(pname):\n limits[pname] = {}\n limits[pname][\"min\"] = +1e20\n limits[pname][\"max\"] = -1e20\n limits[pname][\"unit\"] = post.UnitStringToMatplotlib(var.getncattr(\"units\"))\n limits[pname][\"min\"] = min(limits[pname][\"min\"],var.getncattr(min_str))\n limits[pname][\"max\"] = max(limits[pname][\"max\"],var.getncattr(max_str))\n elif time_opts.has_key(pname):\n if not limits.has_key(pname): limits[pname] = {}\n if not limits[pname].has_key(region):\n limits[pname][region] = {}\n limits[pname][region][\"min\"] = +1e20\n limits[pname][region][\"max\"] = -1e20\n limits[pname][region][\"unit\"] = post.UnitStringToMatplotlib(var.getncattr(\"units\"))\n limits[pname][region][\"min\"] = min(limits[pname][region][\"min\"],var.getncattr(\"min\"))\n limits[pname][region][\"max\"] = max(limits[pname][region][\"max\"],var.getncattr(\"max\"))\n if not prune and \"Benchmark\" in fname and pname == \"timeint\":\n prune = True\n self.pruneRegions(Variable(filename = fname,\n variable_name = vname,\n groupname = \"MeanState\"))\n \n # Second pass to plot legends (FIX: only for master?)\n for pname in limits.keys():\n\n try:\n opts = space_opts[pname]\n except:\n continue\n \n # Determine plot limits and colormap\n if opts[\"sym\"]:\n vabs = max(abs(limits[pname][\"min\"]),abs(limits[pname][\"min\"]))\n limits[pname][\"min\"] = -vabs\n limits[pname][\"max\"] = vabs\n\n # if a score, force to be [0,1]\n if \"score\" in pname:\n limits[pname][\"min\"] = 0\n limits[pname][\"max\"] = 1\n\n limits[pname][\"cmap\"] = opts[\"cmap\"]\n if limits[pname][\"cmap\"] == \"choose\": limits[pname][\"cmap\"] = self.cmap\n\n # Plot a legend for each key\n if opts[\"haslegend\"]:\n fig,ax = plt.subplots(figsize=(6.8,1.0),tight_layout=True)\n label = opts[\"label\"]\n if label == \"unit\": label = limits[pname][\"unit\"]\n post.ColorBar(ax,\n vmin = limits[pname][\"min\"],\n vmax = limits[pname][\"max\"],\n cmap = limits[pname][\"cmap\"],\n ticks = opts[\"ticks\"],\n ticklabels = opts[\"ticklabels\"],\n label = label)\n fig.savefig(os.path.join(self.output_path,\"legend_%s.png\" % (pname))) \n plt.close()\n\n # Determine min/max of relationship variables\n for fname in glob.glob(os.path.join(self.output_path,\"*.nc\")):\n with 
Dataset(fname) as dataset:\n for g in dataset.groups.keys():\n if \"relationship\" not in g: continue\n grp = dataset.groups[g]\n if not limits.has_key(g):\n limits[g] = {}\n limits[g][\"xmin\"] = +1e20\n limits[g][\"xmax\"] = -1e20\n limits[g][\"ymin\"] = +1e20\n limits[g][\"ymax\"] = -1e20\n limits[g][\"xmin\"] = min(limits[g][\"xmin\"],grp.variables[\"ind_bnd\"][ 0, 0])\n limits[g][\"xmax\"] = max(limits[g][\"xmax\"],grp.variables[\"ind_bnd\"][-1,-1])\n limits[g][\"ymin\"] = min(limits[g][\"ymin\"],grp.variables[\"dep_bnd\"][ 0, 0])\n limits[g][\"ymax\"] = max(limits[g][\"ymax\"],grp.variables[\"dep_bnd\"][-1,-1])\n\n \n self.limits = limits", "def group2mmdetection(group: dict) -> dict:\n\n image_id, group = group\n filename = group['filename'].values[0]\n fullpath = osp.join(IMG_DIR, filename)\n assert image_id == osp.basename(filename).split('.')[0]\n\n width, height = get_image_size(fullpath)\n\n group['XMin'] = group['XMin'] * width\n group['XMax'] = group['XMax'] * width\n group['YMin'] = group['YMin'] * height\n group['YMax'] = group['YMax'] * height\n\n bboxes = [np.expand_dims(group[col].values, -1) for col in['XMin', 'YMin', 'XMax', 'YMax']]\n bboxes = np.concatenate(bboxes, axis=1)\n #print(bboxes)\n #print(bboxes.shape)\n return {\n 'filename': group['filename'].values[0], #image_id+'.jpg',\n 'width': width,\n 'height': height,\n 'ann': {\n 'bboxes': np.array(bboxes, dtype=np.float32),\n 'labels': np.array([stoi[x] for x in group['LabelName'].values]) + 1\n }\n }", "def group(df, dvmin, dvmax, step):\n\tr = step/2\n\tres = []\n\n\tfor ticker in range(dvmin, dvmax, step):\n\t\t#select values by left-right difference in sum in range (x-r, x+r). x is the middle value of a bucket. \n\t\tsubgroup = df.loc[(df['diff']>ticker-r) & (df['diff']<ticker+r)\n\t\t\t& (df['choice'] != 0.5)]\n\t\t#count frequency of choosing left\n\t\tnum = subgroup['choice'].sum()\n\t\t#total number of datapoints in the bucket\n\t\tdenom = subgroup.shape[0]\n\t\t#calculate and append the prob. 
append 0 if empty bucket\n\tres.append(num/denom) if denom else res.append(0)\n\treturn res", "def process(data):\n    items = data.get('items', [])\n    logging.info('- processing %d items', len(items))\n    return [_flatten_dimensions(t['properties']['dimensions']) for t in items]", "def normalizer(img, norm_min: int = 0, norm_max: int = 255, norm_auto: bool = False):\n    rgb_planes = cv2.split(img)\n    result_planes = []\n\n    for idx, plane in enumerate(rgb_planes[:3]):\n        if norm_auto:\n            auto_min = np.min(np.where((norm_min <= 25, 255)))\n            auto_max = np.max(np.where((norm_min <= 220, 0)))\n            plane = np.where(plane <= auto_min, auto_min, plane)\n            plane = np.where(plane >= auto_max, auto_max, plane)\n        else:\n            plane = np.where(plane <= norm_min, norm_min, plane)\n            plane = np.where(plane >= norm_max, norm_max, plane)\n        norm_img = cv2.normalize(plane, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)\n        result_planes.append(norm_img)\n\n    return cv2.merge(result_planes)", "def normalize_units(data):\n    for obj in data:\n        obj['unit'] = normalize_units_function(obj.get('unit', ''))\n    # for param in ds.get('parameters', {}).values():\n    #     if 'unit' in param:\n    #         param['unit'] = normalize_units_function(param['unit'])\n    return data", "def group_normalize_wrt_y(strokes):\n\n    long_stroke = concat(strokes)\n    x_min = min(long_stroke.x)\n    y_min = min(long_stroke.y)\n    y_max = max(long_stroke.y)\n    y_range = float(y_max - y_min)\n    normalized_strokes = []\n    for stroke in strokes:\n        x = ((np.array(stroke.x) - x_min) / y_range).tolist()\n        y = ((np.array(stroke.y) - y_min) / y_range).tolist()\n        normalized_strokes.append(Stroke(x, y))\n    return normalized_strokes", "def standardize_grouped(tX_grouped):\n\n    new_tX_grouped = []\n    for i in range(len(tX_grouped)):\n        new_tX_grouped.append(standardize(tX_grouped[i]))\n    return new_tX_grouped", "def do_grouping(prefixes, grouping_keys, xaxis_key='Age_At_IMGExam',\n                plots='regressions',\n                atlas='desikan', username=None, passwd=None,\n                data_dir='data', output_dir='.', output_type='matplotlib'):\n\n    data = get_all_data(atlas, username=username, passwd=passwd, data_dir=data_dir)\n    data.filter(lambda k, v: 'fuzzy' not in k) # Remove 'fuzzy'\n    data.filter([partial(lambda k, v, p: (k.startswith(p) or\n                                          k in grouping_keys or\n                                          k == xaxis_key),\n                         p=p)\n                 for p in prefixes])\n\n    # Process & plot the data.\n    measure_keys = [(k) for k in data.data_dict.keys()\n                    if k.startswith(p) and k.endswith('_AI')]\n    for pi, yaxis_key in enumerate(sorted(measure_keys)):\n        print(\"Comparing %d (%s)...\" % (pi, yaxis_key))\n\n        kwargs = dict(xaxis_key=xaxis_key, yaxis_key=yaxis_key)\n        group_names, group_x, group_y = compute_group_asymmetry(data, grouping_keys=grouping_keys, **kwargs)\n\n        kwargs.update(dict(group_names=group_names, group_x=group_x, group_y=group_y))\n        if 'regressions' in plots:\n            fh = plot_regressions(**kwargs)\n            fh.name = '%s-%s-regressions' % (yaxis_key, '__'.join(grouping_keys))\n\n        if 'distributions' in plots:\n            plot_distributions(**kwargs)\n        if 'stats' in plots:\n            do_stats(group_names, group_x, group_y)\n\n    # Outside of loop\n    if 'regression_stats' in plots:\n        dump_regressions_csv(regressions,\n                             group_names=group_names,\n                             measure_names=measure_keys)\n\n        plot_regressions_scatter(regressions,\n                                 group_names=group_names,\n                                 measure_names=measure_keys)\n\n    if 'stat_distributions' in plots:\n        plot_stat_distributions(stats, group_names=group_names)\n\n    show_plots(plotengine=output_type, output_dir=output_dir)", 
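The two stroke normalizers above (group_normalize_wrt_x and group_normalize_wrt_y) assume a Stroke container exposing x/y sequences, a concat helper that chains strokes, and numpy imported as np. A minimal sketch of those assumed pieces, with hypothetical stand-in names, plus a usage line (assuming group_normalize_wrt_x from the earlier snippet is in scope):

from collections import namedtuple
import numpy as np

Stroke = namedtuple("Stroke", ["x", "y"])  # simple holder for point lists

def concat(strokes):
    # chain the x and y sequences of every stroke into one long stroke
    return Stroke([v for s in strokes for v in s.x],
                  [v for s in strokes for v in s.y])

# usage: map a group of strokes into a shared frame scaled by the overall x extent
strokes = [Stroke([0.0, 10.0], [0.0, 5.0]), Stroke([10.0, 20.0], [5.0, 10.0])]
normalized = group_normalize_wrt_x(strokes)

"def scale_and_shift_reco_dists(self):\n        e_res_scale = 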
self.params.e_res_scale.value.m_as('dimensionless')\n cz_res_scale = self.params.cz_res_scale.value.m_as('dimensionless')\n e_reco_bias = self.params.e_reco_bias.value.m_as('GeV')\n cz_reco_bias = self.params.cz_reco_bias.value.m_as('dimensionless')\n eval_dict_mod = deepcopy(self.eval_dict)\n for flavintgroup in eval_dict_mod.keys():\n for (dim, dim_scale, dim_bias) in \\\n (('energy', e_res_scale, e_reco_bias),\n ('coszen', cz_res_scale, cz_reco_bias)):\n for i,flav_dim_dist_dict in \\\n enumerate(eval_dict_mod[flavintgroup][dim]):\n for param in flav_dim_dist_dict[\"kwargs\"].keys():\n if param == 'scale':\n flav_dim_dist_dict[\"kwargs\"][param] *= dim_scale\n elif param == 'loc':\n flav_dim_dist_dict[\"kwargs\"][param] += dim_bias\n return eval_dict_mod", "def add_hic_normalize_group(subparser):\n subparser.add_argument(\"-m\", \"--min-distance\", dest=\"mindist\", required=False, type=int, default=0,\n action='store', help=\"The minimum interaction distance to include in normalization. [default: %(default)s]\")\n subparser.add_argument(\"-x\", \"--max-distance\", dest=\"maxdist\", required=False, type=int, default=None,\n action='store', help=\"The maximum interaction distance to include in normalization (None or zero indicate no maximum). [default: %(default)s]\")\n subparser.add_argument(\"-c\", \"--chromosomes\", dest=\"chroms\", required=False, type=str, default=None,\n action='store', help=\"A comma-separated list of chromosomes to learn correction values for (None indicates all chromosomes). [default: %(default)s]\")\n subparser.add_argument(\"-o\", \"--output-file\", dest=\"output\", required=False, type=str, default=None,\n action='store', help=\"An alternate filename to save the normalized project to. If not given, the original project file will be overwritten. [default: %(default)s]\")\n subparser.add_argument(dest=\"project\", type=str,\n help=\"The name of the HiFive HiC project to normalize.\")\n add_silent_argument(subparser)\n return", "def scaleKey(*args, animation: AnyStr=\"keysOrObjects.\", attribute: Union[AnyStr,\n List[AnyStr]]=\"\", controlPoints: bool=False, float: Union[floatrange,\n List[floatrange]]=None, floatPivot: float=0.0, floatScale: float=0.0, hierarchy:\n AnyStr=\"\", includeUpperBound: bool=True, index: Union[int, List[int]]=0,\n newEndFloat: float=0.0, newEndTime: time=None, newStartFloat: float=0.0,\n newStartTime: time=None, scaleSpecifiedKeys: bool=True, shape: bool=True, time:\n Union[timerange, List[timerange]]=None, timePivot: time=None, timeScale: float=0.0,\n valuePivot: float=0.0, valueScale: float=0.0, **kwargs)->int:\n pass", "def walk(self):\n for group in self.all_groups.values():\n yield from group.calculations", "def _create_options_grid(fracs: pd.DataFrame, scale_factor: float) -> pd.DataFrame:\r\n fracs = fracs.copy(deep=True)\r\n scales = np.linspace(1, scale_factor, num=100, endpoint=True)\r\n options = pd.DataFrame(columns=['scale', 'percent_dropped', 'number'])\r\n\r\n for scale in scales:\r\n # calc new frac samples, maximum of 1\r\n fracs['control_scaled_sample_fraction'] = np.min([(fracs['treatment'] * scale/fracs['control']).values, [1]*len(fracs)], axis=0)\r\n fracs['treatment_scaled_sample_fraction'] = fracs['control_scaled_sample_fraction'] * fracs['control']/fracs['treatment']\r\n\r\n # calc %drop as difference of scale and actual ( e.g. 
where we pinned max at 1 in control scaled sample fraction)\r\n num_dropped = (fracs['treatment'] * (np.array([scale] * len(fracs)) - fracs['treatment_scaled_sample_fraction'])).sum()\r\n percent_dropped = num_dropped/(fracs['treatment'] * scale).sum()\r\n\r\n # calc new total\r\n number = (fracs['treatment']*fracs['treatment_scaled_sample_fraction']).sum()\r\n options = options.append({'scale': scale, 'percent_dropped': percent_dropped, 'number': number}, ignore_index=True)\r\n\r\n return options", "def normalizeResults(results, independentVariable, basis):\n normValues = {}\n if basis == \"min\":\n reduction = min\n elif basis == \"mean\":\n reduction = mean\n elif basis == \"max\":\n reduction = max\n\n print(\"Normalising by \" + basis)\n for k in results.keys():\n ik = k.split(\",\")[0]\n if ik not in normValues.keys():\n normValues[ik] = []\n values = []\n for iv in results[k].keys():\n values += [results[k][iv].__dict__[\"Mean\"]]\n normValues[ik] += values\n for ik in normValues.keys():\n normValues[ik] = reduction(normValues[ik])\n\n # And now scale everything\n for k in results.keys():\n ik = k.split(\",\")[0]\n norm = normValues[ik]\n experiment = results[k]\n for line in experiment.values():\n for value in line.__dict__.keys():\n if value == independentVariable:\n continue\n line.__dict__[value] = line.__dict__[value] / norm", "def _resize_masks(self, results):\n for key in results.get('mask_fields', []):\n if results[key] is None:\n continue\n if self.keep_ratio:\n results[key] = results[key].rescale(results['scale'])\n else:\n results[key] = results[key].resize(results['img_shape'][:2])", "def _resize_masks(self, results):\n for key in results.get('mask_fields', []):\n if results[key] is None:\n continue\n if self.keep_ratio:\n results[key] = results[key].rescale(results['scale'])\n else:\n results[key] = results[key].resize(results['img_shape'][:2])", "def gallery_groups(self):\n\n \"Collect data into fixed-length chunks or blocks\"\n # grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx\n n = 3\n iterable = self.context['gallery'].values()\n args = [iter(iterable)] * 3\n return izip_longest(fillvalue=None, *args)", "def normalize_features(self, data_dict, ind):\n pre_norm_list = []\n for title in data_dict:\n pre_norm_list.append(data_dict[title][ind])\n if self.normalization_method == 'min_max':\n mini, maxi, norm_list = normalize.min_max_normalize(pre_norm_list)\n self.normalization_n.append(mini)\n self.normalization_d.append(maxi - mini)\n elif self.normalization_method == 'z_score':\n mean, var, norm_list = normalize.z_score_normalize(pre_norm_list)\n self.normalization_n.append(mean)\n self.normalization_d.append(var)\n elif self.normalization_method == 'none':\n norm_list = pre_norm_list[:]\n self.normalization_n.append(0)\n self.normalization_d.append(1)\n for i, title in enumerate(data_dict):\n data_dict[title][ind] = norm_list[i]", "def group(self, key=None, value=None):\n if key is None:\n key = self.keys[0]\n if key not in self.keys:\n raise ValueError('The key you entered does not exist in this ALE.')\n data = sorted(self.dicts(value), key=lambda d: d.get(key))\n result = [list(g) for k, g in groupby(data)]\n return result", "def __call__(self, results):\n if 'scale' not in results:\n if 'scale_factor' in results:\n img_shape = results['img'].shape[:2]\n scale_factor = results['scale_factor']\n assert isinstance(scale_factor, float)\n results['scale'] = tuple([int(x * scale_factor) for x in img_shape][::-1])\n else:\n self._random_scale(results)\n else:\n if not 
self.override:\n assert 'scale_factor' not in results, 'scale and scale_factor cannot be both set.'\n else:\n results.pop('scale')\n if 'scale_factor' in results:\n results.pop('scale_factor')\n self._random_scale(results)\n\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_cbboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n\n return results", "def _group_separability_scores(self, sep_scores: Dict) -> Dict:\n grouped_sep_scores = {}\n\n for class_pair_key, class_pair_val in sep_scores.items():\n grouped_sep_scores[class_pair_key] = {}\n for concept_key, concept_attrs in CONCEPT_GROUPING.items():\n val = sum([class_pair_val[attr] for attr in concept_attrs]) / len(concept_attrs)\n grouped_sep_scores[class_pair_key][concept_key] = val\n return grouped_sep_scores", "def create_lod_groups(lod_dict, name='LOD_grp'):\n geo_lod_list = list()\n cmds.select(clear=True)\n for lod, lod_geo in lod_dict.items():\n for geo in lod_geo:\n if cmds.listRelatives(geo, parent=True):\n cmds.parent(geo, world=True)\n geo_lod_list.append(lod_geo[0])\n\n if geo_lod_list:\n cmds.select(geo_lod_list)\n cmds.LevelOfDetailGroup()\n lod_grp = cmds.rename(cmds.ls(selection=True)[0], name)\n for lod, lod_geo in lod_dict.items():\n if len(lod_geo) > 1:\n lod_parent = cmds.listRelatives(lod_geo[0], parent=True)\n if lod_parent:\n cmds.parent(lod_geo[1:], lod_parent[0])\n return lod_grp", "def divy_keys(self,spot):\n\n\t\tgroup_counts = [sum([i[0]=='subpattern' \n\t\t\tfor i in re.sre_parse.parse(self.spots[spot][key])]) \n\t\t\t#---apply naming convention\n\t\t\tfor key in ['top','step','part']]\n\t\tcursor = ([0]+[sum(group_counts[:i+1]) for i in range(len(group_counts))])\n\t\tslices = [slice(cursor[i],cursor[i+1]) for i in range(len(cursor)-1)]\n\t\tdivy = lambda x: [y[0] if len(y)==1 else y for y in [x[s] for s in slices]]\n\t\treturn divy", "def _compute_kratios_multilayers(self):\n for i, layer in enumerate(self._layers.keys()):\n if not layer.is_thickness_known():\n raise ValueError(\"Thickness of layer %i is unknown\" % i)\n\n # Compute\n layer = list(self._layers.keys())[0]\n thickness_low_m = layer.thickness_m\n thickness_high_m = layer.thickness_m * 10\n step = 1\n\n _thicknesses, kratios = \\\n self.compute_kratio_vs_thickness(layer, thickness_low_m,\n thickness_high_m, step)\n\n # Reorganize results\n output = {}\n for experiment, kratio in kratios.items():\n output.setdefault(experiment, kratio[0])\n\n return output", "def _make_category_groups(data_struct):\n groups = {}\n for cat in set(data_struct[\"Objects\"]): \n \n data_names = [\"left_x\",\"top_y\",\"width\",\"height\",\"FPS\",\"AVG_FPS\",\"Accuracy\"]\n indices = [i for i, x in enumerate(data_struct[\"Objects\"]) if x == cat]\n for dn in data_names:\n for idx in indices:\n groups[cat] = data_struct[dn][idx]\n return(groups)", "def scaleRanges(ranges, dims=(0,1,2)):\n max_pos_span = np.max([ranges[dim][1] - ranges[dim][0] for dim in\n dims])\n for k in ranges:\n ranges[k] = list(ranges[k])\n\n for dim in dims:\n midpoint = 0.5 * (ranges[dim][1] + ranges[dim][0])\n # import pdb; pdb.set_trace()\n ranges[dim][1] = midpoint + 0.5 * max_pos_span\n ranges[dim][0] = midpoint - 0.5 * max_pos_span", "def get_dimensionality_gorai(structure, max_hkl=2, el_radius_updates=None,\n min_slab_size=5, min_vacuum_size=5,\n standardize=True, bonds=None):\n if standardize:\n structure = SpacegroupAnalyzer(structure). 
\\\n            get_conventional_standard_structure()\n\n    if not bonds:\n        bonds = get_max_bond_lengths(structure, el_radius_updates)\n\n    num_surfaces = 0\n    for h in range(max_hkl):\n        for k in range(max_hkl):\n            for l in range(max_hkl):\n                if max([h, k, l]) > 0 and num_surfaces < 2:\n                    sg = SlabGenerator(structure, (h, k, l),\n                                       min_slab_size=min_slab_size,\n                                       min_vacuum_size=min_vacuum_size)\n                    slabs = sg.get_slabs(bonds)\n                    for _ in slabs:\n                        num_surfaces += 1\n\n    return 3 - min(num_surfaces, 2)", "def normalize(self, mode: Literal[\"formula_unit\", \"atom\"] = \"formula_unit\") -> \"ComputedStructureEntry\":\n    # TODO: this should raise TypeError since normalization does not make sense\n    # raise TypeError(\"You cannot normalize a structure.\")\n    warnings.warn(\n        f\"Normalization of a `{self.__class__.__name__}` makes \"\n        \"`self.composition` and `self.structure.composition` inconsistent\"\n        \" - please use self.composition for all further calculations.\"\n    )\n    # TODO: find a better solution for creating copies instead of as/from dict\n    factor = self._normalization_factor(mode)\n    d = super().normalize(mode).as_dict()\n    d[\"structure\"] = self.structure.as_dict()\n    entry = self.from_dict(d)\n    entry._composition /= factor # pylint: disable=E1101\n    return entry", "def apply_cuts(chain, isotope, tree, volume):\n\n    #open file which includes fill levels and fill days\n    infile = open(\"/users/langrock/plotting_macros/Partial_fill/split_level.txt\",\"r\")\n\n    #define root file to save root files to\n    outputroot = ROOT.TFile(\"/data/langrock/PartialFill/Full/root/\" + isotope + \"_\" + chain + \"_\" + volume +\".root\",\"recreate\")\n\n    #define histograms\n    hist = define_histograms.DefineHistograms()\n\n    events_full = 0\n    events_pocut = 0\n    events_deltatcut = 0\n    events_bifidvolcut = 0\n    events_deltarcut = 0\n    events_bicut = 0\n    events_allcut = 0\n\n    #get fill days and fill level from file, loop through each line and perform the cut selection on each day of filling\n    for line in infile:\n        words = line.split()\n\n        if len(words)!=0:\n\n            d = float(words[0])\n            z_level = float(words[1])\n\n            #loop through the events in the root file\n            for i in range(tree.GetEntries()):\n                #get variables from previous events\n                tree.GetEntry(i-1)\n                nhits_prev = tree.nhits\n                radius_prev = math.sqrt(tree.posx*tree.posx+tree.posy*tree.posy+tree.posz*tree.posz)\n                time_prev = tree.uTNSecs + tree.uTSecs*math.pow(10,9) + tree.uTDays*24*60*60*math.pow(10,9)\n                energy_prev = tree.energy\n                fitValid_prev = tree.fitValid\n                x_prev = tree.posx\n                y_prev = tree.posy\n                z_prev = tree.posz\n\n                #get variables from current events\n                tree.GetEntry(i)\n                nhits = tree.nhits\n                radius = math.sqrt(tree.posx*tree.posx+tree.posy*tree.posy+tree.posz*tree.posz)\n                time = tree.uTNSecs + tree.uTSecs*math.pow(10,9) + tree.uTDays*24*60*60*math.pow(10,9)\n                energy = tree.energy\n                fitValid = tree.fitValid\n                x = tree.posx\n                y = tree.posy\n                z = tree.posz\n\n                #get day at which events were simulated\n                day = tree.uTDays\n\n                #define time difference and event distance\n                delta_t = time - time_prev\n                delta_r = math.sqrt(math.pow((x_prev - x),2) + math.pow((y_prev - y),2) + math.pow((z_prev - z),2))\n\n                fidvol_value = 5000\n\n                #if the event was generated on the current day of filling, apply cuts\n                if d == day:\n\n                    #fill histograms and count events\n                    hist.h_energy_full.Fill(energy)\n                    hist.h_nhitspo_full.Fill(nhits)\n                    hist.h_nhitsbi_full.Fill(nhits_prev)\n                    hist.h_deltat_full.Fill(delta_t)\n                    hist.h_deltar_full.Fill(delta_r)\n                    hist.h_rfidvolbi_full.Fill(radius_prev)\n\n                    events_full += 1\n\n                    
#apply fiducial volume cut\n                    if radius> 0 and radius < fidvol_value and z >= z_level+653:\n\n                        hist.h_energy_pocut.Fill(energy)\n                        hist.h_nhitspo_pocut.Fill(nhits)\n                        hist.h_nhitsbi_pocut.Fill(nhits_prev)\n                        hist.h_deltat_pocut.Fill(delta_t)\n                        hist.h_deltar_pocut.Fill(delta_r)\n                        hist.h_rfidvolbi_pocut.Fill(radius_prev)\n\n                        events_pocut += 1\n\n                        #bipo212 cut selection\n                        if chain == \"bipo212\":\n                            #apply polonium candidate cut\n                            if nhits >= 450 and nhits <= 580:\n\n                                hist.h_energy_deltatcut.Fill(energy)\n                                hist.h_nhitspo_deltatcut.Fill(nhits)\n                                hist.h_nhitsbi_deltatcut.Fill(nhits_prev)\n                                hist.h_deltat_deltatcut.Fill(delta_t)\n                                hist.h_deltar_deltatcut.Fill(delta_r)\n                                hist.h_rfidvolbi_deltatcut.Fill(radius_prev)\n\n                                events_deltatcut += 1\n\n                                #time difference cut\n                                if delta_t < 3690:\n\n                                    hist.h_energy_bifidvolcut.Fill(energy)\n                                    hist.h_nhitspo_bifidvolcut.Fill(nhits)\n                                    hist.h_nhitsbi_bifidvolcut.Fill(nhits_prev)\n                                    hist.h_deltat_bifidvolcut.Fill(delta_t)\n                                    hist.h_deltar_bifidvolcut.Fill(delta_r)\n                                    hist.h_rfidvolbi_bifidvolcut.Fill(radius_prev)\n\n                                    events_bifidvolcut += 1\n\n                                    #fiducial radius cut on bismuth candidate\n                                    if radius_prev > 0 and radius_prev < fidvol_value and z_prev >= z_level+653:\n\n                                        hist.h_energy_deltarcut.Fill(energy)\n                                        hist.h_nhitspo_deltarcut.Fill(nhits)\n                                        hist.h_nhitsbi_deltarcut.Fill(nhits_prev)\n                                        hist.h_deltat_deltarcut.Fill(delta_t)\n                                        hist.h_deltar_deltarcut.Fill(delta_r)\n                                        hist.h_rfidvolbi_deltarcut.Fill(radius_prev)\n\n                                        events_deltarcut += 1\n\n                                        #distance cut\n                                        if delta_r > 0 and delta_r < 1500:\n\n                                            hist.h_energy_bicut.Fill(energy)\n                                            hist.h_nhitspo_bicut.Fill(nhits)\n                                            hist.h_nhitsbi_bicut.Fill(nhits_prev)\n                                            hist.h_deltat_bicut.Fill(delta_t)\n                                            hist.h_deltar_bicut.Fill(delta_r)\n                                            hist.h_rfidvolbi_bicut.Fill(radius_prev)\n\n                                            events_bicut += 1\n\n                                            #nhits cut on the bismuth candidate\n                                            if nhits_prev >= 100:\n\n                                                hist.h_energy_allcut.Fill(energy)\n                                                hist.h_nhitspo_allcut.Fill(nhits)\n                                                hist.h_nhitsbi_allcut.Fill(nhits_prev)\n                                                hist.h_deltat_allcut.Fill(delta_t)\n                                                hist.h_deltar_allcut.Fill(delta_r)\n                                                hist.h_rfidvolbi_allcut.Fill(radius_prev)\n\n                                                events_allcut += 1\n\n                        #bipo214 cut selection\n                        elif chain == \"bipo214\":\n                            #nhits cut on polonium candidate\n                            if nhits >= 290 and nhits <= 450:\n\n                                hist.h_energy_deltatcut.Fill(energy)\n                                hist.h_nhitspo_deltatcut.Fill(nhits)\n                                hist.h_nhitsbi_deltatcut.Fill(nhits_prev)\n                                hist.h_deltat_deltatcut.Fill(delta_t)\n                                hist.h_deltar_deltatcut.Fill(delta_r)\n                                hist.h_rfidvolbi_deltatcut.Fill(radius_prev)\n\n                                events_deltatcut += 1\n\n                                #time difference cut\n                                if delta_t > 3690 and delta_t < 1798788:\n\n                                    hist.h_energy_bifidvolcut.Fill(energy)\n                                    hist.h_nhitspo_bifidvolcut.Fill(nhits)\n                                    hist.h_nhitsbi_bifidvolcut.Fill(nhits_prev)\n                                    hist.h_deltat_bifidvolcut.Fill(delta_t)\n                                    hist.h_deltar_bifidvolcut.Fill(delta_r)\n                                    hist.h_rfidvolbi_bifidvolcut.Fill(radius_prev)\n\n                                    events_bifidvolcut += 1\n\n                                    #fiducial volume cut on bismuth candidate\n                                    if radius_prev > 0 and radius_prev < fidvol_value and z_prev >= z_level+653:\n\n                                        hist.h_energy_deltarcut.Fill(energy)\n                                        hist.h_nhitspo_deltarcut.Fill(nhits)\n                                        hist.h_nhitsbi_deltarcut.Fill(nhits_prev)\n                                        hist.h_deltat_deltarcut.Fill(delta_t)\n                                        hist.h_deltar_deltarcut.Fill(delta_r)\n                                        hist.h_rfidvolbi_deltarcut.Fill(radius_prev)\n\n                                        events_deltarcut += 1\n\n                                        #distance cut\n                                        if delta_r > 0 and delta_r < 1500:\n\n                                            hist.h_energy_bicut.Fill(energy)\n                                            hist.h_nhitspo_bicut.Fill(nhits)\n                                            hist.h_nhitsbi_bicut.Fill(nhits_prev)\n                                            hist.h_deltat_bicut.Fill(delta_t)\n                                            hist.h_deltar_bicut.Fill(delta_r)\n                                            hist.h_rfidvolbi_bicut.Fill(radius_prev)\n\n                                            
events_bicut += 1\n\n #nhits cut on the bismuth candidate\n if nhits_prev >= 600:\n\n hist.h_energy_allcut.Fill(energy)\n hist.h_nhitspo_allcut.Fill(nhits)\n hist.h_nhitsbi_allcut.Fill(nhits_prev)\n hist.h_deltat_allcut.Fill(delta_t)\n hist.h_deltar_allcut.Fill(delta_r)\n hist.h_rfidvolbi_allcut.Fill(radius_prev)\n\n events_allcut += 1\n \n #write all histograms to file\n outputroot.Write()\n outputroot.Close()\n\n #create string with all event counts\n outputstring = isotope + \"\\t all events: \" + str(events_full) + \"\\t fiducial volume: \" + str(events_pocut) + \"\\t Po nhits cut: \" + str(events_deltatcut) + \"\\t Delta t cut: \" + str(events_bifidvolcut) + \"\\t fiducial volume: \" + str(events_deltarcut) + \"\\t Delta r cut: \" + str(events_bicut) + \"\\t Bi nhits cut: \" + str(events_allcut) + \"\\n \" \n\n return outputstring", "def create_grid_queries(all_data_dicts, smod=2):\n rf_width_y, rf_width_x = [], []\n rf_y, rf_x = [], []\n for dat in all_data_dicts:\n rf_width_y += [dat['on_width_y']]\n rf_width_x += [dat['on_width_x']]\n cre_line = dat['cre_line']\n structure = dat['structure']\n rf_y += [dat['on_center_y']]\n rf_x += [dat['on_center_x']]\n\n # Get 95th percentile x and y width\n y_width = int(np.ceil(np.percentile(rf_width_y, 95)))\n x_width = int(np.ceil(np.percentile(rf_width_x, 95)))\n\n # Stride of the neuron bins\n y_stride = int(y_width/smod)\n x_stride = int(x_width/smod)\n\n # Create queries that have width and height by the receptive field size\n y_limit = int(np.floor(np.max(rf_y)))\n x_limit = int(np.floor(np.max(rf_x)))\n queries = []\n for x1 in (range(0, x_limit, x_stride)):\n for y1 in range(0, y_limit, y_stride):\n\n # Add width to each x1 as long as it's not more than x_max\n x2 = x1 + x_width\n # Add width to each y1 as long as it's not more than y_max\n y2 = y1 + y_width\n\n # add new coordinate range to queries\n queries += [[{\n 'rf_coordinate_range': { # Get all cells\n 'x_min': x1,\n 'x_max': x2,\n 'y_min': y1,\n 'y_max': y2,\n },\n 'cre_line': cre_line,\n 'structure': structure}]]\n return queries, max([x_width, y_width])", "def normalize(self, mode: Literal[\"formula_unit\", \"atom\"] = \"formula_unit\") -> \"ComputedEntry\":\n\n factor = self._normalization_factor(mode)\n new_composition = self._composition / factor\n new_energy = self._energy / factor\n\n new_entry_dict = self.as_dict()\n new_entry_dict[\"composition\"] = new_composition.as_dict()\n new_entry_dict[\"energy\"] = new_energy\n\n # TODO: make sure EnergyAdjustments are _also_ immutable to avoid this hacking\n new_energy_adjustments = MontyDecoder().process_decoded(new_entry_dict[\"energy_adjustments\"])\n for ea in new_energy_adjustments:\n ea.normalize(factor)\n new_entry_dict[\"energy_adjustments\"] = [ea.as_dict() for ea in new_energy_adjustments]\n\n return self.from_dict(new_entry_dict)", "def __iter__(self):\n grouping = collections.defaultdict(list)\n\n for name, field in self.fields.items():\n group = getattr(field, 'group', '0. ')\n grouping[group].append((field.verbose_name, getattr(\n self.instance, name)))\n\n rexp = re.compile(r\"\\d+. 
\")\n\n for group, fields in sorted(grouping.items()):\n yield rexp.sub('', group), fields", "def _populate_widgets(self):\n\n if self.parent.session is None:\n # No point populating the widgets with the default values from the\n # SMH file because these will be updated when a session is loaded.\n return\n\n keys = (\"function\", \"order\", \"low_sigma_clip\", \"high_sigma_clip\",\n \"knot_spacing\", \"max_iterations\")\n self._cache = {\n \"input\": {}\n }\n for key in keys:\n self._cache[\"input\"][key] \\\n = self.parent.session.setting((\"normalization\", key))\n\n # Continuum masks.\n self._cache[\"masks\"] \\\n = self.parent.session.setting((\"normalization\", \"masks\"))\n self._cache[\"default_mask\"] \\\n = self.parent.session.setting((\"normalization\", \"default_mask\")) \\\n or self._cache[\"masks\"].keys()[0]\n\n\n # Put these values into the widgets.\n self.low_sigma_clip.setText(\n str(self._cache[\"input\"][\"low_sigma_clip\"]))\n self.high_sigma_clip.setText(\n str(self._cache[\"input\"][\"high_sigma_clip\"]))\n self.knot_spacing.setText(str(\n self._cache[\"input\"][\"knot_spacing\"]))\n\n functions = [self.function.itemText(i).lower() \\\n for i in range(self.function.count())]\n self.function.setCurrentIndex(functions.index(\n self._cache[\"input\"][\"function\"]))\n\n # Normalization order.\n orders = [int(self.order.itemText(i)) \\\n for i in range(self.order.count())]\n self.order.setCurrentIndex(orders.index(\n self._cache[\"input\"][\"order\"]))\n\n # Normalization maximum iterations.\n norm_max_iters = [int(self.norm_max_iter.itemText(i)) \\\n for i in range(self.norm_max_iter.count())]\n self.norm_max_iter.setCurrentIndex(norm_max_iters.index(\n self._cache[\"input\"][\"max_iterations\"]))\n\n # Mask names.\n for name in self._cache[\"masks\"].keys():\n self.continuum_mask.addItem(name)\n\n self.continuum_mask.setCurrentIndex(\n self._cache[\"masks\"].keys().index(\n self._cache[\"default_mask\"]))\n\n self.order_slide.setMaximum(len(self.parent.session.input_spectra) - 1)\n self.current_order_label.setText(\"Order 1 of {}\".format(\n len(self.parent.session.input_spectra)))\n\n # Draw the widgets.\n try:\n self.order_slide.setValue(0)\n self.update_order_index(0)\n self.update_continuum_mask(refresh=False)\n self.fit_continuum(clobber=False)\n self.draw_order(refresh=False)\n self.draw_continuum(refresh=True)\n\n except (AttributeError, KeyError):\n # HACK\n # when loading a fresh session, it will skip all those blocks\n # I think this is okay?\n pass\n return None", "def range_based_track_energy(data_dict, result_dict,\n bin_size=17, \n include_pids=[2, 4],\n data=False,\n min_points=5,\n mode='px'):\n if data:\n particles = result_dict['particles']\n truth_particles = []\n else:\n particles = result_dict['particles']\n truth_particles = result_dict['truth_particles']\n \n # Use meta info to convert units\n \n splines = {ptype: get_splines(ptype) for ptype in include_pids}\n meta = data_dict['meta']\n px_to_cm = np.mean(meta[6:9]) # TODO: ONLY TEMPORARY\n\n for i, p in enumerate(particles):\n if p.semantic_type == 1 and p.pid in include_pids:\n # if mode == 'cm':\n points = p.points\n # bin_size_cm = bin_size\n # else:\n # points = _pix_to_cm(p.points, meta)\n # bin_size_cm = bin_size * px_to_cm\n if points.shape[0] > min_points:\n length = compute_track_length(points, bin_size=bin_size)\n p.length = length\n if mode == 'cm':\n p.csda_kinetic_energy = float(splines[p.pid](length))\n else:\n p.csda_kinetic_energy = float(splines[p.pid](length * px_to_cm))\n\n 
for i, p in enumerate(truth_particles):\n if p.semantic_type == 1 and p.pid in include_pids:\n # if mode == 'cm':\n pts = p.points\n tng_pts = p.truth_points\n # bin_size_cm = bin_size\n # else:\n # pts = _pix_to_cm(p.points, meta)\n # tng_pts = _pix_to_cm(p.truth_points, meta)\n # bin_size_cm = bin_size * px_to_cm\n if pts.shape[0] > min_points:\n length = compute_track_length(pts, bin_size=bin_size)\n p.length = float(length)\n if mode == 'cm':\n p.csda_kinetic_energy = float(splines[p.pid](length))\n else:\n p.csda_kinetic_energy = float(splines[p.pid](length * px_to_cm))\n if tng_pts.shape[0] > min_points:\n length_tng = compute_track_length(tng_pts, bin_size=bin_size)\n p.length_tng = float(length_tng)\n if mode == 'cm':\n p.csda_kinetic_energy_tng = float(splines[p.pid](length_tng))\n else:\n p.csda_kinetic_energy_tng = float(splines[p.pid](length_tng * px_to_cm))\n \n return {}", "def normalize_bounds(self, bounds):\n scaled_bounds = []\n scalings = []\n intercepts = []\n \n non_fixed_params = []\n \n print(self.device)\n \n for name, domain in self.bounds.items():\n # Get any fixed parmeters\n if type(domain) == int or type(domain) == float:\n # Take note\n self.fixed_parameters.append(name)\n\n # Free parameters\n elif type(domain) == tuple:\n # Bookkeeping\n self.free_parameters.append(name)\n\n # Get scaling\n lower_bound = min(domain)\n upper_bound = max(domain)\n scale = upper_bound - lower_bound\n\n # Transform to [0, 1] domain\n #scaled_bound = {'name': name, 'type': 'continuous', 'domain': (0., 1.)} #torch.adjustment required\n non_fixed_params.append(name)\n \n # Store\n #scaled_bounds.append(scaled_bound)\n scalings.append(scale)\n intercepts.append(lower_bound)\n else:\n raise ValueError(\"Domain bounds not understood\")\n \n n_hyperparams = len(non_fixed_params)\n \n scaled_bounds = cat([zeros(1,n_hyperparams, device = self.device), \n ones(1, n_hyperparams, device = self.device)], 0)\n return scaled_bounds, tensor(scalings, device = self.device, requires_grad = False), tensor(intercepts, device = self.device, requires_grad = False) #torch.adjustment required", "def _convert_normalization_params(\n z_score_dict_dict, frequency_dict_dict=None,\n min_percentile_level=None, max_percentile_level=None):\n\n normalization_dict = {}\n\n for this_key in z_score_dict_dict:\n this_inner_dict = z_score_dict_dict[this_key]\n this_standard_deviation = _get_standard_deviation(this_inner_dict)\n normalization_dict[this_key] = [\n this_inner_dict[MEAN_VALUE_KEY], this_standard_deviation\n ]\n\n if frequency_dict_dict is not None:\n this_inner_dict = frequency_dict_dict[this_key]\n this_min_value = _get_percentile(\n frequency_dict=this_inner_dict,\n percentile_level=min_percentile_level)\n this_max_value = _get_percentile(\n frequency_dict=this_inner_dict,\n percentile_level=max_percentile_level)\n\n normalization_dict[this_key].append(this_min_value)\n normalization_dict[this_key].append(this_max_value)\n\n normalization_dict[this_key] = numpy.array(normalization_dict[this_key])\n\n normalization_table = pandas.DataFrame.from_dict(\n normalization_dict, orient='index')\n\n column_dict_old_to_new = {\n 0: dl_utils.MEAN_VALUE_COLUMN,\n 1: dl_utils.STANDARD_DEVIATION_COLUMN\n }\n\n if frequency_dict_dict is not None:\n column_dict_old_to_new.update({\n 2: dl_utils.MIN_VALUE_COLUMN,\n 3: dl_utils.MAX_VALUE_COLUMN\n })\n\n return normalization_table.rename(\n columns=column_dict_old_to_new, inplace=False)", "def processImage(im, 
options):\n\n#########################################################\n## YOU MUST ADAPT THE CODE IN THIS FUNCTION TO:\n## 1- CHANGE THE IMAGE TO THE CORRESPONDING COLOR SPACE FOR KMEANS\n## 2- APPLY KMEANS ACCORDING TO 'OPTIONS' PARAMETER\n## 3- GET THE NAME LABELS DETECTED ON THE 11 DIMENSIONAL SPACE\n#########################################################\n\n## 1- CHANGE THE IMAGE TO THE CORRESPONDING COLOR SPACE FOR KMEANS\n    if options['colorspace'].lower() == 'ColorNaming'.lower():\n        im = cn.ImColorNamingTSELabDescriptor(im)\n    elif options['colorspace'].lower() == 'RGB'.lower():\n        pass\n    elif options['colorspace'].lower() == 'Lab'.lower():\n        im = color.rgb2lab(im)\n    elif options['colorspace'].lower() == 'HED'.lower():\n        im = color.rgb2hed(im)\n    elif options['colorspace'].lower() == 'HSV'.lower():\n        im = color.rgb2hsv(im)\n    '''\n    elif options['colorspace'].lower() == 'opponent'.lower():\n        im = color.rgb2lab(im)\n    elif options['colorspace'].lower() == 'HSL'.lower():\n        im = color.rgb2(im)\n    elif options['colorspace'].lower() == 'Lab'.lower():\n        im = color.rgb2lab(im)\n    '''\n\n\n## 2- APPLY KMEANS ACCORDING TO 'OPTIONS' PARAMETER\n    if options['K']<2: # find the best K\n        kmeans = km.KMeans(im, 0, options)\n        kmeans.bestK()\n    else:\n        kmeans = km.KMeans(im, options['K'], options)\n        kmeans.run()\n\n## 3- GET THE NAME LABELS DETECTED ON THE 11 DIMENSIONAL SPACE\n    if options['colorspace'].lower() == 'Lab'.lower():\n        kmeans.centroids = cn.ImColorNamingTSELabDescriptor((color.lab2rgb(kmeans.centroids.reshape(1,len(kmeans.centroids),3))*255).reshape(len(kmeans.centroids),3))\n    elif options['colorspace'].lower() == 'HED'.lower():\n        kmeans.centroids = cn.ImColorNamingTSELabDescriptor(color.hed2rgb(kmeans.centroids.reshape(1,len(kmeans.centroids),3)).reshape(len(kmeans.centroids),3))\n    elif options['colorspace'].lower() == 'HSV'.lower():\n        kmeans.centroids = cn.ImColorNamingTSELabDescriptor((color.hsv2rgb(kmeans.centroids.reshape(1,len(kmeans.centroids),3))*255).reshape(len(kmeans.centroids),3))\n    elif options['colorspace'].lower() == 'RGB'.lower():\n        kmeans.centroids = cn.ImColorNamingTSELabDescriptor(kmeans.centroids)\n\n#########################################################\n## THE FOLLOWING 2 END LINES SHOULD BE KEPT UNMODIFIED\n#########################################################\n    colors, which = getLabels(kmeans, options)\n    return colors, which, kmeans", "def set_image_ranges_in_scaling_models(experiments):\n    for exp in experiments:\n        if exp.scan:\n            valid_image_ranges = exp.scan.get_valid_image_ranges(exp.identifier)\n            if \"valid_image_range\" not in exp.scaling_model.configdict:\n                # only set if not currently set i.e. 
set initial\n exp.scaling_model.set_valid_image_range(exp.scan.get_image_range())\n if exp.scaling_model.configdict[\"valid_image_range\"] != [\n valid_image_ranges[0][0],\n valid_image_ranges[-1][1],\n ]:\n # first and last values in whole list of tuples\n exp.scaling_model.limit_image_range(\n (valid_image_ranges[0][0], valid_image_ranges[-1][1])\n )\n return experiments", "def __call__(self, results):\n\n if 'scale' not in results:\n if 'scale_factor' in results:\n img_shape = results['img'].shape[:2]\n scale_factor = results['scale_factor']\n assert isinstance(scale_factor, float)\n results['scale'] = tuple(\n [int(x * scale_factor) for x in img_shape][::-1])\n else:\n self._random_scale(results)\n else:\n if not self.override:\n assert 'scale_factor' not in results, (\n 'scale and scale_factor cannot be both set.')\n else:\n results.pop('scale')\n if 'scale_factor' in results:\n results.pop('scale_factor')\n self._random_scale(results)\n\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n return results", "def __call__(self, results):\n\n if 'scale' not in results:\n if 'scale_factor' in results:\n img_shape = results['img'].shape[:2]\n scale_factor = results['scale_factor']\n assert isinstance(scale_factor, float)\n results['scale'] = tuple(\n [int(x * scale_factor) for x in img_shape][::-1])\n else:\n self._random_scale(results)\n else:\n if not self.override:\n assert 'scale_factor' not in results, (\n 'scale and scale_factor cannot be both set.')\n else:\n results.pop('scale')\n if 'scale_factor' in results:\n results.pop('scale_factor')\n self._random_scale(results)\n\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n return results", "def normalize(self, normalizationLevel=\"minute\", fusionMethod=\"mean\", interpolationMethod=\"linear\"):\n raise NotImplementedError", "def _compute_sizing_mode(self, children, props):\n margin = props.get('margin', self.margin)\n sizing_mode = props.get('sizing_mode', self.sizing_mode)\n if sizing_mode == 'fixed':\n return {}\n\n # Iterate over children and determine responsiveness along\n # each axis, scaling and the widths of each component.\n heights, widths = [], []\n all_expand_height, expand_width, expand_height, scale = True, False, False, False\n for child in children:\n smode = child.sizing_mode\n if smode and 'scale' in smode:\n scale = True\n\n width_expanded = smode in ('stretch_width', 'stretch_both', 'scale_width', 'scale_both')\n height_expanded = smode in ('stretch_height', 'stretch_both', 'scale_height', 'scale_both')\n expand_width |= width_expanded\n expand_height |= height_expanded\n if width_expanded:\n width = child.min_width\n else:\n width = child.width\n if not child.width:\n width = child.min_width\n if width:\n if isinstance(margin, tuple):\n if len(margin) == 2:\n width += margin[1]*2\n else:\n width += margin[1] + margin[3]\n else:\n width += margin*2\n widths.append(width)\n\n if height_expanded:\n height = child.min_height\n else:\n height = child.height\n if height:\n all_expand_height = False\n else:\n height = child.min_height\n if height:\n if isinstance(margin, tuple):\n if len(margin) == 2:\n height += margin[0]*2\n else:\n height += margin[0] + margin[2]\n else:\n height += margin*2\n heights.append(height)\n\n # Infer new sizing mode based on children\n mode = 'scale' if scale else 'stretch'\n if self._direction == 'horizontal':\n allow_height_scale = all_expand_height\n 
else:\n            allow_height_scale = True\n        if expand_width and expand_height and not self.width and not self.height:\n            if allow_height_scale or 'both' in (sizing_mode or ''):\n                sizing_mode = f'{mode}_both'\n            else:\n                sizing_mode = f'{mode}_width'\n        elif expand_width and not self.width:\n            sizing_mode = f'{mode}_width'\n        elif expand_height and not self.height and allow_height_scale:\n            sizing_mode = f'{mode}_height'\n        if sizing_mode is None:\n            return {'sizing_mode': props.get('sizing_mode')}\n\n        properties = {'sizing_mode': sizing_mode}\n        if ((sizing_mode.endswith('_width') or sizing_mode.endswith('_both')) and\n                widths and 'min_width' not in properties):\n            width_op = max if self._direction == 'vertical' else sum\n            min_width = width_op(widths)\n            op_widths = [min_width]\n            if 'max_width' in properties:\n                op_widths.append(properties['max_width'])\n            properties['min_width'] = min(op_widths)\n        if ((sizing_mode.endswith('_height') or sizing_mode.endswith('_both')) and\n                heights and 'min_height' not in properties):\n            height_op = max if self._direction == 'horizontal' else sum\n            min_height = height_op(heights)\n            op_heights = [min_height]\n            if 'max_height' in properties:\n                op_heights.append(properties['max_height'])\n            properties['min_height'] = min(op_heights)\n        return properties", "def _items_divide(self, numerator_data, denominator_data):\n        items = {}\n        if numerator_data['items'] is None:\n            items = None\n        else:\n            for n in numerator_data['items']:\n                # TODO what should we do when a matching item isn't found?\n                matching_d = next((item for item in denominator_data['items'] if\n                                   item['group'] == n['group']),\n                                  {'group': '_unknown', 'value': None})\n                if matching_d['value'] is None or n['value'] is None:\n                    divided = None\n                else:\n                    divided = n['value'] / matching_d['value']\n\n                # item = dict({'group': n['group'],\n                #              'value': divided})\n                items[n['group']] = divided\n\n        return {'items': items, 'grouping': numerator_data['grouping'],\n                'data_id': numerator_data['data_id']}", "def process_struct_divs(div, ranges):\n\trangeKey = get_rangeKey(div)\n\n\t# when the top level div is a PAGE\n\tif is_page(div):\n\t\tp_range = process_page(div)\n\t\tif p_range:\n\t\t\tranges.append(p_range)\n\telse:\n\t\tsubdivs = div.xpath('./mets:div', namespaces = XMLNS)\n\t\tif len(subdivs) > 0:\n\t\t\tranges.append(process_intermediate(div))\n\n\treturn ranges", "def get_objects(color, depth, threshold1, threshold2):\n\n    gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)\n    blur = cv2.GaussianBlur(gray, (5, 5), 0)\n    surf = cv2.xfeatures2d.SURF_create(500)\n\n    # find and draw the keypoints\n    kp = surf.detect(blur,None)\n\n    pts = [p.pt for p in kp]\n    xpts = []\n    ypts = []\n\n    # evaluate the keypoints and only save the keypoints that are between the given thresholds\n    depth_values = []\n    for i in range(0,len(pts)):\n        xco = int(pts[i][0])\n        yco = int(pts[i][1])\n        depth_value = depth[yco][xco]\n        if depth_value >= float(threshold1) and depth_value <= float(threshold2):\n            xpts.append(xco)\n            ypts.append(yco)\n            depth_values.append(depth_value)\n\n    # make histogram of x coordinates of the saved keypoints\n    n, distr, _ = plt.hist(xpts)\n    plt.savefig('hist.png')\n\n    # evaluate the histogram and make separate arrays for the different objects\n    objectarray = []\n    temp = []\n    for i in range(len(n)):\n        if n[i] > 0:\n            temp.append(distr[i])\n            temp.append(distr[i+1])\n        else:\n            if len(temp)!=0:\n                objectarray.append(temp)\n                temp = []\n    objectarray.append(temp)\n\n    objects = []\n\n    # determine the objects with the previously calculated arrays\n    for i in range(len(objectarray)):\n        y_values 
= []\n min_x = int(np.amin(objectarray[i]))\n max_x = int(np.amax(objectarray[i]))\n\n for j in range(len(xpts)):\n if xpts[j] > min_x and xpts[j] < max_x:\n y_values.append(ypts[j])\n\n min_y = int(np.amin(y_values))\n max_y = int(np.amax(y_values))\n x = min_x\n y = min_y\n w = max_x - min_x\n h = max_y - min_y\n\n depth_mean = round(get_depth_mean(depth, x, y, w, h), 3)\n\n object = DetectedObject(x, y, w, h, depth_mean)\n objects.append(object)\n\n return objects", "def standardize(self, options: ParameterSetSchema) -> \"Parameters\":\n replace_dict = {}\n for key, val in asdict(self).items():\n dimensions = (val.shape[0], getattr(options, key).degree + 1)\n val_new = np.zeros(dimensions)\n max_dimension = (np.minimum(val.shape[1], dimensions[1]))\n val_new[:, :max_dimension] = val[:, :max_dimension]\n replace_dict.update({key: val_new})\n return replace(self, **replace_dict)", "def _normalizeKeySlice(self, key):\n if key.start is None:\n kstart = (0, 0)\n else:\n kstart = key.start\n\n if key.stop is None:\n kstop = (self.width, self.height)\n else:\n kstop = key.stop\n\n if key.step is None:\n kstep = (1, 1)\n elif isinstance(key.step, int):\n # if only one int is specified, use it for both steps\n kstep = (key.step, key.step)\n else:\n kstep = key.step\n\n # x1 & y1 should be top-left, x2 & y2 should be bottom-right\n # So swap these values if need be.\n x1, y1 = kstart\n x2, y2 = kstop\n if x1 > x2:\n x1, x2 = x2, x1\n if y1 > y2:\n y1, y2 = y2, y1\n\n try:\n x1, y1 = self._convertNegativeTupleKeyToPositiveTupleKey((x1, y1))\n\n # Because x2 and y2 can go 1 past the end of the max index, the\n # _convertNegativeTupleKeyToPositiveTupleKey() may raise an exception.\n # So we need to pass dummy values so the exception isn't raised.\n if x2 != self.width and x2 != -(self.width - 1) and \\\n y2 != self.height and y2 != -(self.height - 1):\n x2, y2 = self._convertNegativeTupleKeyToPositiveTupleKey((x2, y2))\n elif x2 != self.width and x2 != -(self.width - 1):\n x2, _dummy = self._convertNegativeTupleKeyToPositiveTupleKey((x2, 0))\n elif y2 != self.height and y2 != -(self.height - 1):\n _dummy, y2 = self._convertNegativeTupleKeyToPositiveTupleKey((0, y2))\n else:\n pass # In this case, we don't need to adust x2 and y2 at all. 
So do nothing.\n except KeyError:\n raise PyTextCanvasException('key must be a tuple of two ints')\n\n return (x1, y1, x2, y2, kstep[0], kstep[1])", "def object_separations(self):\n\n for cutout_info in self._catalog_dictionary.values():\n catalog = cutout_info['catalog']\n\n # Create SkyCoord objects for all objects in the catalog as well as the image center\n object_coords = SkyCoord(catalog['ALPHA_J2000'], catalog['DELTA_J2000'], unit=u.deg)\n center_coord = SkyCoord(catalog['SZ_RA'][0], catalog['SZ_DEC'][0], unit=u.deg)\n\n # Calculate the angular separations between the objects and the image center in arcminutes\n separations_arcmin = object_coords.separation(center_coord).to(u.arcmin)\n\n # Add our new column to the catalog\n catalog['RADIAL_SEP_ARCMIN'] = separations_arcmin\n\n # Update the catalog in the data structure\n cutout_info['catalog'] = catalog", "def get_extents(self, view, ranges):\n ndims = len(view.dimensions())\n num = 6 if self.projection == '3d' else 4\n if self.apply_ranges:\n if ranges:\n dims = view.dimensions()\n x0, x1 = ranges[dims[0].name]\n if ndims > 1:\n y0, y1 = ranges[dims[1].name]\n else:\n y0, y1 = (np.NaN, np.NaN)\n if self.projection == '3d':\n if len(dims) > 2:\n z0, z1 = ranges[dims[2].name]\n else:\n z0, z1 = np.NaN, np.NaN\n else:\n x0, x1 = view.range(0)\n y0, y1 = view.range(1) if ndims > 1 else (np.NaN, np.NaN)\n if self.projection == '3d':\n z0, z1 = view.range(2)\n if self.projection == '3d':\n range_extents = (x0, y0, z0, x1, y1, z1)\n else:\n range_extents = (x0, y0, x1, y1)\n else:\n range_extents = (np.NaN,) * num\n\n if self.apply_extents:\n norm_opts = self.lookup_options(view, 'norm').options\n if norm_opts.get('framewise', False) or self.dynamic:\n extents = view.extents\n else:\n extent_list = self.hmap.traverse(lambda x: x.extents, [Element])\n extents = util.max_extents(extent_list, self.projection == '3d')\n else:\n extents = (np.NaN,) * num\n return tuple(l1 if l2 is None or not np.isfinite(l2) else\n l2 for l1, l2 in zip(range_extents, extents))", "def get_options(self):\n additional_data = self.get_additional_data()\n options_out = []\n for key, value in additional_data['DIMENSION_VALUES'].items():\n key_label = ' '.join(key.split('_')).strip().title()\n data = {'specification': key_label, 'specification_key': key, 'choices': value}\n options_out.append(data)\n return options_out", "def normalizeValuesByWindows(data, window_size=None):\n\n if not options.window_size:\n window_size = min(map(lambda x: x[1] - x[0], data))\n\n new_values = []\n\n for start, end, value in data:\n if end - start < window_size:\n continue\n start = start - start % window_size\n for z in range(start, end, window_size):\n new_values.append((z, value))\n\n new_values.sort()\n\n # interpolate values for the same windows with average\n xvals = []\n yvals = []\n last_x = None\n values = []\n for x, value in new_values:\n\n if last_x != x:\n if last_x is not None:\n xvals.append(last_x)\n yvals.append(numpy.mean(values))\n values = []\n\n last_x = x\n values.append(value)\n\n return xvals, yvals", "def dset_full():\n _dset = dataset.Dataset(5)\n _dset.add_bool(\"idx\", val=[0, 1, 1, 0, 1])\n _dset.add_float(\"numbers\", val=[1, 2, 3, 4, 5])\n _dset.add_position(\"sat_pos\", val=np.ones((5, 3)), system=\"trs\")\n _dset.add_position(\"site_pos\", val=np.ones((5, 3)) * 2, system=\"trs\", other=_dset.sat_pos)\n _dset.add_position_delta(\"site_delta\", val=np.ones((5, 3)) * 0.5, system=\"trs\", ref_pos=_dset.site_pos)\n _dset.add_posvel(\"sat_posvel\", 
val=np.ones((5, 6)), system=\"trs\")\n _dset.add_posvel(\"site_posvel\", val=np.ones((5, 6)) * 2, system=\"trs\", other=_dset.sat_posvel)\n _dset.add_posvel_delta(\"site_posvel_delta\", val=np.ones((5, 6)) * 0.5, system=\"trs\", ref_pos=_dset.site_posvel)\n _dset.add_sigma(\"numbers2\", val=[3, 3, 3, 3, 3], sigma=[0.2, 0.2, 0.2, 0.2, 0.2])\n _dset.add_text(\"text\", val=[\"aaa\", \"aaa\", \"aaa\", \"aaa\", \"aaa\"])\n _dset.add_time(\"time\", val=[datetime(2015, 1, i) for i in range(5, 10)], scale=\"utc\", fmt=\"datetime\")\n _dset.add_time_delta(\"time_delta\", val=[timedelta(seconds=i) for i in range(20, 25)], scale=\"utc\", fmt=\"timedelta\")\n\n # Collections\n _dset.add_bool(\"group.idx\", val=[0, 0, 0, 0, 0])\n _dset.add_float(\"group.numbers\", val=[6, 7, 8, 9, 10])\n _dset.add_position(\"group.sat_pos\", val=np.ones((5, 3)) * 7, system=\"trs\")\n _dset.add_position(\"group.site_pos\", val=np.ones((5, 3)) * 8, system=\"trs\", other=_dset.group.sat_pos)\n _dset.add_position_delta(\"group.site_delta\", val=np.ones((5, 3)) * 9.5, system=\"trs\", ref_pos=_dset.group.site_pos)\n _dset.add_posvel(\"group.sat_posvel\", val=np.ones((5, 6)) * 6, system=\"trs\")\n _dset.add_posvel(\"group.site_posvel\", val=np.ones((5, 6)) * 5, system=\"trs\", other=_dset.group.sat_posvel)\n _dset.add_posvel_delta(\n \"group.site_posvel_delta\", val=np.ones((5, 6)) * 1.5, system=\"trs\", ref_pos=_dset.group.site_posvel\n )\n _dset.add_sigma(\"group.numbers2\", val=[1.2, 1.2, 1.2, 1.2, 1.2], sigma=[3.2, 3.2, 3.2, 3.2, 3.2])\n _dset.add_text(\"group.text\", val=[\"bbb\", \"bbb\", \"bbb\", \"bbb\", \"bbb\"])\n _dset.add_time(\"group.time\", val=[datetime(2015, 1, i) for i in range(10, 15)], scale=\"utc\", fmt=\"datetime\")\n _dset.add_time_delta(\n \"group.time_delta\", val=[timedelta(seconds=i) for i in range(0, 5)], scale=\"utc\", fmt=\"timedelta\"\n )\n return _dset", "def _split_into_categories(data_struct):\n data_names = [\"left_x\",\"top_y\",\"width\",\"height\",\"FPS\",\"AVG_FPS\",\"Accuracy\"]\n groups = {}\n\n for cat in set(data_struct[\"Objects\"]): \n indices = [i for i, x in enumerate(data_struct[\"Objects\"]) if x == cat]\n mask = []\n mask = np.empty((len(indices),len(data_names)))\n\n for counter,value in enumerate(data_names):\n mask[:,counter] = np.array(data_struct[value])[indices]\n\n groups[cat] = mask\n \n return(groups,data_names)", "def normalize(self, groups: dict, full_match: str):\n for divisor, regex in self.SYMBOL_MAP.items():\n if re.search(regex, groups[PERCENTAGE_SYMBOL_GROUP], self.FLAGS) is not None:\n return str(normalize_percentage_value(groups[PERCENTAGE_VALUE_GROUP]).quantize(Decimal('0.001')) /\n Decimal(divisor))\n raise ValueError('Percentage {} could not be normalized!'.format(full_match))", "def Group(self) -> _n_5_t_0:", "def Group(self) -> _n_5_t_0:", "def _set_grouping(self, change) -> None:\n grouping = self._grouping_full\n self.options = self._flat_groupings(grouping)\n self.set_trait(\n \"_grouping_labels\",\n tuple(\n [\n (header, tuple([_[0] for _ in options]))\n for header, options in grouping\n ]\n ),\n )\n if not self._initializing_traits_:\n for index, option in enumerate(self._flat_groupings()):\n if (\n option not in self.disabled_options\n and option not in self._group_headers\n ):\n if self.index == index:\n self._notify_trait(\"index\", index, index)\n else:\n self.index = index\n break\n else:\n self.index = None", "def gizmo_to_group(gizmo):\n\n if not isinstance(gizmo, nuke.Gizmo):\n return gizmo\n\n _selected = gizmo['selected'].value()\n _group = 
gizmo.makeGroup()\n\n # Set Input.\n for i in range(gizmo.inputs()):\n _group.setInput(i, gizmo.input(i))\n # Set Output.\n for n in nuke.allNodes():\n for i in range(n.inputs()):\n if n.input(i) is gizmo:\n n.setInput(i, _group)\n\n # Set position and name.\n if gizmo.shown():\n _group.showControlPanel()\n _group.setXYpos(gizmo.xpos(), gizmo.ypos())\n _name = gizmo['name'].value()\n nuke.delete(gizmo)\n _group.setName(_name)\n _group['selected'].setValue(_selected)\n\n return _group", "def _generate_modifiers(dm_schema_version=4, bands=None, pixel_scale=0.2, **kwargs): # pylint: disable=arguments-differ\n\n bands = bands or 'ugrizy'\n FLUX = 'flux' if dm_schema_version <= 2 else 'instFlux'\n ERR = 'Sigma' if dm_schema_version <= 1 else 'Err'\n\n modifiers = {\n 'objectId': 'id',\n 'parentObjectId': 'parent',\n 'ra': (np.rad2deg, 'coord_ra'),\n 'dec': (np.rad2deg, 'coord_dec'),\n 'x': 'base_SdssCentroid_x',\n 'y': 'base_SdssCentroid_y',\n 'xErr': f'base_SdssCentroid_x{ERR}',\n 'yErr': f'base_SdssCentroid_y{ERR}',\n 'xy_flag': 'base_SdssCentroid_flag',\n 'psNdata': 'base_PsfFlux_area',\n 'extendedness': 'base_ClassificationExtendedness_value',\n 'blendedness': 'base_Blendedness_abs',\n }\n\n not_good_flags = (\n 'base_PixelFlags_flag_edge',\n 'base_PixelFlags_flag_interpolatedCenter',\n 'base_PixelFlags_flag_saturatedCenter',\n 'base_PixelFlags_flag_crCenter',\n 'base_PixelFlags_flag_bad',\n 'base_PixelFlags_flag_suspectCenter',\n 'base_PixelFlags_flag_clipped',\n )\n\n modifiers['good'] = (create_basic_flag_mask,) + not_good_flags\n modifiers['clean'] = (\n create_basic_flag_mask,\n 'deblend_skipped',\n ) + not_good_flags\n\n # cross-band average, second moment values\n modifiers['I_flag'] = 'ext_shapeHSM_HsmSourceMoments_flag'\n for ax in ['xx', 'yy', 'xy']:\n modifiers[f'I{ax}_pixel'] = f'ext_shapeHSM_HsmSourceMoments_{ax}'\n modifiers[f'I{ax}PSF_pixel'] = f'base_SdssShape_psf_{ax}'\n\n for band in bands:\n modifiers[f'psFlux_{band}'] = (convert_flux_to_nanoJansky,\n f'{band}_slot_PsfFlux_{FLUX}',\n f'{band}_FLUXMAG0')\n modifiers[f'psFlux_flag_{band}'] = f'{band}_slot_PsfFlux_flag'\n modifiers[f'psFluxErr_{band}'] = (convert_flux_to_nanoJansky,\n f'{band}_slot_PsfFlux_{FLUX}{ERR}',\n f'{band}_FLUXMAG0')\n modifiers[f'mag_{band}'] = (convert_flux_to_mag,\n f'{band}_slot_PsfFlux_{FLUX}',\n f'{band}_FLUXMAG0')\n modifiers[f'magerr_{band}'] = (convert_flux_err_to_mag_err,\n f'{band}_slot_PsfFlux_{FLUX}',\n f'{band}_slot_PsfFlux_{FLUX}{ERR}')\n\n modifiers[f'cModelFlux_{band}'] = (convert_flux_to_nanoJansky,\n f'{band}_modelfit_CModel_{FLUX}',\n f'{band}_FLUXMAG0')\n modifiers[f'cModelFluxErr_{band}'] = (convert_flux_to_nanoJansky,\n f'{band}_modelfit_CModel_{FLUX}{ERR}',\n f'{band}_FLUXMAG0')\n modifiers[f'cModelFlux_flag_{band}'] = f'{band}_modelfit_CModel_flag'\n modifiers[f'mag_{band}_cModel'] = (convert_flux_to_mag,\n f'{band}_modelfit_CModel_{FLUX}',\n f'{band}_FLUXMAG0')\n modifiers[f'magerr_{band}_cModel'] = (convert_flux_err_to_mag_err,\n f'{band}_modelfit_CModel_{FLUX}',\n f'{band}_modelfit_CModel_{FLUX}{ERR}')\n modifiers[f'snr_{band}_cModel'] = (np.divide,\n f'{band}_modelfit_CModel_{FLUX}',\n f'{band}_modelfit_CModel_{FLUX}{ERR}')\n\n # Per-band shape information\n modifiers[f'I_flag_{band}'] = f'{band}_base_SdssShape_flag'\n\n for ax in ['xx', 'yy', 'xy']:\n modifiers[f'I{ax}_pixel_{band}'] = f'{band}_base_SdssShape_{ax}'\n modifiers[f'I{ax}PSF_pixel_{band}'] = f'{band}_base_SdssShape_psf_{ax}'\n\n modifiers[f'psf_fwhm_{band}'] = (\n lambda xx, yy, xy: pixel_scale * 2.355 * (xx 
* yy - xy * xy) ** 0.25,\n f'{band}_base_SdssShape_psf_xx',\n f'{band}_base_SdssShape_psf_yy',\n f'{band}_base_SdssShape_psf_xy')\n\n return modifiers", "def find_minmax_criteria(self, data):\r\n found = {}\r\n data = dict(data)\r\n for k in data.keys():\r\n m = re.match(r'(?P<minmax>min|max)\\[(?P<property_id>\\d+)\\]', k)\r\n if m is not None:\r\n minmax = m.group('minmax')\r\n property_id = int(m.group('property_id'))\r\n if not found.has_key(property_id):\r\n found[property_id] = MinMaxCriteria(property_id)\r\n if minmax == 'min':\r\n found[property_id].min_value = self.__to_value(data[k])\r\n elif minmax == 'max':\r\n found[property_id].max_value = self.__to_value(data[k])\r\n self.minmax_criteria = found", "def frac_diff_groups_model(param_dict, proj_dict, plot_opt='mhalo',\n nlim_min=5, nlim_threshold=False, arr_len=10, bin_statval='left',\n fig_fmt='pdf', figsize=(10, 8), fig_number=1):\n file_msg = param_dict['Prog_msg']\n ## Matplotlib option\n matplotlib.rcParams['axes.linewidth'] = 2.5\n matplotlib.rcParams['axes.edgecolor'] = 'black'\n ##\n # Constants\n cm = plt.cm.get_cmap('viridis')\n plot_dict = param_dict['plot_dict']\n ham_color = 'red'\n alpha = 0.6\n alpha_mass = 0.2\n zorder_mass = 10\n zorder_shade = zorder_mass - 1\n zorder_ml = zorder_mass + 1\n bin_width = param_dict['ml_args'].mass_bin_width\n ##\n ## Figure name\n fname = os.path.join( proj_dict['figure_dir'],\n 'Fig_{0}a_{1}_group_mass_comparison.{2}'.format(\n fig_number,\n param_dict['catl_str_fig'],\n fig_fmt))\n ##\n ## Paper Figure\n fname_paper = os.path.join( proj_dict['paper_fig_dir'],\n 'Figure_01a.{0}'.format(fig_fmt))\n ##\n ## Reading in 'master' combined catalogue\n catl_pd_tot = param_dict['ml_args'].extract_merged_catl_info(\n opt='combined')\n ##\n ## Only selecting groups with `nlim_min` galaxies or larger\n if nlim_threshold:\n catl_pd_tot = catl_pd_tot.loc[(catl_pd_tot['GG_ngals'] >= nlim_min)]\n ##\n ## Temporarily fixing `GG_mdyn_rproj`\n # catl_pd_tot.loc[:, 'GG_mdyn_rproj'] /= 0.96\n ##\n ## Dropping NaN's\n catl_pd_tot.dropna(how='any', inplace=True)\n ##\n ## Selecting only 'Good' groups\n good_p_opt = int(1)\n catl_cols_arr = ['GG_pointing', 'GG_mhalo_point', 'GG_M_group',\n 'GG_mdyn_rproj', 'groupid']\n catl_pd_tot_mod = catl_pd_tot.loc[catl_pd_tot['GG_pointing'] == good_p_opt,\n catl_cols_arr].drop_duplicates().reset_index(drop=True)\n ##\n ## Selecting Masses\n # - HAM Mass -\n ham_cols = ['GG_M_group', 'GG_mhalo_point']\n ( ham_pred,\n ham_true) = catl_pd_tot_mod.loc[:, ham_cols].values.T\n ham_frac_diff = 100. * (ham_pred - ham_true) / ham_true\n # - DYN Mass -\n dyn_cols = ['GG_mdyn_rproj', 'GG_mhalo_point']\n ( dyn_pred,\n dyn_true) = catl_pd_tot_mod.loc[\n catl_pd_tot_mod['GG_mdyn_rproj'] >= 11.0,\n dyn_cols].values.T\n dyn_frac_diff = 100. 
* (dyn_pred - dyn_true) / dyn_true\n ##\n ## Choosing which mass to plot\n if (plot_opt == 'mgroup'):\n ham_x = ham_pred\n dyn_x = dyn_pred\n elif (plot_opt == 'mhalo'):\n ham_x = ham_true\n dyn_x = dyn_true\n ##\n ## Binning the data\n ## -- HAM --\n ( x_stat_ham ,\n y_stat_ham ,\n y_std_ham ,\n y_std_err_ham) = cstats.Stats_one_arr( ham_x,\n ham_frac_diff,\n base=bin_width,\n arr_len=arr_len,\n bin_statval=bin_statval)\n y1_ham = y_stat_ham - y_std_ham\n y2_ham = y_stat_ham + y_std_ham\n ## -- DYN --\n ( x_stat_dyn ,\n y_stat_dyn ,\n y_std_dyn ,\n y_std_err_dyn) = cstats.Stats_one_arr( dyn_x,\n dyn_frac_diff,\n base=bin_width,\n arr_len=arr_len,\n bin_statval=bin_statval)\n y1_dyn = y_stat_dyn - y_std_dyn\n y2_dyn = y_stat_dyn + y_std_dyn\n ## Figure details\n # Labels\n # X-label\n if (plot_opt == 'mgroup'):\n xlabel = r'\\boldmath$\\log M_{predicted} \\textrm{ - Groups}\\left[ h^{-1} M_{\\odot}\\right]$'\n elif (plot_opt == 'mhalo'):\n xlabel = r'\\boldmath$\\log M_{halo,\\textrm{true}} \\textrm{ - Groups}\\left[ h^{-1} M_{\\odot}\\right]$'\n # Y-label\n ylabel = r'Frac. Difference - Groups \\boldmath$[\\%]$'\n ##\n plt.clf()\n plt.close()\n fig = plt.figure(figsize=figsize)\n ax1 = fig.add_subplot(111, facecolor='white')\n ## Horizontal line\n ax1.axhline(y=0, color='black', linestyle='--', zorder=10)\n ##\n ## HAM Masses\n ax1.plot( x_stat_ham,\n y_stat_ham,\n color=plot_dict['color_ham'],\n linestyle='-',\n marker='o',\n zorder=zorder_mass)\n ax1.fill_between( x_stat_ham,\n y1_ham,\n y2_ham, \n color=plot_dict['color_ham'],\n alpha=alpha_mass,\n label='HAM',\n zorder=zorder_shade)\n ## Dynamical Masses\n ax1.plot( x_stat_dyn,\n y_stat_dyn,\n color=plot_dict['color_dyn'],\n linestyle='-',\n marker='o',\n zorder=zorder_mass)\n ax1.fill_between( x_stat_dyn,\n y1_dyn,\n y2_dyn, \n color=plot_dict['color_dyn'],\n alpha=alpha_mass,\n label='Dynamical',\n zorder=zorder_shade)\n ## Legend\n leg = ax1.legend(loc='upper left', numpoints=1, frameon=False,\n prop={'size':14})\n leg.get_frame().set_facecolor('none')\n ## Ticks\n # Y-axis\n xaxis_major_ticker = 1\n xaxis_minor_ticker = 0.2\n ax_xaxis_major_loc = ticker.MultipleLocator(xaxis_major_ticker)\n ax_xaxis_minor_loc = ticker.MultipleLocator(xaxis_minor_ticker)\n ax1.xaxis.set_major_locator(ax_xaxis_major_loc)\n ax1.xaxis.set_minor_locator(ax_xaxis_minor_loc)\n # Y-axis\n yaxis_major_ticker = 5\n yaxis_minor_ticker = 2\n ax_yaxis_major_loc = ticker.MultipleLocator(yaxis_major_ticker)\n ax_yaxis_minor_loc = ticker.MultipleLocator(yaxis_minor_ticker)\n ax1.yaxis.set_major_locator(ax_yaxis_major_loc)\n ax1.yaxis.set_minor_locator(ax_yaxis_minor_loc)\n ## Labels\n ax1.set_xlabel(xlabel, fontsize=plot_dict['size_label'])\n ax1.set_ylabel(ylabel, fontsize=plot_dict['size_label'])\n ##\n ## Limits\n ax1.set_ylim(-10, 10)\n ##\n ## Saving figure\n if fig_fmt=='pdf':\n plt.savefig(fname, bbox_inches='tight')\n plt.savefig(fname_paper, bbox_inches='tight')\n else:\n plt.savefig(fname, bbox_inches='tight', dpi=400)\n plt.savefig(fname_paper, bbox_inches='tight', dpi=400)\n ##\n ##\n print('{0} Figure saved as: {1}'.format(file_msg, fname))\n print('{0} Paper Figure saved as: {1}'.format(file_msg, fname_paper))\n plt.clf()\n plt.close()", "def _get_grouper(obj, key=None, axis=0, level=None, sort=True):\n\n # The implementation is essentially the same as pandas.core.groupby\n\n group_axis = obj._get_axis(axis)\n\n # validate thatthe passed level is compatible with the passed\n # axis of the object\n if level is not None:\n if not 
isinstance(group_axis, MultiIndex):\n if isinstance(level, compat.string_types):\n if obj.index.name != level:\n raise ValueError('level name %s is not the name of the '\n 'index' % level)\n elif level > 0:\n raise ValueError('level > 0 only valid with MultiIndex')\n\n level = None\n key = group_axis\n\n # a passed in Grouper, directly convert\n if isinstance(key, Grouper):\n binner, grouper, obj = key._get_grouper(obj)\n if key.key is None:\n return grouper, [], obj\n else:\n return grouper, set([key.key]), obj\n\n # already have a BaseGrouper, just return it\n elif isinstance(key, BaseGrouper):\n return key, [], obj\n\n if not isinstance(key, (tuple, list)):\n keys = [key]\n else:\n keys = key\n\n # what are we after, exactly?\n match_axis_length = len(keys) == len(group_axis)\n any_callable = any(callable(g) or isinstance(g, dict) for g in keys)\n any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))\n for g in keys)\n\n try:\n if isinstance(obj, DataFrame):\n all_in_columns = all(g in obj.columns for g in keys)\n else:\n all_in_columns = False\n except Exception:\n all_in_columns = False\n\n if (not any_callable and not all_in_columns\n and not any_arraylike and match_axis_length\n and level is None):\n keys = [com._asarray_tuplesafe(keys)]\n\n if isinstance(level, (tuple, list)):\n if key is None:\n keys = [None] * len(level)\n levels = level\n else:\n levels = [level] * len(keys)\n\n groupings = []\n exclusions = []\n\n # if the actual grouper should be obj[key]\n def is_in_axis(key):\n if not _is_label_like(key):\n try:\n obj._data.items.get_loc(key)\n except Exception:\n return False\n\n return True\n\n # if the the grouper is obj[name]\n def is_in_obj(gpr):\n try:\n return id(gpr) == id(obj[gpr.name])\n except Exception:\n return False\n\n for i, (gpr, level) in enumerate(zip(keys, levels)):\n\n if is_in_obj(gpr): # df.groupby(df['name'])\n in_axis, name = True, gpr.name\n exclusions.append(name)\n\n elif is_in_axis(gpr): # df.groupby('name')\n in_axis, name, gpr = True, gpr, obj[gpr]\n exclusions.append(name)\n\n else:\n in_axis, name = False, None\n\n if com.is_categorical_dtype(gpr) and len(gpr) != len(obj):\n raise ValueError(\n \"Categorical dtype grouper must have len(grouper) == len(data)\")\n\n ping = Grouping(group_axis, gpr, obj=obj, name=name,\n level=level, sort=sort, in_axis=in_axis)\n\n groupings.append(ping)\n\n if len(groupings) == 0:\n raise ValueError('No group keys passed!')\n\n # create the internals grouper\n # Modified to insert CustomGrouper\n grouper = CustomGrouper(group_axis, groupings, sort=sort)\n return grouper, exclusions, obj", "def __call__(self, results):\n\n if random.uniform(0, 1) > self.prob:\n return results\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n\n h, w, c = img.shape\n ratio = random.uniform(self.min_ratio, self.max_ratio)\n # speedup expand when meets large image\n if np.all(self.mean == self.mean[0]):\n expand_img = np.empty((int(h * ratio), int(w * ratio), c),\n img.dtype)\n expand_img.fill(self.mean[0])\n else:\n expand_img = np.full((int(h * ratio), int(w * ratio), c),\n self.mean,\n dtype=img.dtype)\n left = int(random.uniform(0, w * ratio - w))\n top = int(random.uniform(0, h * ratio - h))\n expand_img[top:top + h, left:left + w] = img\n\n results['img'] = expand_img\n # expand bboxes\n for key in results.get('bbox_fields', []):\n results[key] = results[key] + np.tile(\n (left, top), 
2).astype(results[key].dtype)\n\n # expand masks\n for key in results.get('mask_fields', []):\n results[key] = results[key].expand(\n int(h * ratio), int(w * ratio), top, left)\n\n # expand segs\n for key in results.get('seg_fields', []):\n gt_seg = results[key]\n expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),\n self.seg_ignore_label,\n dtype=gt_seg.dtype)\n expand_gt_seg[top:top + h, left:left + w] = gt_seg\n results[key] = expand_gt_seg\n return results", "def Adjust_Data(img,mask,feature_dict, normalize):\n ## Normalize image\n if normalize:\n img = Normalize_Image(img)\n\n ## Assume mask shape has 4 dimensions - mask is (batch, x, y, color-channel)\n ## color-channels are redundant, so just choose the first. \n mask = mask[:,:,:,0]\n \n ## Image_datagen performs interpolation when rotating, resulting in non-integer\n ## mask values. Round these back to integers before expanding the mask. \n mask = mask.round() \n mask = Expand_Mask(mask, feature_dict)\n #print(mask.shape, np.unique(mask, axis = 0))\n return (img,mask)", "def normalize_spectra(self, spectra):\n self._compute_hull(spectra)\n normalized_spectra = Spectra()\n for p in self._points:\n normalized_spectra.add_point(self._normalize_point(p))\n return normalized_spectra" ]
[ "0.6445774", "0.48843622", "0.4852263", "0.48259893", "0.4770748", "0.47455063", "0.4726611", "0.4720005", "0.4644569", "0.46267", "0.45563284", "0.4543702", "0.454177", "0.45273983", "0.4494524", "0.4494449", "0.44733185", "0.44697282", "0.44692782", "0.44428465", "0.44359696", "0.4435475", "0.44344687", "0.44155663", "0.4409761", "0.43901405", "0.43896997", "0.4389262", "0.43817148", "0.43754458", "0.43684897", "0.43568775", "0.43475693", "0.4347206", "0.43450195", "0.43207496", "0.4314344", "0.43061998", "0.43021852", "0.42772564", "0.42736396", "0.42679062", "0.42644557", "0.42626014", "0.4257663", "0.42399475", "0.42381248", "0.4236802", "0.42302412", "0.4230134", "0.4230134", "0.42291063", "0.4226306", "0.42250884", "0.4220287", "0.4211828", "0.42090997", "0.4205441", "0.42017174", "0.42008767", "0.42001534", "0.41826072", "0.41761774", "0.41740185", "0.41706145", "0.41680807", "0.4165859", "0.41573024", "0.41478342", "0.41436803", "0.41424477", "0.4141989", "0.4141492", "0.41412953", "0.41412953", "0.41378307", "0.41353065", "0.4134682", "0.41317853", "0.41314372", "0.41309363", "0.41260275", "0.41146797", "0.41143626", "0.4112444", "0.41120806", "0.41026363", "0.4102362", "0.40970078", "0.40932018", "0.40932018", "0.4093122", "0.40876088", "0.4085639", "0.40821508", "0.40815064", "0.40766025", "0.40754908", "0.4075296", "0.4064513" ]
0.689226
0
Gets the normalization options for a LabelledData object by traversing the object to find elements and their ids. The id is then used to select the appropriate OptionsTree, accumulating the normalization options into a dictionary. Returns a dictionary of normalization options for each element in the tree.
def _get_norm_opts(self, obj): norm_opts = {} # Get all elements' type.group.label specs and ids type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False), util.label_sanitizer(x.label, escape=False))) \ if isinstance(x, Element) else None element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn) if idspec is not None} # Group elements specs by ID and override normalization # options sequentially key_fn = lambda x: -1 if x[0] is None else x[0] id_groups = groupby(sorted(element_specs, key=key_fn), key_fn) for gid, element_spec_group in id_groups: gid = None if gid == -1 else gid group_specs = [el for _, el in element_spec_group] backend = self.renderer.backend optstree = Store.custom_options( backend=backend).get(gid, Store.options(backend=backend)) # Get the normalization options for the current id # and match against customizable elements for opts in optstree: path = tuple(opts.path.split('.')[1:]) applies = any(path == spec[:i] for spec in group_specs for i in range(1, 4)) if applies and 'norm' in opts.groups: nopts = opts['norm'].options if 'axiswise' in nopts or 'framewise' in nopts: norm_opts.update({path: (nopts.get('axiswise', False), nopts.get('framewise', False))}) element_specs = [spec for _, spec in element_specs] norm_opts.update({spec: (False, False) for spec in element_specs if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))}) return norm_opts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dictize(self):\n dict = {}\n for node in self.sort():\n logger.debug(\"Dictize: id %s has name %s\" % (node._id, node.name))\n x = node._kwargs()\n dict[node._id]={\"klass\":node.__class__.__name__, \n \"kwargs\": x,\n \"children\":[child._id for child in node.children()]}\n return dict", "def collect_children_by_id(self):\n self.children_by_id = {}\n self.root_by_id = {}\n self.ns_for_root_id = {}\n\n def recursive_fill_root_id(entry):\n root_id = self.root_by_id.get(entry.mount_id)\n if root_id is not None:\n return root_id\n\n if entry.parent_id == entry.mount_id:\n # self-referencing is a root\n root_id = entry.mount_id\n self.root_by_id[root_id] = root_id\n return root_id\n\n parent_entry = self.items.get(entry.parent_id)\n if parent_entry is None:\n # The parent is unknown, so it is an implicit root\n root_id = entry.mount_id\n self.root_by_id[root_id] = root_id\n return root_id\n\n root_id = recursive_fill_root_id(parent_entry)\n self.root_by_id[entry.mount_id] = root_id\n return root_id\n\n for entry in self.items.values():\n if entry.parent_id not in self.children_by_id:\n self.children_by_id[entry.parent_id] = {}\n self.children_by_id[entry.parent_id][entry.mount_id] = entry.abs_mount_point(no_question=True)\n root_id = recursive_fill_root_id(entry)\n if root_id not in self.ns_for_root_id:\n self.ns_for_root_id[root_id] = set()\n self.ns_for_root_id[root_id].add(entry.mount_ns)\n\n # Sanity check\n assert len(self.items) == len(self.root_by_id)", "def clean(self, data: BaseModel, id: Optional[str] = None) -> Dict[str, Any]:\n r = {}\n for name, value in data.dict().items():\n for fn in self.cleaners[name]:\n value = fn(value)\n r[name] = value\n return r", "def get_descendant_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def _mapping(self, pdb_id, loop_type, normalizer):\n\n mapping = {}\n with self.session() as session:\n query = self.query(session, pdb_id).filter_by(type=loop_type)\n for result in query:\n unit_ids = normalizer(result.unit_ids)\n if unit_ids in mapping:\n self.logger.error(\"Loop %s duplicates %s\",\n result.loop_id, mapping[unit_ids])\n continue\n mapping[unit_ids] = result.loop_id\n return mapping", "def normalize_features(self, data_dict, ind):\n pre_norm_list = []\n for title in data_dict:\n pre_norm_list.append(data_dict[title][ind])\n if self.normalization_method == 'min_max':\n mini, maxi, norm_list = normalize.min_max_normalize(pre_norm_list)\n self.normalization_n.append(mini)\n self.normalization_d.append(maxi - mini)\n elif self.normalization_method == 'z_score':\n mean, var, norm_list = normalize.z_score_normalize(pre_norm_list)\n self.normalization_n.append(mean)\n self.normalization_d.append(var)\n elif self.normalization_method == 'none':\n norm_list = pre_norm_list[:]\n self.normalization_n.append(0)\n self.normalization_d.append(1)\n for i, title in enumerate(data_dict):\n data_dict[title][ind] = norm_list[i]", "def normalization(obj):\n dic = obj.mainfield.para_dict.copy()\n for item in obj.forfield: dic.update(item.para_dict)\n for item in obj.existfield: dic.update(item.para_dict)\n\n global_dic = number_type(dic)\n obj.normal_guards = norm_rep(global_dic, obj.all_sentence)\n\n main_dic = number_type(obj.mainfield.para_dict)\n obj.mainfield.content = norm_rep(main_dic, obj.mainfield.content)\n\n for index in range(len(obj.forfield)):\n obj.forfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # for_dic = number_type(temp_dic)\n obj.forfield[index].content = 
norm_rep(global_dic, obj.forfield[index].content)\n print(global_dic, obj.forfield[index].para_dict)\n obj.forfield[index].para_dict = pair_2_dict(global_dic, obj.forfield[index].para_dict)\n\n for index in range(len(obj.existfield)):\n obj.existfield[index].para_dict.update(obj.mainfield.para_dict)\n # temp_dic.update(obj.mainfield.para_dict)\n # exist_dic = number_type(temp_dic)\n obj.existfield[index].content = norm_rep(global_dic, obj.existfield[index].content)\n obj.existfield[index].para_dict = pair_2_dict(global_dic, obj.existfield[index].para_dict)\n\n # change para_dict: {'i':'NODE} -> {'NODE_1', 'NODE'}\n obj.mainfield.para_dict = pair_2_dict(global_dic, obj.mainfield.para_dict)", "def get_list_by_id(driver, id):\n try:\n flat_list = [\n a.get_attribute(\"value\")\n for a in driver.find_element_by_id(id).find_elements_by_tag_name(\"option\")\n ]\n return flat_list\n except Exception:\n return []", "def _get_identifiers_from_kbs(self) -> dict:\n id_mapping_dict = defaultdict(set)\n\n for kb in self.kbs:\n sys.stdout.write('\\n%s \\n' % kb.name)\n for p in tqdm.tqdm(kb.pathways, total=len(kb.pathways)):\n for ent in p.entities:\n id_set = list(set(ent.xrefs))\n if len(id_set) == 1:\n id_mapping_dict[id_set.pop()] = set([])\n for p, q in itertools.combinations(id_set, 2):\n id_mapping_dict[p].add(q)\n id_mapping_dict[q].add(p)\n\n return id_mapping_dict", "def normalize(ds, config):\n logger.info(\"Applying normalization with config:\\n %s\", _dict_to_logstring(config))\n\n key = config[\"key\"]\n def _normalize(x):\n return dict(x, **{key: features.cmvn(x[key], **config.get(\"kwargs\", {}))})\n\n return (ds.batch(config.get(\"batch_size\", 1))\n .map(_normalize, num_parallel_calls=TF_AUTOTUNE)\n .unbatch())", "def words_to_word_ids(data, word_to_id):\n # if isinstance(data[0], six.string_types):\n # print(type(data[0]))\n # # exit()\n # print(data[0])\n # print(word_to_id)\n # return [word_to_id[str(word)] for word in data]\n # else:\n return [word_to_id[word] for word in data]\n\n # if isinstance(data[0], str):\n # # print('is a string object')\n # return [word_to_id[word] for word in data]\n # else:#if isinstance(s, bytes):\n # # print('is a unicode object')\n # # print(data[0])\n # return [word_to_id[str(word)] f", "def flatten(orig):\n\n\t# Empty dictionary\n\tdata = {}\n\tfor c in orig['tree']['children']:\n\t\t# in operator\n\t\tif 'children' in c:\n\t\t\tfor c2 in c['children']:\n\t\t\t\tif 'children' in c2:\n\t\t\t\t\tfor c3 in c2['children']:\n\t\t\t\t\t\tif 'children' in c3:\n\t\t\t\t\t\t\tfor c4 in c3['children']:\n\t\t\t\t\t\t\t\tif (c4['category'] == 'personality'):\n\t\t\t\t\t\t\t\t\tdata[c4['id']] = c4['percentage']\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif (c3['category'] == 'personality'):\n\t\t\t\t\t\t\t\tdata[c3['id']] = c3['percentage']\n\n\treturn data", "def unflatten_to_tree(df, label_map=None, label_col='label', id_col='id'):\r\n\r\n tf_df = df.filter(like='level')\r\n n_lvls = len(tf_df.columns)\r\n lvl_list = range(n_lvls)\r\n\r\n # Construct all nodes\r\n uniq_ids = pd.Series(pd.unique(tf_df.values.ravel()))\r\n uniq_ids = uniq_ids.dropna()\r\n\r\n if label_map is not None:\r\n assert len(set(uniq_ids)-set(label_map[label_col].unique()))==0, '''\r\n If a label_map is specified, all labels in df must\r\n be present in the map '''\r\n rdict = { r[label_col]: r[id_col] for i, r in label_map.iterrows() }\r\n tf_df = tf_df.replace(rdict)\r\n uniq_ids = pd.Series(pd.unique(tf_df.values.ravel()))\r\n uniq_ids = uniq_ids.dropna()\r\n uniq_ids = 
uniq_ids.astype('int')\r\n\r\n assert len(tf_df['level_0'].unique())==1, '''there can only be\r\n one level_0 id'''\r\n root_id = tf_df['level_0'].unique()[0]\r\n\r\n nodes = {}\r\n for nid in uniq_ids:\r\n nodes[nid] = Node(nid, {}, None)\r\n\r\n # Make relationships\r\n for i in lvl_list:\r\n lvl_col = 'level_%s' % i\r\n nxtlvl_col = 'level_%s' % (i+1)\r\n assert ~tf_df[lvl_col].isin(tf_df.drop(lvl_col, axis=1)).any(), '''\r\n ids cannot span multiple levels'''\r\n\r\n if i<lvl_list[-1]:\r\n for pnid in tf_df[lvl_col].unique():\r\n child_locs = pd.Series(tf_df.ix[tf_df[lvl_col]==pnid,\r\n nxtlvl_col].unique()).dropna()\r\n for cnid in child_locs:\r\n nodes[cnid].parent = nodes[pnid]\r\n nodes[pnid].add_child(nodes[cnid])\r\n\r\n t = Tree(nodes[root_id])\r\n return t", "def get_label_set(corpusID):\n existing_corpus = DBcorpus.query.get_or_404(corpusID)\n corpus_data = fix_corpus_format(CorpusSchema().dump(existing_corpus).data)\n\n results = []\n for label in labels_set(existing_corpus):\n results.append(LabelSchema().dump(label).data)\n\n return {\"corpus\": corpus_data, \"labels\": results }, 200", "def get_descendant_objective_bank_id_terms(self):\n return # osid.search.terms.IdTerm", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def id_forms(self, pydic_id):\n try:\n return self.dictionaries[pydic_id.dict].id_forms(pydic_id)\n except (ValueError, KeyError):\n return None", "def preprocess_data(self):\n\n selected_data = []\n selected_name = []\n quant_norm_applied = []\n\n rgb_color_to_keys = self.get_rgb_items_for_plot()\n for data_key in rgb_color_to_keys.values():\n if data_key in self.dict_to_plot:\n selected_name.append(data_key)\n\n if self.scaler_data is not None:\n if np.count_nonzero(self.scaler_data) == 0:\n logger.warning(\"scaler is zero - scaling was not applied\")\n elif len(self.scaler_data[self.scaler_data == 0]) > 0:\n logger.warning(\"scaler data has zero values\")\n\n for i, k in enumerate(selected_name):\n q_norm_applied = False\n if self.quantitative_normalization:\n # Quantitative normalization\n (\n data_arr,\n q_norm_applied,\n ) = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(\n data_in=self.dict_to_plot[k],\n scaler_dict=self.scaler_norm_dict,\n scaler_name_default=self.get_selected_scaler_name(),\n data_name=k,\n ref_name=self.quantitative_ref_eline,\n name_not_scalable=self.name_not_scalable,\n )\n else:\n # Normalize by the selected scaler in a regular way\n data_arr = normalize_data_by_scaler(\n data_in=self.dict_to_plot[k],\n scaler=self.scaler_data,\n data_name=k,\n name_not_scalable=self.name_not_scalable,\n )\n\n selected_data.append(data_arr)\n quant_norm_applied.append(q_norm_applied)\n\n return selected_data, selected_name, rgb_color_to_keys, quant_norm_applied", "def getAttributsByIdref(self, id) :\n\t\t# if id in self.lid.keys() :\n\t\t# \treturn self.lid[id]\n\t\t# else :\n\t\treturn self._getIdrefs(self.doc.documentElement, id)", "def get_ancestor_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n 
continue\n # Put the attribute inside the group\n grpname, attr = key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def get_translated_ids(id):", "def get_descendant_agency_id_terms(self):\n return # osid.search.terms.IdTerm", "def compute_ranges(self, obj, key, ranges):\n all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element]))\n if obj is None or not self.normalize or all_table:\n return OrderedDict()\n # Get inherited ranges\n ranges = self.ranges if ranges is None else dict(ranges)\n\n # Get element identifiers from current object and resolve\n # with selected normalization options\n norm_opts = self._get_norm_opts(obj)\n\n # Traverse displayed object if normalization applies\n # at this level, and ranges for the group have not\n # been supplied from a composite plot\n return_fn = lambda x: x if isinstance(x, Element) else None\n for group, (axiswise, framewise) in norm_opts.items():\n elements = []\n # Skip if ranges are cached or already computed by a\n # higher-level container object.\n framewise = framewise or self.dynamic\n if group in ranges and (not framewise or ranges is not self.ranges):\n continue\n elif not framewise: # Traverse to get all elements\n elements = obj.traverse(return_fn, [group])\n elif key is not None: # Traverse to get elements for each frame\n frame = self._get_frame(key)\n elements = [] if frame is None else frame.traverse(return_fn, [group])\n if not axiswise or ((not framewise or len(elements) == 1)\n and isinstance(obj, HoloMap)): # Compute new ranges\n self._compute_group_range(group, elements, ranges)\n self.ranges.update(ranges)\n return ranges", "def get_level_id_terms(self):\n return # osid.search.terms.IdTerm", "def labels_set(self):\n if len(self.children) == 0:\n return {self.label}\n else:\n children_labels = set()\n for c in self.children:\n children_labels = children_labels | c.labels_set()\n return set([self.label]) | children_labels", "def get_equivalent_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def prepare_value(self, data):\n\n if data is None:\n data = {}\n\n relations = self.instance.get_related(self.relation_type)\n\n relations = [x for x in relations if x not in data.get('rm', [])]\n\n relations += data.get('add', [])\n\n return [{'label': rel.title, 'value': object_to_urn(rel)} for rel in\n relations if rel]", "def build_hierarchy_from_id_lookup(id_lookup_file=\"idlookups.csv\"):\n df_id_lookups = pd.read_csv(id_lookup_file, index_col=0)\n\n # The naming convention separates layers of the hierarchy with a colon ':', so we can break this into a list of descendents, and calculate the depth of the tree.\n df_id_lookups[\"parsed_name\"] = df_id_lookups.name.apply(lambda s: s.split(\": \"))\n df_id_lookups[\"depth\"] = df_id_lookups.parsed_name.apply(lambda d: len(d))\n\n # The two top nodes \"Biota\" and \"Physical\" are not prepended to their children, so we need to do this manually.\n # Manually define biota and physical children\n biota_kids = [\n \"Worms\",\n \"Sponges\",\n \"Seagrasses\",\n \"Molluscs\",\n \"Macroalgae\",\n \"Jellies\",\n \"Fishes\",\n \"Echinoderms\",\n \"Crustacea\",\n \"Cnidaria\",\n \"Bryozoa\",\n \"Bioturbation\",\n \"Bacterial mats\",\n \"Ascidians\",\n ]\n\n physical_kids = [\"Substrate\"]\n\n # Prepend them to name lists, and add to depth.\n biota_inds = df_id_lookups.parsed_name.apply(lambda d: d[0] in biota_kids)\n df_id_lookups.loc[biota_inds, 
\"depth\"] += 1\n df_id_lookups.loc[biota_inds, \"parsed_name\"] = df_id_lookups.loc[biota_inds, \"parsed_name\"].apply(\n lambda d: [\"Biota\"] + d\n )\n\n physical_inds = df_id_lookups.parsed_name.apply(lambda d: d[0] in physical_kids)\n df_id_lookups.loc[physical_inds, \"depth\"] += 1\n df_id_lookups.loc[physical_inds, \"parsed_name\"] = df_id_lookups.loc[physical_inds, \"parsed_name\"].apply(\n lambda d: [\"Physical\"] + d\n )\n\n # Create columns for ancestor and descendant lists.\n df_id_lookups[\"child_name\"] = df_id_lookups.parsed_name.apply(lambda d: d[-1])\n\n df_id_lookups[\"ancestor_id_list\"] = [get_ancestor_ids(d, df_id_lookups) for d in df_id_lookups.index]\n\n df_id_lookups[\"descendant_id_list\"] = [get_descendant_ids(d, df_id_lookups) for d in df_id_lookups.index]\n\n # Create a multilabel, one hot encoded bit vector for each class, taking into account the hierarchy of ancestors, and unspecified descendants.\n # We now want to represent this class hierarchy as a bit-vector. Each class index has a unique bit in the vector. A root level class will turn on a single bit. A depth 4 class will turn on 4 bits.\n df_id_lookups[\"bit_vector\"] = [get_bit_vector(d, df_id_lookups) for d in df_id_lookups.index]\n df_id_lookups\n\n return df_id_lookups", "def word_ids_to_words(data, id_to_word):\n return [id_to_word[i] for i in data]", "def get_ancestor_objective_bank_id_terms(self):\n return # osid.search.terms.IdTerm", "def Rescaling_Normalization(data: pd.DataFrame) -> pd.DataFrame:\n\n labels = data.pop('Labels')\n\n norm_data = (data - data.min()) / (data.max() - data.min())\n\n norm_data['Labels'] = labels\n\n return norm_data", "def find_parent_terms(go_id, go_dict):\n\n go_set = set()\n values = go_dict[go_id]\n\n for value in values:\n go_set.add(value)\n more_values = find_parent_terms(value, go_dict)\n for more_value in more_values:\n go_set.add(more_value)\n\n return go_set", "def normalizeData(self, data):\n return _normalizeData(data)", "def normalizeData(self, data):\n return _normalizeData(data)", "def get_tree_data(data, attributes):\n for attr in attributes:\n if type(attr) is int:\n if attr < len(data):\n data = data[attr]\n else:\n return None\n elif type(attr) is list:\n datas = {}\n for key in attr:\n if key in data:\n datas[key] = data[key]\n return datas\n elif attr in data:\n data = data[attr]\n else:\n return None\n return data", "def encode_labels(self):\n normalized_encoder = {}\n normalized_decoder = {}\n decoder = dict(self.tree.nodes(data=\"name\"))\n encoder = dict([(value, key) for key, value in decoder.items()])\n\n leaf_nodes = [node[0] for node in self.tree.out_degree(self.tree.nodes()) if node[1] == 0]\n leaf_nodes = [decoder[node] for node in leaf_nodes]\n\n counter = 0\n longest_path = 0\n for key in encoder:\n if key in leaf_nodes:\n path = self.determine_path_to_root([encoder[key]])\n if 'exploit_hierarchy' in self.parameter and self.parameter['exploit_hierarchy']:\n path = self.normalize_path_from_root_per_parent(path)\n\n normalized_encoder[key] = {'original_key': encoder[key], 'derived_key': counter,\n 'derived_path': path}\n normalized_decoder[counter] = {'original_key': encoder[key], 'value': key}\n if len(path) > longest_path:\n longest_path = len(path)\n\n counter += 1\n\n # Align path length\n fill_up_category = len(self.tree)\n\n for key in normalized_encoder:\n while len(normalized_encoder[key]['derived_path']) < longest_path:\n normalized_encoder[key]['derived_path'].append(fill_up_category)\n\n # Total number of labels is 
determined by the number of labels in the tree + 1 for out of category\n number_of_labels = len(self.tree) + 1\n\n return normalized_encoder, normalized_decoder, number_of_labels", "def import_labels():\n\n dict_labels = df.set_index('id').to_dict()['breed']\n unique_labels = sorted(list(set(dict_labels.values())))\n for index, label in dict_labels.items():\n dict_labels[index] = unique_labels.index(label)\n return dict_labels, unique_labels", "def readFromMd(self, md, objId):\n self._labelDict.clear()\n self._objId = objId\n \n for label in md.getActiveLabels():\n self._labelDict[label] = md.getValue(label, objId)", "def normalize(data):\n data = lowercase(data)\n data = remove_punct(data)\n data = remove_apostrophes(data)\n data = remove_stopwords(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data)\n data = num_to_words(data)\n data = lemmatize(data)\n data = stemming(data)\n data = remove_punct(data) #done again to remove hyphens produced by num2words\n data = remove_stopwords(data) #done agan to remove stopwords produced by num2words\n return data", "def _fetch_placeholders_from_ids(self, obj):\n return Role.nested_object_traversal(\n obj, lambda x: self.placeholders[x.value], PlaceholderId\n )", "def show_tree(data):\r\n tree_dict = []\r\n ids = []\r\n for i in data:\r\n d = dict()\r\n if i[0] in ids:\r\n continue\r\n ids.append(i[0])\r\n d[\"id\"] = i[0]\r\n d[\"name\"] = i[1]\r\n d[\"parent_id\"] = i[2]\r\n tree_dict.append(d)\r\n return {\"nodes\": tree_dict}", "def processData(data):\n ids, instances, labels = [], [], []\n for i in data:\n idField = int(i[0])\n instance = i[1:-1]\n label = i[-1]\n ids.append(idField)\n instances.append(instance)\n labels.append(label)\n\n ids = np.array(ids)\n instances = np.array(instances)\n labels = np.array(labels)\n\n return (ids, instances, labels)", "def get_options(self):\n additional_data = self.get_additional_data()\n options_out = []\n for key, value in additional_data['DIMENSION_VALUES'].items():\n key_label = ' '.join(key.split('_')).strip().title()\n data = {'specification': key_label, 'specification_key': key, 'choices': value}\n options_out.append(data)\n return options_out", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def _set_level_depth(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n\n def _init_level(rec):\n if rec.level is None:\n if rec.parents:\n rec.level = min(_init_level(rec) for rec in rec.parents) + 1\n else:\n rec.level = 0\n return rec.level\n\n def _init_depth(rec):\n if rec.depth is None:\n if rec.parents:\n rec.depth = max(_init_depth(rec) for rec in rec.parents) + 1\n else:\n rec.depth = 0\n return rec.depth\n\n def _init_reldepth(rec):\n if not hasattr(rec, 'reldepth'):\n up_terms = rec.get_goterms_upper()\n if up_terms:\n rec.reldepth = max(_init_reldepth(rec) for rec in up_terms) + 1\n else:\n rec.reldepth = 0\n return rec.reldepth\n\n for rec in self.values():\n\n # Add invert relationships\n if has_relationship:\n if rec.depth is None:\n _init_reldepth(rec)\n\n # print(\"BBBBBBBBBBB1\", rec.id, rec.relationship)\n #for (typedef, terms) in rec.relationship.items():\n # invert_typedef = self.typedefs[typedef].inverse_of\n # # print(\"BBBBBBBBBBB2 {} ({}) ({}) ({})\".format(\n # # rec.id, rec.relationship, typedef, 
invert_typedef))\n # if invert_typedef:\n # # Add inverted relationship\n # for term in terms:\n # if not hasattr(term, 'relationship'):\n # term.relationship = defaultdict(set)\n # term.relationship[invert_typedef].add(rec)\n # print(\"BBBBBBBBBBB3\", rec.id, rec.relationship)\n\n if rec.level is None:\n _init_level(rec)\n\n if rec.depth is None:\n _init_depth(rec)", "def fromRootToListDic(t,labels = []):\r\n print \"creating list of dictionaries from Root Tuple\"\r\n N=t.GetEntries()\r\n labels2 = []\r\n list=[]\r\n a=t.GetListOfBranches()\r\n if not labels:\r\n\t for branch in a:\r\n\t\t labels.append(branch.GetName())\r\n for label in labels:\r\n\t if hasattr(t,label): labels2.append(label)\r\n\t else: print \"ignoring \", label\r\n for i in range(N):\r\n thing={}\r\n t.GetEntry(i)\r\n for label in labels2:\r\n thing[label]=getattr(t,label)\r\n list.append(thing)\r\n labels = []\r\n return list", "def translate_ids(dwi):\n def get_ids(*args):\n \"\"\"Translates the ids to the format the :class:`DataWrapper` instance expects.\n\n Arguments:\n *args (int): integers with :obj:`Detection` ids\n\n Returns:\n list of int or str: A list with the ids in the expected format.\n \"\"\"\n return [ids[i] for i in args]\n if isinstance(dwi, DataWrapperTracks):\n dwi = dwi.data\n if isinstance(dwi, DataWrapperPandas):\n values = detections_clean.id\n elif isinstance(dwi, DataWrapperBinary):\n values = detections_clean.generatedID\n else:\n values = detections_clean.generatedID\n ids = {key: val for key, val in zip(detections_clean.id.tolist(), values)}\n return get_ids", "def from_etree(self, data):\r\n if data.tag == 'request':\r\n # if \"object\" or \"objects\" exists, return deserialized forms.\r\n elements = data.getchildren()\r\n for element in elements:\r\n if element.tag in ('object', 'objects'):\r\n return self.from_etree(element)\r\n return dict((element.tag, self.from_etree(element)) for element in elements)\r\n elif data.tag == 'object' or data.get('type') == 'hash':\r\n return dict((element.tag, self.from_etree(element)) for element in data.getchildren())\r\n elif data.tag == 'objects' or data.get('type') == 'list':\r\n return [self.from_etree(element) for element in data.getchildren()]\r\n else:\r\n type_string = data.get('type')\r\n if type_string in ('string', None):\r\n return data.text\r\n elif type_string == 'integer':\r\n return int(data.text)\r\n elif type_string == 'float':\r\n return float(data.text)\r\n elif type_string == 'boolean':\r\n if data.text == 'True':\r\n return True\r\n else:\r\n return False\r\n else:\r\n return None", "def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True):\n def lookup(x):\n \"\"\"\n Looks up options for object, including plot defaults,\n keyfn determines returned key otherwise None key is used.\n \"\"\"\n options = cls.lookup_options(x, opt_type)\n selected = {o: options.options[o]\n for o in opts if o in options.options}\n if opt_type == 'plot' and defaults:\n plot = Store.registry[cls.backend].get(type(x))\n selected['defaults'] = {o: getattr(plot, o) for o in opts\n if o not in selected and hasattr(plot, o)}\n key = keyfn(x) if keyfn else None\n return (key, selected)\n\n # Traverse object and accumulate options by key\n traversed = obj.traverse(lookup, specs)\n options = defaultdict(lambda: defaultdict(list))\n default_opts = defaultdict(lambda: defaultdict(list)) \n for key, opts in traversed:\n defaults = opts.pop('defaults', {})\n for opt, v in opts.items():\n options[key][opt].append(v)\n for opt, v in 
defaults.items():\n default_opts[key][opt].append(v)\n\n # Merge defaults into dictionary if not explicitly specified\n for key, opts in default_opts.items():\n for opt, v in opts.items():\n if opt not in options[key]:\n options[key][opt] = v\n return options if keyfn else options[None]", "def mk_id_lookups(self):\n id_lookups = {}\n for ns in self.ddef.keys():\n id_lookups[ns] = self.mk_id_lookup(ns)\n return id_lookups", "def idf_dict1(l):\n idf = {}\n # first look for idf in abstract field\n solrdict = list_to_dict(l)\n for doc in solrdict.itervalues():\n terms = doc.get('abstract',{})\n for w,t in terms.iteritems():\n if not idf.has_key(w):\n idf[w] = t['tf-idf'][0] / t['tf'][0]\n # next add idf values from title field\n for doc in solrdict.itervalues():\n terms = doc.get('title',{})\n for w,t in terms.iteritems():\n if not idf.has_key(w):\n idf[w] = t['tf-idf'][0] / t['tf'][0]\n return idf", "def unify_ids(pdbmodels, verbose=False):\n seq_dict = dict() # Dictionary where sequences are keys and ids are values\n if verbose:\n print(\"Unifying Ids\")\n for i in range(len(pdbmodels)):\n pdb = pdbmodels[i]\n model = CustomModel(str(i)) # Transforms model to CustomModel instance\n for chain in pdb:\n chain = CustomChain(chain) # Transforms chain to CustomChain instance\n chain.parent = None # Removes previous parent from chain\n chain_seq = chain.get_sequence()\n if chain_seq not in seq_dict:\n if not seq_dict: # If the sequence dictionary is empty\n new_id = get_new_id(seq_dict.values()) # Get first id (A)\n seq_dict[chain_seq] = new_id # Set the first sequence as key and A as value\n chain.id = new_id # Also update chain id to A\n else: # If dictionary is not empty\n sequences = seq_dict.keys()\n homolog_seq = has_homolgs(chain_seq, sequences) # Check if sequence has homology\n if homolog_seq:\n homolog_id = seq_dict[homolog_seq] # Get homolog id from seq_dict\n seq_dict[chain_seq] = homolog_id # Set this sequence with the homolog id as value\n chain.id = homolog_id # Also change chain id to homolog's\n else:\n new_id = get_new_id(seq_dict.values()) # Otherwise generate a new id\n seq_dict[chain_seq] = new_id\n chain.id = new_id\n else:\n chain.id = seq_dict[chain_seq] # If chain is already in seq_dict, update chain object id\n model.add(chain)\n pdbmodels[i] = model # Update pdbmodels list with the updated model\n if verbose:\n print(\"Ids unified\")\n return seq_dict", "def _process_nodes(self):\n # Sort the nodes by metanode type, then by id\n self.node_df = self.node_df.sort_values(['label', 'id']).reset_index(drop=True)\n # Get all the ids\n self.nodes = self.node_df['id']\n # Get mapping from the index to the node ID (one to many so need different one for each node type)\n self.index_to_nid = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.index_to_nid[group_name] = group['id'].reset_index(drop=True).to_dict()\n # Get the reverse mapping (many to one so don't need to separate based on type).\n self.nid_to_index = dict()\n for mapper in self.index_to_nid.values():\n for index, nid in mapper.items():\n self.nid_to_index[nid] = index\n # Finally, we need a mapper from id to node type\n self.id_to_metanode = self.node_df.set_index('id')['label'].to_dict()\n # And from node type to a list of ids\n self.metanode_to_ids = dict()\n for group_name, group in self.node_df.groupby('label'):\n self.metanode_to_ids[group_name] = group['id'].tolist()\n # One more mapper of id to name\n self.nid_to_name = self.node_df.set_index('id')['name'].to_dict()", "def 
normalize_descriptors(self):\n\n for key, desc_str_dict in self.descriptors.items():\n self.config[key] = desc_str_dict['normalized-string']", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def normalize_units(data):\n for obj in data:\n obj['unit'] = normalize_units_function(obj.get('unit', ''))\n # for param in ds.get('parameters', {}).values():\n # if 'unit' in param:\n # param['unit'] = normalize_units_function(param['unit'])\n return data", "def _get_options_map(options: List[bs4.element.Tag]) -> OrderedDict:\n mapping = OrderedDict()\n for option in options:\n value, label = option[\"value\"], option.string\n if not value or value == \"0\" or not label:\n continue\n mapping[value] = label\n return mapping", "def extract_data(self, id: str) -> dict:\r\n\r\n raw = self.session.get(f\"{self.host}/{id}\")\r\n soup = self.soup(raw)\r\n\r\n dlx = soup.find(class_=\"dlx\")\r\n result = {}\r\n\r\n for table in dlx.findAll(\"table\"):\r\n if not table.strong:\r\n continue\r\n\r\n if (h4 := table.findPrevious(\"h4\", style=\"text-align: center\")):\r\n eps = h4.text\r\n elif (prevTable := table.findPrevious(\"table\")):\r\n eps = prevTable.text\r\n\r\n d = {}\r\n res = None\r\n for tr in table.findAll(\"tr\"):\r\n if not tr.a:\r\n res = tr.text\r\n d[res] = {}\r\n continue\r\n for a in tr.findAll(\"a\"):\r\n d[res][a.text] = a[\"href\"]\r\n result[eps] = d\r\n return result", "def ids(self, det_id):\n return [dom['DOMId'] for dom in self._json if dom['DetOID'] == det_id]", "def id(self, word):\n result = []\n for dic_name in self.dictionaries.keys():\n result += self.dictionaries[dic_name].id(word)\n return result", "def get_normalizers_trained_on_dict_data(data: Data_dict_type,\n normalizer_types: Tuple[str, ...] = ('z', 'power')) -> Tuple[object, ...]:\n # concatenate all data from different instances of dict\n concatenated_data = []\n for key, item in data.items():\n values, sample_rate = item\n concatenated_data.append(values)\n concatenated_data = np.concatenate(concatenated_data, axis=0)\n # create and fit normalizers\n normalizers = []\n for normalizer_type in normalizer_types:\n if normalizer_type == 'z':\n concatenated_data, z_normalizer = z_normalization(concatenated_data, return_scaler=True)\n normalizers.append(z_normalizer)\n elif normalizer_type == 'power':\n concatenated_data, power_normalizer = power_normalization(concatenated_data, return_scaler=True)\n normalizers.append(power_normalizer)\n elif normalizer_type == 'l2':\n normalizers.append(Normalizer(norm='l2'))\n else:\n raise AttributeError(\n 'Normalizer_types is supported only z and power normalization. Got %s.' 
% (normalizer_types))\n # clear RAM\n del concatenated_data\n gc.collect()\n return tuple(normalizers)", "def iter_subcomponents(self, id, recursive=False, only_contributing=False):\n for value in self.values:\n if isinstance(value, GroupingComponent):\n if only_contributing and not value.contributes:\n continue\n if value.id == id:\n yield value\n if recursive:\n for subcomponent in value.iter_subcomponents(id, recursive=True):\n yield subcomponent", "def get_ancestor_agency_id_terms(self):\n return # osid.search.terms.IdTerm", "def normalize_dataset(self):", "def analyze(ctx, id):\n classifiers = dict()\n for (directory, conf) in confs(ctx.obj[\"base\"]):\n with open(classifier_path(directory), 'rb') as classy:\n classifiers[conf[\"language\"]] = {\n \"classifier\": dill.load(classy),\n \"vectorizer\": get_vectorizer(conf)\n }\n records = DB.query(\"\"\"select * from ads where id = '{}'\"\"\".format(id))\n\n idx = 0\n for record in records:\n record_lang = record[\"lang\"]\n if record_lang in classifiers:\n classifier = classifiers[record_lang]\n text = clean_text(get_text(record), record[\"advertiser\"])\n permuted_texts = permute_text(text)\n vectorized_baseline_text = classifier[\"vectorizer\"].transform([text])\n baseline = classifier[\"classifier\"].predict_proba(vectorized_baseline_text)[0][1]\n\n diffs = [(deleted_word, probability_difference(classifier, permuted_text, baseline)) for (deleted_word, permuted_text) in permuted_texts]\n\n print(\"text: {}\".format(text))\n print(\"original probability: {}\".format(baseline))\n biggest_diffs = sorted(diffs, key=lambda word_diff: -abs(word_diff[1]))[:4]\n print(\"top difference-makers:\")\n for (deleted_word, permuted_text) in biggest_diffs:\n print(\" - {}, {}\".format(deleted_word, permuted_text))\n\n # for (deleted_word, permuted_text) in permuted_texts:\n # vectorized_text = classifier[\"vectorizer\"].transform([permuted_text])\n # probability = classifier[\"classifier\"].predict_proba(vectorized_text)[0][1]\n # print(\"{}: {}\".format(baseline - probability, deleted_word))", "def _get_term_depth_dictionary(self):\n\n\n\n\t\t# Find the root term(s) of the ontology.\n\t\troot_term_ids = []\n\t\tfor term in self.terms():\n\t\t\t# Check if this term has no inherited terms (is a root), discounting terms that are obsolete.\n\t\t\tinherited_terms = [t for t in term.superclasses(with_self=False)]\n\t\t\tif (len(inherited_terms)==0) and (term.name is not None) and (\"obsolete\" not in term.name):\n\t\t\t\troot_term_ids.append(term.id)\n\t\t\t\t\n\t\t# Find the depths of all terms in the ontology below those terms.\n\t\tdepths = {i:0 for i in root_term_ids}\n\t\tdepth = 1\n\t\tdone = False\n\t\twhile not done:\n\t\t\t\n\t\t\t# Add all the terms immediately below \n\t\t\tbefore = len(depths)\n\t\t\tnew_terms = []\n\t\t\tfor old_term_id in [i for i in depths.keys() if depths[i] == depth-1]:\n\t\t\t\tfor new_term_id in [t.id for t in self[old_term_id].subclasses(with_self=False,distance=1)]:\n\t\t\t\t\tif new_term_id not in depths:\n\t\t\t\t\t\tdepths[new_term_id] = depth\n\t\t\t\n\t\t\t# Increment the depth and see if any new terms were added to the distance dictionary during this pass.\n\t\t\tdepth = depth + 1\n\t\t\tafter = len(depths)\n\t\t\tif before == after:\n\t\t\t\tdone = True\n\t\t\t\t\n\t\t# Add any other remaining terms to the dictionary with a depth of 0 indicating minimal specificity.\n\t\tfor term in self.terms():\n\t\t\tif term.id not in depths:\n\t\t\t\tdepths[term.id] = 0\n\t\t\n\t\t# Return the dictionary mapping term IDs to 
their depth in the hierarchy.\n\t\treturn(depths)", "def init(self):\n if not self.needs_build:\n return self.tree\n if self.new_values and self.tree:\n ixs = self.new_values\n search_tree = self.tree\n else:\n search_tree = {}\n ixs = range(len(self.vos))\n for ix in ixs:\n vo = self.vos[ix]\n for label, label_value in vo.items():\n if label == \"value\":\n continue\n if label not in search_tree:\n search_tree[label] = defaultdict(set)\n search_tree[label][label_value].add(ix)\n self.tree = search_tree\n self.needs_build = False\n self.new_values = None\n return self.tree", "def get_hull_attributes():\n hulls = {}\n hull_table = db_parser.get_table_as_dict('hull')\n hull_loadouts = Hull.get_hull_loadouts()\n for row in hull_table:\n # Make a new nested dictionary indexed by this hull's name\n hull_name = row['hull_name']\n hulls[hull_name] = {}\n for key in row.keys():\n if key == 'hull_name':\n pass\n else:\n hulls[hull_name][key] = row[key]\n # Now add this hull's loadout to its dictionary\n hulls[hull_name]['loadout'] = hull_loadouts[hull_name]\n return hulls", "def raw_to_ids(raw_data, word_to_id):\n docs = tokenize_keras(raw_data)\n uid = word_to_id[UNKNOWN_WORD]\n return [[word_to_id.get(w, uid) for w in doc] for doc in docs]", "def edge_lookup(self, keylookup_obj, id_strct, debug=False):\n res_id_strct = IDStruct()\n for left, right in id_strct:\n res_id_strct.add(left, re.sub(self.from_regex, self.to_regex, right))\n return res_id_strct", "def load(self, data):\n points = data[\"points\"]\n normals = data[\"normals\"]\n labels = data[\"labels\"]\n primitives = data[\"primitives\"]\n primitives = copy.deepcopy(primitives)\n\n try:\n cluster_ids = data[\"seg_id_RANSAC\"]\n except:\n cluster_ids = data[\"seg_id\"]\n parameters = data[\"primitive_dict\"]\n\n rows, cols, unique_target, unique_pred = match(labels, cluster_ids)\n gtpoints = {}\n for k in range(rows.shape[0]):\n if not (parameters.get(k) is None):\n v = parameters[k]\n for index, j in enumerate(v):\n if index == 0:\n continue\n try:\n v[index] = torch.from_numpy(j.astype(np.float32)).cuda()\n except:\n v[index] = torch.tensor(j).cuda()\n indices = labels == cols[k]\n # only include the surface patches that are matched\n if np.sum(indices) > 0:\n gtpoints[k] = torch.from_numpy(points[indices].astype(np.float32)).cuda()\n else:\n parameters.pop(k)\n return parameters, gtpoints", "def as_dict(self):\n d = {}\n for e in self.get_queryset():\n d[e.name] = e.id\n return d", "def get(self, _id):\n if not self.root:\n raise RootNotSet\n node = self.id_map.get(_id)\n if not node:\n raise IDNotFound(_id)\n\n link = node.get('link')\n if link:\n link_node = self.id_map.get(_id)\n if not link_node:\n logger.error('link node not found!')\n raise IDNotFound(link_node)\n data = self.get(node['link'])\n data['link'] = data['id']\n data['id'] = link_node['id']\n return data\n\n if node.get('type') == 'group' or node.get('type') == None:\n return self._adapter._get_group(_id)\n elif node.get('type') == 'data':\n return self._adapter._load_data(_id)\n elif node.get('type') == 'json':\n return self._adapter._load_data(_id)\n elif node.get('type') == 'config':\n data = self._adapter._load_data(_id)\n data.pop('name', None)\n return data\n else:\n raise UnsupportedType", "def get_norm(self):\n\n # logger.info(\" Normalization factor:\")\n\n # declare the dict of class instance\n # where we'll store the normalization parameter\n self.param_norm = {'features': {}, 'targets': {}}\n for feat_type, feat_names in self.select_feature.items():\n 
self.param_norm['features'][feat_type] = {}\n for name in feat_names:\n self.param_norm['features'][feat_type][name] = NormParam(\n )\n self.param_norm['targets'][self.select_target] = MinMaxParam()\n\n # read the normalization\n self._read_norm()\n\n # make array for fast access\n self.feature_mean, self.feature_std = [], []\n for feat_type, feat_names in self.select_feature.items():\n for name in feat_names:\n self.feature_mean.append(\n self.param_norm['features'][feat_type][name].mean)\n self.feature_std.append(\n self.param_norm['features'][feat_type][name].std)\n\n self.target_min = self.param_norm['targets'][self.select_target].min\n self.target_max = self.param_norm['targets'][self.select_target].max", "def form_dictionary_by_diffrn(data_obj) -> dict:\n if isinstance(data_obj, Diffrn):\n ddict = data_obj.get_dictionary()\n else:\n ddict = {}\n\n return ddict", "def to_representation(self, obj):\n return self._choices[obj]", "def _get_ids_from_label(self, label):\r\n keys = self.list_keys()\r\n results = []\r\n for key in keys:\r\n if key['label'] == label:\r\n results.append(key['id'])\r\n return results", "def reset_stat_obj(self, nn_id):\n try:\n obj = models.NN_DEF_LIST_INFO.objects.get(nn_id=nn_id)\n data_set = {}\n data_set['bygen'] = []\n data_set['best'] = []\n setattr(obj, \"automl_stat\", data_set)\n obj.save()\n return data_set\n except Exception as e:\n raise Exception(e)", "def _dicts_with_ids(self):\n fields = self.fields\n # Append ID to the requested fields so we can keep track of object\n # identity to sort by weight (or whatever Sphinx sorted by). We could\n # optimize slightly by not prepending ID if the user already\n # specifically asked for it, but then we'd have to keep track of its\n # offset.\n if fields and 'id' not in fields:\n fields += ('id',)\n\n # Get values rather than values_list, because we need to be able to\n # find the ID afterward, and we don't want to have to go rooting around\n # in the Django model to figure out what order the fields were declared\n # in in the case that no fields were passed in.\n return self._queryset().values(*fields)", "def recur_iter_attrs(obj):\n\n if isinstance(obj, list) or isinstance(obj, tuple):\n res = []\n for i in obj:\n res.append(recur_iter_attrs(i))\n return res\n elif isinstance(obj, dict):\n dic = {}\n for k, v in obj.items():\n dic[k] = recur_iter_attrs(v)\n return dic\n elif obj is None\\\n or isinstance(obj, int) \\\n or isinstance(obj, float) \\\n or isinstance(obj, complex) \\\n or isinstance(obj, str) \\\n or isinstance(obj, bool):\n return obj\n else:\n dic = {}\n for k, v in obj.__dict__.items():\n dic[k] = recur_iter_attrs(v)\n return dic", "def fit(self, dataset, labels):\n self.dataset = dataset\n self.labels = labels\n self.normalization_n = []\n self.normalization_d = []\n self.first_title = list(self.dataset.keys())[0]\n for ind in range(len(self.dataset[self.first_title])):\n self.normalize_features(self.dataset, ind)", "def ids_to_tree(self, emb, postprocess=True):\n\n tree = TreeData()\n tokens = self.ids_to_strings(emb)\n\n for token in tokens:\n if token in ['<GO>', '<STOP>', '<VOID>']:\n continue\n if postprocess:\n # casing (only if set to lowercase)\n if self.lowercase and len(tree) == 1 or tree.nodes[-1].t_lemma in ['.', '?', '!']:\n token = token[0].upper() + token[1:]\n # plural merging (if plural tokens come up)\n if token == '<-s>' and tree.nodes[-1].t_lemma is not None:\n token = self._singular_to_plural(tree.nodes[-1].t_lemma)\n tree.remove_node(len(tree) - 1)\n elif token == 
'<-s>':\n continue\n\n tree.create_child(0, len(tree), NodeData(token, 'x'))\n\n return tree", "def _compute_labels(self, element, data, mapping):\n lidx = element.nodes.get_dimension(self.label_index)\n if element.vdims:\n edges = Dataset(element)[element[element.vdims[0].name]>0]\n nodes = list(np.unique([edges.dimension_values(i) for i in range(2)]))\n nodes = element.nodes.select(**{element.nodes.kdims[2].name: nodes})\n else:\n nodes = element\n\n value_dim = element.vdims[0]\n labels = [lidx.pprint_value(v) for v in nodes.dimension_values(lidx)]\n if self.show_values:\n value_labels = []\n for i, node in enumerate(element._sankey['nodes']):\n value = value_dim.pprint_value(node['value'])\n label = '%s - %s' % (labels[i], value)\n if value_dim.unit:\n label += ' %s' % value_dim.unit\n value_labels.append(label)\n labels = value_labels\n\n ys = nodes.dimension_values(1)\n nodes = element._sankey['nodes']\n offset = (nodes[0]['x1']-nodes[0]['x0'])/4.\n if self.label_position == 'right':\n xs = np.array([node['x1'] for node in nodes])+offset\n else:\n xs = np.array([node['x0'] for node in nodes])-offset\n data['text_1'] = dict(x=xs, y=ys, text=[str(l) for l in labels])\n align = 'left' if self.label_position == 'right' else 'right'\n mapping['text_1'] = dict(text='text', x='x', y='y', text_baseline='middle', text_align=align)", "def label_children(node: etree.Element) -> t.Mapping[str, etree.Element]:\n return {child.tag: child for child in node}", "def _extract_terms(self, obj):\r\n terms = set()\r\n if 'paths' in obj:\r\n for path in obj['paths']:\r\n segs = re.split('[/{}]', path)\r\n for seg in segs:\r\n terms.add(seg.lower())\r\n self.terms = terms", "def ids_to_tree(self, emb, postprocess=True):\n\n tree = TreeData()\n tokens = self.ids_to_strings(emb)\n\n for token in tokens:\n if token in ['<GO>', '<STOP>', '<VOID>']:\n continue\n tree.create_child(0, len(tree), NodeData(token, 'x'))\n\n return tree", "def get_elements_in_data_dimension(analytics_items, analytics_uids):\n for key in ['dataElement', 'indicator', 'programIndicator', 'attribute']:\n analytics_uids[key] = list(dict.fromkeys(analytics_uids[key] + json_extract_nested_ids(analytics_items, key)))\n\n return analytics_uids", "def node_labels(self, n_id=None):\n if n_id is None:\n return frozenset(self._nodes_by_label.keys())\n else:\n try:\n node_entry = self._nodes[n_id]\n except KeyError:\n return None\n else:\n return node_entry.labels", "def get_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def get_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def get_objective_id_terms(self):\n return # osid.search.terms.IdTerm", "def __parse_ldap_to_dhcp_attribute_map(self):\n import shlex\n self.ldap_to_dhcp_attribute_map = dict()\n options = shlex.split(self.options.get(\"ldap_to_dhcp_attribute_map\", \\\n self.DEFAULT_LDAP_TO_DHCP_ATTRIBUTE_MAP))\n for option in options:\n ldap_attr_name, dhcp_attr_name = option.split('=',1)\n self.ldap_to_dhcp_attribute_map[ldap_attr_name] = dhcp_attr_name", "def get_vectors_and_labels_self():\n pos_t, pos_post_t = ngram.generate_n_gram_dict(ds.POS_DICT_SELF, 1)\n neg_t, neg_post_t = ngram.generate_n_gram_dict(ds.NEG_DICT_SELF, 1)\n neu_t, neu_post_t = ngram.generate_n_gram_dict(ds.NEU_DICT_SELF, 1)\n ds.POS_UNI_GRAM_SELF, is_success = commons.dict_update(ds.POS_UNI_GRAM, pos_t)\n ds.NEG_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEG_UNI_GRAM, neg_t)\n ds.NEU_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEU_UNI_GRAM, neu_t)\n 
ds.POS_POST_UNI_GRAM_SELF, is_success = commons.dict_update(ds.POS_POST_UNI_GRAM, pos_post_t)\n ds.NEG_POST_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEG_POST_UNI_GRAM, neg_post_t)\n ds.NEU_POST_UNI_GRAM_SELF, is_success = commons.dict_update(ds.NEU_POST_UNI_GRAM, neu_post_t)\n temp_pos_dict = ds.POS_DICT.copy()\n temp_neg_dict = ds.NEG_DICT.copy()\n temp_neu_dict = ds.NEU_DICT.copy()\n temp_pos_dict_self = ds.POS_DICT_SELF.copy()\n temp_neg_dict_self = ds.NEG_DICT_SELF.copy()\n temp_neu_dict_self = ds.NEU_DICT_SELF.copy()\n temp_pos_dict_final = {}\n temp_neg_dict_final = {}\n temp_neu_dict_final = {}\n temp_pos_dict_final.update(temp_pos_dict)\n temp_neg_dict_final.update(temp_neg_dict)\n temp_neu_dict_final.update(temp_neu_dict)\n temp_pos_dict_final.update(temp_pos_dict_self)\n temp_neg_dict_final.update(temp_neg_dict_self)\n temp_neu_dict_final.update(temp_neu_dict_self)\n pos_vec, pos_lab = load_matrix_sub(temp_pos_dict_final, cons.LABEL_POSITIVE, True)\n neg_vec, neg_lab = load_matrix_sub(temp_neg_dict_final, cons.LABEL_NEGATIVE, True)\n neu_vec, neu_lab = load_matrix_sub(temp_neu_dict_final, cons.LABEL_NEUTRAL, True)\n ds.VECTORS_SELF = pos_vec + neg_vec + neu_vec\n ds.LABELS_SELF = pos_lab + neg_lab + neu_lab\n return is_success", "def _gt_object_hook(d):\n return {int(k): v for k, v in d.items()}", "def get_election(self, id: int) -> dict:", "def normalize_data(data, class_name):\n row_count = len(data.index)\n col_count = len(data.columns)\n normalized_data = []\n\n normalized_class_list = []\n class_list = data.iloc[(range(row_count)), 0].values\n for value in class_list:\n normalized_class_list.append(1 if value == class_name else 0)\n normalized_data.append(normalized_class_list)\n\n for index in range(1, col_count):\n feature_list = data.iloc[(range(row_count)), index].values\n normalized_data += normalize(feature_list)\n \n return normalized_data", "def prepare_data_and_labels_for_svm(data: Data_dict_type, labels: Labels_dict_type, window_size: float,\n window_step: float,\n normalization: bool = False, normalization_types: Tuple[str, ...] 
= ('z', 'l2'),\n return_normalizers: bool = False,\n normalizers: Optional[Tuple[object, ...]] = None,\n class_to_delete: Optional[int] = None) \\\n -> Union[\n Tuple[Data_dict_type, Labels_dict_type_numpy],\n Tuple[Data_dict_type, Labels_dict_type_numpy, Optional[Tuple[object, ...]]]\n ]:\n # cut data on sequences\n data, labels = cut_all_data_and_labels_on_chunks(data, labels, window_size, window_step)\n labels_averaged = average_labels_within_window(labels)\n\n # delete instances with -1 label\n if not class_to_delete is None:\n data, labels_averaged = delete_instances_with_class(data, labels_averaged, class_to_delete)\n # extract window statistics such as mean, std\n data = extract_statistics_from_windows(data)\n # get normalizers of data\n if normalization:\n if normalizers is None:\n normalizers = get_normalizers_trained_on_dict_data(data, normalization_types)\n data = normalize_all_data_in_dict(data, normalizers)\n if return_normalizers: return data, labels_averaged, normalizers\n return data, labels_averaged", "def _populate_terms(self, optobj):\n has_relationship = optobj is not None and 'relationship' in optobj.optional_attrs\n # Make parents and relationships references to the actual GO terms.\n for rec in self.values():\n # Given parent GO IDs, set parent GO Term objects\n rec.parents = set([self[goid] for goid in rec._parents])\n\n # For each parent GO Term object, add it's child GO Term to the children data member\n for parent_rec in rec.parents:\n parent_rec.children.add(rec)\n\n if has_relationship:\n self._populate_relationships(rec)", "def from_label(self, data):\n out = data.copy()\n if isinstance(data, pd.DataFrame) or isinstance(data, dict):\n for column in self.categorical_feature_names:\n out[column] = self.labelencoder[column].inverse_transform(out[column].round().astype(int).tolist())\n return out\n elif isinstance(data, list):\n for column in self.categorical_feature_indexes:\n out[column] = self.labelencoder[self.feature_names[column]].inverse_transform([round(out[column])])[0]\n return out" ]
[ "0.4882127", "0.48138362", "0.46735653", "0.46150512", "0.45790586", "0.44817707", "0.44758487", "0.4465512", "0.442252", "0.44222006", "0.43991864", "0.43809223", "0.4366402", "0.43599775", "0.4356347", "0.43552524", "0.43511787", "0.42940468", "0.42261603", "0.42139208", "0.41993126", "0.416335", "0.414635", "0.41318846", "0.41266203", "0.41098994", "0.40977052", "0.4061388", "0.40588298", "0.4056097", "0.40052527", "0.3995996", "0.39950192", "0.3993862", "0.3993862", "0.3982741", "0.39773813", "0.39692363", "0.396533", "0.39632782", "0.39629394", "0.39608485", "0.39367962", "0.39283952", "0.39277652", "0.39219487", "0.39164904", "0.3902457", "0.3900099", "0.38954166", "0.38818365", "0.38764173", "0.38693556", "0.38687816", "0.38682768", "0.38663375", "0.3863371", "0.38573205", "0.38566446", "0.384625", "0.38447905", "0.38437945", "0.38411692", "0.38371146", "0.38311014", "0.38267016", "0.38249388", "0.38206923", "0.38184118", "0.38170823", "0.38077348", "0.38064554", "0.38062733", "0.37988114", "0.37986353", "0.37960878", "0.37950864", "0.37904143", "0.3789977", "0.378635", "0.37845582", "0.3784304", "0.37814873", "0.37786564", "0.3778572", "0.37777886", "0.37764177", "0.37757152", "0.37731576", "0.37714592", "0.37714592", "0.37714592", "0.37699646", "0.37667954", "0.37577358", "0.3756891", "0.374812", "0.37470245", "0.37446472", "0.37431628" ]
0.6927882
0
Traverses the supplied object, getting all options in opts for the specified opt_type and specs. Also takes into account the plotting class defaults for plot options. If a keyfn is supplied, the returned options will be grouped by the returned keys.
def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True): def lookup(x): """ Looks up options for object, including plot defaults, keyfn determines returned key otherwise None key is used. """ options = cls.lookup_options(x, opt_type) selected = {o: options.options[o] for o in opts if o in options.options} if opt_type == 'plot' and defaults: plot = Store.registry[cls.backend].get(type(x)) selected['defaults'] = {o: getattr(plot, o) for o in opts if o not in selected and hasattr(plot, o)} key = keyfn(x) if keyfn else None return (key, selected) # Traverse object and accumulate options by key traversed = obj.traverse(lookup, specs) options = defaultdict(lambda: defaultdict(list)) default_opts = defaultdict(lambda: defaultdict(list)) for key, opts in traversed: defaults = opts.pop('defaults', {}) for opt, v in opts.items(): options[key][opt].append(v) for opt, v in defaults.items(): default_opts[key][opt].append(v) # Merge defaults into dictionary if not explicitly specified for key, opts in default_opts.items(): for opt, v in opts.items(): if opt not in options[key]: options[key][opt] = v return options if keyfn else options[None]
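The defaults-merging behaviour of the function above can be shown with a small standalone sketch. This is a hedged illustration only: FakeElement, its class-level defaults dict, and the sample option names are hypothetical stand-ins invented here, not the real element or Store-registry types the function operates on; the sketch reproduces only the accumulate-then-backfill logic.

```python
# Minimal sketch of the traverse-and-merge pattern above.
# FakeElement and its options/defaults are hypothetical stand-ins,
# not the real element or plotting-class types.
from collections import defaultdict

class FakeElement:
    # Class-level defaults play the role of the plot-class defaults.
    defaults = {'width': 300, 'height': 300}

    def __init__(self, group, options):
        self.group = group        # used by the keyfn below
        self.options = options    # explicitly set options

def traverse_options(elements, opts, keyfn=None, defaults=True):
    options = defaultdict(lambda: defaultdict(list))
    default_opts = defaultdict(lambda: defaultdict(list))
    for el in elements:
        key = keyfn(el) if keyfn else None
        # Explicitly set options are accumulated per key ...
        selected = {o: el.options[o] for o in opts if o in el.options}
        for opt, val in selected.items():
            options[key][opt].append(val)
        # ... while class defaults are collected separately.
        if defaults:
            for opt in opts:
                if opt not in selected and opt in el.defaults:
                    default_opts[key][opt].append(el.defaults[opt])
    # Defaults only backfill options with no explicit value for that key.
    for key, dopts in default_opts.items():
        for opt, vals in dopts.items():
            if opt not in options[key]:
                options[key][opt] = vals
    return options if keyfn else options[None]

elements = [FakeElement('A', {'width': 400}), FakeElement('B', {})]
grouped = traverse_options(elements, ['width', 'height'],
                           keyfn=lambda e: e.group)
print({k: dict(v) for k, v in grouped.items()})
# -> {'A': {'width': [400], 'height': [300]},
#     'B': {'width': [300], 'height': [300]}}
```

As in the original, defaults are kept in a separate accumulator and merged last, so an explicitly set value under a key suppresses the class default for that option rather than being mixed in with it.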
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _all_opt_infos(self):\n for info in self._opts.values():\n yield info, None\n for group in self._groups.values():\n for info in group._opts.values():\n yield info, group", "def get_plot_kwargs(cfg, option, key=None):\n plot_kwargs = cfg.get(option, {}).get('plot_kwargs', {})\n if key is None:\n return plot_kwargs\n if '_xy' in option:\n additional_plot_kwargs = cfg.get('additional_plot_kwargs_xy_plots', {})\n if key in additional_plot_kwargs:\n return {**plot_kwargs, **additional_plot_kwargs[key]}\n subkey = key.split(SEP)[-1]\n if subkey in additional_plot_kwargs:\n return {**plot_kwargs, **additional_plot_kwargs[subkey]}\n return deepcopy(plot_kwargs)", "def _parse_options(options):\n opts = dict()\n for attr in dir(options):\n if attr.startswith(\"__\"):\n continue\n opts[attr] = getattr(options, attr)\n return opts", "def _all_cli_opts(self):\n for item in self._cli_opts:\n yield item['opt'], item['group']", "def options(self):\n result = []\n for typ in type(self).mro():\n result.extend(k for k, v in typ.__dict__.items()\n if isinstance(v, Option))\n return dict((o, getattr(self, o)) for o in result)", "def get_options(self):\n additional_data = self.get_additional_data()\n options_out = []\n for key, value in additional_data['DIMENSION_VALUES'].items():\n key_label = ' '.join(key.split('_')).strip().title()\n data = {'specification': key_label, 'specification_key': key, 'choices': value}\n options_out.append(data)\n return options_out", "def static_opts(ftype, **kwargs):\n\n opts = dict()\n\n if ftype == 'sequential_feature_selector':\n # check if we got the features\n features = kwargs.pop('features', None)\n if features is not None:\n opts['hidden_layer_sizes'] = (features.shape[0], features.shape[1])\n\n if ftype == 'vote':\n # check if we got the training data\n X_train = kwargs.pop('X_train', None)\n if X_train is not None:\n # TODO: check dimensions!\n opts['hidden_layer_sizes'] = (X_train.shape[1], X_train.shape[1])\n\n return opts", "def lookup(x):\n options = cls.lookup_options(x, opt_type)\n selected = {o: options.options[o]\n for o in opts if o in options.options}\n if opt_type == 'plot' and defaults:\n plot = Store.registry[cls.backend].get(type(x))\n selected['defaults'] = {o: getattr(plot, o) for o in opts\n if o not in selected and hasattr(plot, o)}\n key = keyfn(x) if keyfn else None\n return (key, selected)", "def get_options(self, key):\n if key in self.options.get_option_names():\n return self.options\n\n try:\n scope, scoped_key = key.split('.')\n except ValueError:\n return None\n\n if scope == 'input' and scoped_key in self.input.options.get_option_names():\n return self.input.options\n elif scope == 'output' and scoped_key in self.output.options.get_option_names():\n return self.output.options\n elif scope == 'exploit' and scoped_key in self.exploit.options.get_option_names():\n return self.exploit.options\n else:\n return None", "def __iter__(self):\n for key in itertools.chain(list(self._opts.keys()),\n list(self._groups.keys())):\n yield key", "def get_options(cls):\n for option in cls._general_options.items():\n yield option\n for option in cls._specific_options.items():\n yield option", "def get_options():\n user_options = {}\n user_options['surface'] = {'label': 'Surface',\n 'type': 'stringList',\n 'default': 'bcc100',\n 'values': surface_selections}\n\n user_options['metal'] = {'label': 'Metal',\n 'type': 'string',\n 'default': 'Au'}\n\n user_options['a'] = {'label': 'Lattice Constant',\n 'type': 'float',\n 'precision': 3,\n 'suffix': 'Å'}\n\n 
user_options['size-x'] = {'label': 'Size X',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-y'] = {'label': 'Size Y',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-z'] = {'label': 'Size Z',\n 'type': 'integer',\n 'default': 3}\n\n user_options['vacuum'] = {'label': 'Vacuum distance',\n 'type': 'float',\n 'precision': 1,\n 'suffix': 'Å'}\n\n user_options['orthogonal'] = {'label': 'Orthogonal',\n 'type': 'stringList',\n 'default': 'True',\n 'values': ['True', 'False']}\n\n return {'userOptions': user_options }", "def _get_norm_opts(self, obj):\n norm_opts = {}\n\n # Get all elements' type.group.label specs and ids\n type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False),\n util.label_sanitizer(x.label, escape=False))) \\\n if isinstance(x, Element) else None\n element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn)\n if idspec is not None}\n\n # Group elements specs by ID and override normalization\n # options sequentially\n key_fn = lambda x: -1 if x[0] is None else x[0]\n id_groups = groupby(sorted(element_specs, key=key_fn), key_fn)\n for gid, element_spec_group in id_groups:\n gid = None if gid == -1 else gid\n group_specs = [el for _, el in element_spec_group]\n\n backend = self.renderer.backend\n optstree = Store.custom_options(\n backend=backend).get(gid, Store.options(backend=backend))\n # Get the normalization options for the current id\n # and match against customizable elements\n for opts in optstree:\n path = tuple(opts.path.split('.')[1:])\n applies = any(path == spec[:i] for spec in group_specs\n for i in range(1, 4))\n if applies and 'norm' in opts.groups:\n nopts = opts['norm'].options\n if 'axiswise' in nopts or 'framewise' in nopts:\n norm_opts.update({path: (nopts.get('axiswise', False),\n nopts.get('framewise', False))})\n element_specs = [spec for _, spec in element_specs]\n norm_opts.update({spec: (False, False) for spec in element_specs\n if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))})\n return norm_opts", "def get_all_combinations(param_opt):\n\tif not param_opt:\n\t\treturn {}\n\treturn (dict(zip(param_opt.keys(), x)) for x in itertools.product(*param_opt.values()))", "def options(name, option=None, value=None, opt_dict=None):\n\n if isinstance(name, int):\n name = list(pytplot.data_quants.keys())[name]\n\n if opt_dict is None:\n opt_dict = {option: value}\n else:\n if not isinstance(opt_dict,dict):\n print(\"dict must be a dictionary object. Returning.\")\n return\n\n if not isinstance(name, list):\n name = [name]\n\n for i in name:\n\n for option, value in opt_dict.items():\n\n # Lower case option for consistency\n option = option.lower()\n\n if i not in pytplot.data_quants.keys():\n print(str(i) + \" is currently not in pytplot.\")\n return\n\n if option == 'color':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['extras']['line_color'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['line_color'] = [value]\n\n if option == 'link':\n if isinstance(value, list):\n pytplot.link(i, value[1], value[0])\n\n if option == 'colormap':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['extras']['colormap'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['colormap'] = [value]\n\n if option == 'spec':\n _reset_plots(i)\n if value:\n if 'spec_bins' not in pytplot.data_quants[i].coords:\n print(f\"{i} does not contain coordinates for spectrogram plotting. 
Continuing...\")\n continue\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec'] = value\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec'] = value\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n # Set the default dimension to plot by. All others will be summed over.\n if 'spec_dim_to_plot' not in pytplot.data_quants[i].attrs['plot_options']['extras']:\n if 'v' in pytplot.data_quants[i].coords:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v'\n elif 'v2' in pytplot.data_quants[i].coords:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v2'\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v1'\n\n if option == 'alt':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['alt'] = value\n\n if option == 'map':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['map'] = value\n\n if option == 'legend_names':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['legend_names'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['legend_names'] = [value]\n\n if option == 'xlog_slice':\n if value:\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_axis_type'] = 'linear'\n\n if option == 'ylog':\n negflag = 0 # _ylog_check(data_quants, value, i)\n if negflag == 0 and value:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_axis_type'] = 'linear'\n\n if option == 'ylog_slice':\n if value:\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_axis_type'] = 'linear'\n\n if option == 'zlog':\n # check for negative values and warn the user that they will be ignored\n negflag = _zlog_check(pytplot.data_quants, value, i)\n if negflag != 0 and value:\n print(str(i) + ' contains negative values; setting the z-axis to log scale will cause the negative values to be ignored on figures.')\n\n if value:\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_axis_type'] = 'linear'\n\n if option == 'nodata':\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['visible'] = value\n\n if option == 'line_style':\n if value == 0 or value == 'solid_line':\n to_be = []\n elif value == 1 or value == 'dot':\n to_be = [2, 4]\n elif value == 2 or value == 'dash':\n to_be = [6]\n elif value == 3 or value == 'dash_dot':\n to_be = [6, 4, 2, 4]\n elif value == 4 or value == 'dash_dot_dot_dot':\n to_be = [6, 4, 2, 4, 2, 4, 2, 4]\n elif value == 5 or value == 'long_dash':\n to_be = [10]\n else:\n to_be=value\n\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['line_style'] = to_be\n\n if(value == 6 or value == 'none'):\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['visible'] = False\n\n if option == 'char_size':\n pytplot.data_quants[i].attrs['plot_options']['extras']['char_size'] = value\n\n if option == 'name':\n 
pytplot.data_quants[i].attrs['plot_options']['line_opt']['name'] = value\n\n if option == \"panel_size\":\n if value > 1 or value <= 0:\n print(\"Invalid value. Should be (0, 1]\")\n return\n pytplot.data_quants[i].attrs['plot_options']['extras']['panel_size'] = value\n\n if option == 'basemap':\n pytplot.data_quants[i].attrs['plot_options']['extras']['basemap'] = value\n\n if option == 'alpha':\n if value > 1 or value < 0:\n print(\"Invalid value. Should be [0, 1]\")\n return\n pytplot.data_quants[i].attrs['plot_options']['extras']['alpha'] = value\n\n if option == 'thick':\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['line_width'] = value\n\n if option == 'yrange' or option == 'y_range':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = [value[0], value[1]]\n\n if option == 'zrange' or option == 'z_range':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_range'] = [value[0], value[1]]\n\n if option == 'xrange_slice':\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_range'] = [value[0], value[1]]\n\n if option == 'yrange_slice':\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_range'] = [value[0], value[1]]\n\n if option == 'xtitle':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['axis_label'] = value\n\n if option == 'ytitle':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['axis_label'] = value\n\n if option == 'ztitle':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['axis_label'] = value\n\n if option == 'xsubtitle':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['axis_subtitle'] = value\n\n if option == 'ysubtitle':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['axis_subtitle'] = value\n\n if option == 'zsubtitle':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['axis_subtitle'] = value\n\n if option == 'ybar':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ybar'] = value\n\n if option == 'ybar_color':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ybar'] = value\n\n if option == 'ybar_size':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ysize'] = value\n\n if option == 'plotter':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['plotter'] = value\n\n if option == 'crosshair_x':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['crosshair'] = value\n\n if option == 'crosshair_y':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['crosshair'] = value\n\n if option == 'crosshair_z':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['crosshair'] = value\n\n if option == 'static':\n pytplot.data_quants[i].attrs['plot_options']['extras']['static'] = value\n\n if option == 'static_tavg':\n pytplot.data_quants[i].attrs['plot_options']['extras']['static_tavg'] = [value[0], value[1]]\n\n if option == 't_average':\n pytplot.data_quants[i].attrs['plot_options']['extras']['t_average'] = value\n\n if option == 'spec_dim_to_plot' or option == 'spec_plot_dim':\n if len(pytplot.data_quants[i].values.shape) <= 2:\n print(f\"Must have more than 2 coordinate dimensions to set spec_coord_to_plot for {pytplot.data_quants[i].name}\")\n continue\n\n # Set the 'spec_dim_to_plot' value to either 'v' or 'v1', 'v2', 'v3', etc.\n if isinstance(value, int):\n coord_to_plot = \"v\" + str(value)\n if coord_to_plot not in pytplot.data_quants[i].coords:\n if value == 1:\n coord_to_plot = \"v\"\n if coord_to_plot not in pytplot.data_quants[i].coords:\n 
print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n continue\n else:\n print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n continue\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = coord_to_plot\n elif isinstance(value, str):\n coord_to_plot = value\n if coord_to_plot not in pytplot.data_quants[i].coords:\n print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n continue\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = value\n\n # If we're plotting against different coordinates, we need to change what we consider the \"spec_bins\"\n pytplot.data_quants[i].coords['spec_bins'] = pytplot.data_quants[i].coords[coord_to_plot]\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n if option == 'spec_slices_to_use':\n if not isinstance(value, dict):\n print(\"Must be a dictionary object in the format {'v2':15, 'v3':7}\")\n return\n else:\n for coord in value:\n if coord not in pytplot.data_quants[i].coords:\n print(f\"Dimension {coord} not found in {pytplot.data_quants[i].name}\")\n continue\n\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_slices_to_use'] = value\n\n if option == 'border':\n pytplot.data_quants[i].attrs['plot_options']['extras']['border'] = value\n\n if option == 'var_label_ticks':\n pytplot.data_quants[i].attrs['plot_options']['var_label_ticks'] = value\n\n\n return", "def options(self):\n options_to_report = dict()\n for cls in inspect.getmro(type(self)):\n parameter_names, _, _, defaults, _, _, _ = inspect.getfullargspec(cls.__init__)\n if defaults:\n class_options = {parameter_name: getattr(self, '_' + parameter_name) for\n parameter_name in parameter_names[-len(defaults):]}\n options_to_report.update(class_options)\n options_to_report.pop('mcmc_moves')\n return options_to_report", "def processOptions_(self, opts):\n\n for opt in opts.keys():\n val = opts[opt]\n\n # Skip actions, they are processed later in initializeActions_()\n if opt in self.main_actions:\n self.cfg_params['SKIM.'+opt[1:]] = val\n continue\n if opt in self.aux_actions:\n self.cfg_params['SKIM.'+opt[1:]] = val\n continue\n \n\n elif ( opt == '-cfg' ):\n pass\n\n elif ( opt in ('-continue', '-c') ):\n # Already processed in processContinueOption_()\n pass\n\n elif ( opt == '-Q' ):\n self.flag_quiet = 1\n pass\n\n elif ( opt == '-debug' ):\n if val: self.debug_level = int(val)\n else: self.debug_level = 1\n pass\n\n elif string.find(opt,'.') == -1:\n print common.prog_name+'. 
Unrecognized option '+opt\n usage()\n pass\n\n # Override config parameters from INI-file with cmd-line params\n if string.find(opt,'.') == -1 :\n self.cfg_params['SKIM.'+opt[1:]] = val\n pass\n else:\n # Command line parameters in the form -SECTION.ENTRY=VALUE\n self.cfg_params[opt[1:]] = val\n pass\n pass\n return", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def read_all_options(self, test_case=None):\n args = self.get_parsed_cmd_args(test_case)\n\n Options.validate_methods(args[\"methods\"])\n\n self.read_config_file(args[\"config_file\"])\n\n for option in self.options:\n if args[option] not in [None, []]:\n self.options[option] = args[option]\n\n if option in self.method_options:\n method, method_option = self.method_options[option]\n Options.available_methods()[method].options[method_option] = args[option]\n\n #remove duplicate\n for option in [\"methods\", \"packages\"]:\n self.options[option] = list(set(self.options[option]))\n\n return self.options", "def get_options(cls):\n return {\n \"name\": str,\n ConfigOption(\"install_files\", default=None): Or(None, list),\n ConfigOption(\"timeout\", default=300): int,\n ConfigOption(\"log_regexps\", default=None): Or(None, list),\n ConfigOption(\"stdout_regexps\", default=None): Or(None, list),\n ConfigOption(\"stderr_regexps\", default=None): Or(None, list),\n ConfigOption(\"file_logger\", default=None): Or(None, str),\n ConfigOption(\"async_start\", default=False): bool,\n ConfigOption(\"report_errors_from_logs\", default=False): bool,\n ConfigOption(\"error_logs_max_lines\", default=10): int,\n ConfigOption(\"path_cleanup\", default=True): bool,\n ConfigOption(\"pre_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_start\", default=None): validate_func(\"driver\"),\n ConfigOption(\"pre_stop\", default=None): validate_func(\"driver\"),\n ConfigOption(\"post_stop\", default=None): validate_func(\"driver\"),\n }", "def create_options(options, passthru_args=None, fingerprintable_options=None):\n fingerprintable = fingerprintable_options or defaultdict(dict)\n\n class FakeOptions(object):\n def for_scope(self, scope):\n # TODO(John Sirois): Some users pass in A dict of scope -> _FakeOptionValues instead of a\n # dict of scope -> (dict of option name -> value). 
Clean up these usages and kill this\n # accommodation.\n options_for_this_scope = options.get(scope) or {}\n if isinstance(options_for_this_scope, _FakeOptionValues):\n options_for_this_scope = options_for_this_scope.option_values\n\n scoped_options = {}\n if scope:\n scoped_options.update(self.for_scope(enclosing_scope(scope)).option_values)\n scoped_options.update(options_for_this_scope)\n return _FakeOptionValues(scoped_options)\n\n def for_global_scope(self):\n return self.for_scope('')\n\n def passthru_args_for_scope(self, scope):\n return passthru_args or []\n\n def items(self):\n return options.items()\n\n @property\n def scope_to_flags(self):\n return {}\n\n def get_fingerprintable_for_scope(self, bottom_scope, include_passthru=False):\n \"\"\"Returns a list of fingerprintable (option type, option value) pairs for\n the given scope.\n\n Note that this method only collects values for a single scope, NOT from\n all enclosing scopes as in the Options class!\n\n :param str bottom_scope: The scope to gather fingerprintable options for.\n :param bool include_passthru: Whether to include passthru args captured by `bottom_scope` in the\n fingerprintable options.\n \"\"\"\n pairs = []\n if include_passthru:\n pu_args = self.passthru_args_for_scope(bottom_scope)\n pairs.extend((str, arg) for arg in pu_args)\n\n option_values = self.for_scope(bottom_scope)\n for option_name, option_type in fingerprintable[bottom_scope].items():\n pairs.append((option_type, option_values[option_name]))\n return pairs\n\n def __getitem__(self, scope):\n return self.for_scope(scope)\n\n return FakeOptions()", "def create_options_for_optionables(optionables,\n options=None,\n options_fingerprintable=None,\n passthru_args=None):\n all_options = defaultdict(dict)\n fingerprintable_options = defaultdict(dict)\n bootstrap_option_values = None\n\n if options_fingerprintable:\n for scope, opts in options_fingerprintable.items():\n fingerprintable_options[scope].update(opts)\n\n def register_func(on_scope):\n scoped_options = all_options[on_scope]\n scoped_fingerprintables = fingerprintable_options[on_scope]\n register = _options_registration_function(scoped_options, scoped_fingerprintables)\n register.bootstrap = bootstrap_option_values\n register.scope = on_scope\n return register\n\n # TODO: This sequence is a bit repetitive of the real registration sequence.\n\n # Register bootstrap options and grab their default values for use in subsequent registration.\n GlobalOptionsRegistrar.register_bootstrap_options(register_func(GLOBAL_SCOPE))\n bootstrap_option_values = _FakeOptionValues(all_options[GLOBAL_SCOPE].copy())\n\n # Now register the full global scope options.\n GlobalOptionsRegistrar.register_options(register_func(GLOBAL_SCOPE))\n\n for optionable in optionables:\n optionable.register_options(register_func(optionable.options_scope))\n\n if options:\n for scope, opts in options.items():\n all_options[scope].update(opts)\n\n return create_options(all_options,\n passthru_args=passthru_args,\n fingerprintable_options=fingerprintable_options)", "def get_specific_options(cls):\n for option in cls._specific_options.items():\n yield option", "def get_options(self, panel=\"\"):\n return dict()", "def register_opts(self, opts, group=None):\n for opt in opts:\n self.register_opt(opt, group, clear_cache=False)", "def get_create_options(self, ds_options, section, pretty=True):\r\n return_value = None\r\n\r\n if 'datacenter' == section:\r\n datacenters = [loc['keyname']\r\n for loc in ds_options['locations']]\r\n return_value 
= [('datacenter', datacenters)]\r\n elif 'cpu' == section and 'server' in ds_options['categories']:\r\n results = []\r\n\r\n for item in ds_options['categories']['server']['items']:\r\n results.append((\r\n item['description'],\r\n item['price_id']\r\n ))\r\n\r\n return_value = results\r\n elif 'memory' == section and 'ram' in ds_options['categories']:\r\n ram = []\r\n for option in ds_options['categories']['ram']['items']:\r\n ram.append((int(option['capacity']), option['price_id']))\r\n\r\n return_value = [('memory', ram)]\r\n elif 'server_core' == section and \\\r\n 'server_core' in ds_options['categories']:\r\n mem_options = {}\r\n cpu_regex = re.compile(r'(\\d+) x ')\r\n memory_regex = re.compile(r' - (\\d+) GB Ram', re.I)\r\n\r\n for item in ds_options['categories']['server_core']['items']:\r\n cpu = cpu_regex.search(item['description']).group(1)\r\n memory = memory_regex.search(item['description']).group(1)\r\n\r\n if cpu and memory:\r\n if memory not in mem_options:\r\n mem_options[memory] = []\r\n\r\n mem_options[memory].append((cpu, item['price_id']))\r\n\r\n results = []\r\n for memory in sorted(mem_options.keys(), key=int):\r\n key = memory\r\n\r\n if pretty:\r\n key = memory\r\n\r\n results.append((key, mem_options[memory]))\r\n\r\n return_value = results\r\n elif 'os' == section:\r\n os_regex = re.compile(r'(^[A-Za-z\\s\\/\\-]+) ([\\d\\.]+)')\r\n bit_regex = re.compile(r' \\((\\d+)\\s*bit')\r\n extra_regex = re.compile(r' - (.+)\\(')\r\n\r\n os_list = {}\r\n flat_list = []\r\n\r\n # Loop through the operating systems and get their OS codes\r\n for opsys in ds_options['categories']['os']['items']:\r\n if 'Windows Server' in opsys['description']:\r\n os_code = self._generate_windows_code(opsys['description'])\r\n else:\r\n os_results = os_regex.search(opsys['description'])\r\n\r\n # Skip this operating system if it's not parsable\r\n if os_results is None:\r\n continue\r\n\r\n name = os_results.group(1)\r\n version = os_results.group(2)\r\n bits = bit_regex.search(opsys['description'])\r\n extra_info = extra_regex.search(opsys['description'])\r\n\r\n if bits:\r\n bits = bits.group(1)\r\n if extra_info:\r\n extra_info = extra_info.group(1)\r\n\r\n os_code = self._generate_os_code(name, version, bits,\r\n extra_info)\r\n\r\n name = os_code.split('_')[0]\r\n\r\n if name not in os_list:\r\n os_list[name] = []\r\n\r\n os_list[name].append((os_code, opsys['price_id']))\r\n flat_list.append((os_code, opsys['price_id']))\r\n\r\n if pretty:\r\n results = []\r\n for opsys in sorted(os_list.keys()):\r\n results.append(('os (%s)' % opsys, os_list[opsys]))\r\n\r\n return_value = results\r\n else:\r\n return_value = [('os', flat_list)]\r\n\r\n elif 'disk' == section:\r\n disks = []\r\n type_regex = re.compile(r'^[\\d\\.]+[GT]B\\s+(.+)$')\r\n for disk in ds_options['categories']['disk0']['items']:\r\n disk_type = 'SATA'\r\n disk_type = type_regex.match(disk['description']).group(1)\r\n\r\n disk_type = disk_type.replace('RPM', '').strip()\r\n disk_type = disk_type.replace(' ', '_').upper()\r\n disk_type = str(int(disk['capacity'])) + '_' + disk_type\r\n disks.append((disk_type, disk['price_id'], disk['id']))\r\n\r\n return_value = [('disk', disks)]\r\n elif 'nic' == section:\r\n single = []\r\n dual = []\r\n\r\n for item in ds_options['categories']['port_speed']['items']:\r\n if 'dual' in item['description'].lower():\r\n dual.append((str(int(item['capacity'])) + '_DUAL',\r\n item['price_id']))\r\n else:\r\n single.append((str(int(item['capacity'])),\r\n item['price_id']))\r\n\r\n return_value 
= [('single nic', single), ('dual nic', dual)]\r\n elif 'disk_controller' == section:\r\n options = []\r\n for item in ds_options['categories']['disk_controller']['items']:\r\n text = item['description'].replace(' ', '')\r\n\r\n if 'Non-RAID' == text:\r\n text = 'None'\r\n\r\n options.append((text, item['price_id']))\r\n\r\n return_value = [('disk_controllers', options)]\r\n\r\n return return_value", "def iterate(self, compmgr=None, defaults=True):\n options = set()\n name_str = self.name\n for setting in ProductSetting.select(self.env,\n where={'product': self.product,\n 'section': name_str}):\n option = self.optionxform(setting.option)\n options.add(option)\n yield option\n for parent in self.config.parents:\n for option in parent[self.name].iterate(defaults=False):\n loption = self.optionxform(option)\n if loption not in options:\n options.add(loption)\n yield option\n if defaults:\n for section, option in Option.get_registry(compmgr).keys():\n if section == self.name and \\\n self.optionxform(option) not in options:\n yield option", "def options(self):\n options = {\n o.name: getattr(self, o.name)\n for o in _OPTIONS\n }\n return options", "def options(self, **kwds):\n opts = dict(self.opts)\n for k in kwds:\n try:\n # Ensure that the key exists because we want to change\n # existing options, not add new ones.\n _ = opts[k]\n except KeyError:\n raise ValueError(\"invalid option {!r}\".format(k))\n opts[k] = kwds[k]\n return type(self)(self.cls, opts, self.kwargs)", "def __iter__(self):\n for key in self._group._opts.keys():\n yield key", "def opt_to_dict(opts):\n if isinstance(opts, dict):\n return\n args = list(itertools.chain.from_iterable([x.split(\"=\") for x in opts]))\n opt_d = {k: True if v.startswith('-') else v\n for k,v in zip(args, args[1:]+[\"--\"]) if k.startswith('-')}\n return opt_d", "def common_options(func):\n\n def parse_preset(ctx, param, value):\n return PRESETS.get(value, (None, None))\n\n def parse_private(ctx, param, value):\n return hex_from_b64(value) if value else None\n\n func = click.option('--private', default=None, help='Private.', callback=parse_private)(func)\n\n func = click.option(\n '--preset',\n default=None, help='Preset ID defining prime and generator pair.',\n type=click.Choice(PRESETS.keys()), callback=parse_preset\n )(func)\n\n return func", "def get_options(cls, mode):\n return dict(\n (key, properties[mode])\n for key, properties in cls.__register.items()\n if mode in properties\n )", "def options_by_name(self):\n pass", "def options(self, section):\n try:\n opts = self._sections[section].copy()\n except KeyError:\n raise NoSectionError(section)\n opts.update(self._defaults)\n if '__name__' in opts:\n del opts['__name__']\n return opts.keys()", "def initoptionsdict(cls):\n for i in range(len(clslist)):\n optionsdict.update(dict({clslist[i]: dict({'OPTIONS': dict()})}))", "def _get_options(self) -> Dict[str, Any]:\n # TODO: handle holidays as well\n return {\n \"growth\": self.growth,\n \"changepoints\": self.changepoints and list(self.changepoints.astype('str')),\n \"n_changepoints\": self.n_changepoints,\n \"changepoint_range\": self.changepoint_range,\n \"changepoint_prior_scale\": self.changepoint_prior_scale,\n \"mcmc_samples\": self.mcmc_samples,\n \"interval_width\": self.interval_width,\n \"uncertainty_samples\": self.uncertainty_samples,\n \"yearly_seasonality\": self.yearly_seasonality,\n \"weekly_seasonality\": self.weekly_seasonality,\n \"daily_seasonality\": self.daily_seasonality,\n \"seasonality_mode\": self.seasonality_mode,\n 
\"seasonality_prior_scale\": self.seasonality_prior_scale,\n\n \"seasonalities\": self.seasonalities,\n \"extra_regressors\": self.extra_regressors\n }", "def get_tool_options(name, fmt='json', **kwargs):\n plugin = load_plugins('tool', name)[name]\n return plugin.get_tool_options(fmt, **kwargs)", "def optgroups(self, name, value, attrs=None):\n options = []\n\n for index, (name, product_data) in enumerate(self.product_fields.items()):\n quantity = product_data['quantity']\n name = product_data['name']\n price = product_data['price']\n if index:\n label = 'product_{}'.format(str(index))\n else:\n label = 'product'\n\n options.append({\n 'value': quantity,\n 'price': price,\n 'name': 'products',\n 'label': name,\n 'type': self.input_type,\n 'template_name': self.option_template_name,\n 'wrap_label': True,\n 'index': index\n })\n\n return options", "def getOptionsDict(self, section):\n answer = {}\n for option in self.getOptions(section):\n answer[option] = self.get(section, option)\n return answer", "def do_opt(self, *_args, **kwargs):\n args = list(_args)\n if not args:\n largest = 0\n keys = [key for key in self.conf if not key.startswith(\"_\")]\n for key in keys:\n largest = max(largest, len(key))\n for key in keys:\n print(\"%s : %s\" % (key.rjust(largest), self.conf[key]))\n return\n option = args.pop(0)\n if not args and not kwargs:\n method = getattr(self, \"getopt_\" + option, None)\n if method is None:\n self.getopt_default(option)\n else:\n method()\n else:\n method = getattr(self, \"opt_\" + option, None)\n if method is None:\n print(\"Unrecognized option %r\" % option)\n else:\n method(*args, **kwargs)\n self.save_config()", "def get_general_options(cls):\n for option in cls._general_options.items():\n yield option", "def opts_d2s(opts):\n ostr = []\n for k, v in opts.items():\n if k == \"optional\" and v:\n ostr.append(\"?\")\n elif k == \"atfield\":\n ostr.append(\"{\" + v)\n elif k == \"range\":\n ostr.append(\"[\" + str(v[0]) + \":\" + str(v[1]))\n elif k == \"pattern\":\n ostr.append(\">\" + v)\n elif k == \"format\":\n ostr.append(\"@\" + v)\n elif k == \"aetype\":\n ostr.append(\"#\" + v)\n else:\n print(\"Unknown option '\", o, \"'\")\n return ostr", "def ParseOptions(cls, options, config_object):", "def define_options(self):\n return {\n 'basename': OptionDef(required=True, default_value='keycloak', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='default', allowed_types=[str]),\n 'config': {\n 'service_port': OptionDef(required=True, default_value=8080, allowed_types=[int]),\n 'realm_import': OptionDef(format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, bytes, KData_Secret]),\n 'proxy_address_forwarding': OptionDef(format=OptionDefFormat.KDATA_ENV,\n allowed_types=[bool, *KDataHelper_Env.allowed_kdata()]),\n 'frontend_url': OptionDef(allowed_types=[str]),\n 'admin': {\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n 'db': {\n 'vendor': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'addr': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'port': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[int, *KDataHelper_Env.allowed_kdata()]),\n 'database': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'schema': 
OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n },\n 'container': {\n 'keycloak': OptionDef(required=True, default_value='quay.io/keycloak/keycloak:11.0.2', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'deployment': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def __iter__(self):\n\n return iter(self.opts)", "def opts_load(opts):\n attr_words = []\n kv_words = []\n kv_exprs = {}\n for opt in opts:\n if isinstance(opt, basestring): # attr_word\n attr_words.append(opt)\n elif isinstance(opt, list):\n if len(opt) == 1: # attr_word\n attr_words.append(unicode(opt[0]))\n elif len(opt) == 2 and not opt[1]: # attr_word\n attr_words.append(unicode(opt[0]))\n elif (len(opt) == 2 and\n len(opt[0]) == 1 and\n unicode(opt[0]).isalpha() and\n unicode(opt[1]).isdigit()\n ): # kv_word\n kv_words.append(unicode(opt[0]) + unicode(opt[1]))\n else: # kv_expr\n kv_exprs[unicode(opt[0])] = \" \".join(opt[1:])\n return attr_words, kv_words, kv_exprs", "def gen_default_plot_options(plot_type, fa_label, fn_label,\r\n plot_title=None):\r\n\r\n plot_opts = OrderedDict([\r\n ('title', \"Performance\" if plot_title is None else plot_title),\r\n ('suptitle', ''),\r\n ('figsize', (8, 6)),\r\n ('title_fontsize', 13),\r\n ('suptitle_fontsize', 11),\r\n ('xlim', [0, 1]),\r\n ('ylim', [0, 1]),\r\n ('xticks_label_size', 'medium'),\r\n ('yticks_label_size', 'medium'),\r\n ('xlabel', \"False Alarm Rate [%]\"),\r\n ('xlabel_fontsize', 11),\r\n ('ylabel_fontsize', 11)])\r\n\r\n if plot_type.lower() == \"det\" or plot_type.lower() == \"detpmthr\":\r\n if plot_type.lower() == \"detpmthr\": ### X-axis is the threshold\r\n plot_opts[\"xscale\"] = \"linear\"\r\n plot_opts[\"xlabel\"] = \"PresenceConf Value\"\r\n plot_opts[\"xticks\"] = [0.0, 0.2, 0.4, 0.6, 0.8, 1]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.0\", \"0.2\", \"0.4\", \"0.6\", \"0.8\", \"1.0\"]\r\n \r\n elif (fa_label == \"TFA\"):\r\n plot_opts[\"xscale\"] = \"log\"\r\n plot_opts[\"xlabel\"] = \"Time-based False Alarm\"\r\n plot_opts[\"xticks\"] = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.01\", \"0.02\", \"0.05\", \"0.1\", \"0.2\", \"0.5\", \"1.0\"]\r\n elif (fa_label == \"RFA\"):\r\n plot_opts[\"xscale\"] = \"log\"\r\n plot_opts[\"xlabel\"] = \"Rate of False Alarms (#FAs/minute)\"\r\n plot_opts[\"xticks\"] = [\r\n 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.01\", \"0.02\", \"0.05\", \"0.1\", \"0.2\", \"0.5\", \"1.0\", \"2.0\",\r\n \"5.0\", \"10.0\"]\r\n else:\r\n plot_opts[\"xscale\"] = \"log\"\r\n plot_opts[\"xlabel\"] = \"Prob. of False Alarm\"\r\n plot_opts[\"xticks\"] = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.01\", \"0.02\", \"0.05\", \"0.1\", \"0.2\", \"0.5\", \"1.0\"]\r\n\r\n # Default\r\n plot_opts[\"xlim\"] = (plot_opts[\"xticks\"][0],\r\n plot_opts[\"xticks\"][-1])\r\n plot_opts[\"ylabel\"] = \"Prob. 
of Miss Detection\"\r\n\r\n plot_opts[\"yticks\"] = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6,\r\n 0.7, 0.8, 0.9, 1.0]\r\n plot_opts[\"yticks_labels\"] = [\r\n '0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7',\r\n '0.8', '0.9', '1.0']\r\n plot_opts[\"ylim\"] = (plot_opts[\"yticks\"][0],\r\n plot_opts[\"yticks\"][-1])\r\n \r\n elif plot_type.lower() == \"roc\":\r\n plot_opts[\"xscale\"] = \"linear\"\r\n plot_opts[\"ylabel\"] = \"Correct Detection Rate [%]\"\r\n plot_opts[\"xticks\"] = [\r\n 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\r\n plot_opts[\"yticks\"] = [\r\n 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\r\n plot_opts[\"yticks_labels\"] = ['0', '10', '20', '30', '40', '50',\r\n '60', '70', '80', '90', '100']\r\n plot_opts[\"xticks_labels\"] = ['0', '10', '20', '30', '40', '50',\r\n '60', '70', '80', '90', '100']\r\n\r\n return plot_opts", "def option_registrations_iter(self):\n\n def normalize_kwargs(orig_args, orig_kwargs):\n nkwargs = copy.copy(orig_kwargs)\n dest = self.parse_dest(*orig_args, **nkwargs)\n nkwargs[\"dest\"] = dest\n if not (\"default\" in nkwargs and isinstance(nkwargs[\"default\"], RankedValue)):\n type_arg = nkwargs.get(\"type\", str)\n member_type = nkwargs.get(\"member_type\", str)\n default_val = self.to_value_type(nkwargs.get(\"default\"), type_arg, member_type)\n if isinstance(default_val, (ListValueComponent, DictValueComponent)):\n default_val = default_val.val\n nkwargs[\"default\"] = RankedValue(Rank.HARDCODED, default_val)\n return nkwargs\n\n # Yield our directly-registered options.\n for args, kwargs in self._option_registrations:\n normalized_kwargs = normalize_kwargs(args, kwargs)\n yield args, normalized_kwargs", "def get_test_specific_options(test, BLIND=False, ignore_others=True, ignore_labels=None):\n # Need to change when the total test options changed\n # Color key Experiment 1: (Groups 5-8 are unusually flexible; group 1,6-8 are different strains than 2-5)\n exp1 = { #\"0.0\": r'Controls',\n \"1.0\": r\"Controls WT/SAL male P60-90 Bl/6J/CR\",\n \"2.0\": r\"FI male P60 Taconic\",\n \"3.0\": r\"FR male P60 Taconic\",\n \"4.0\": r\"ALG male P60 Taconic\",\n \"5.0\": r\"ALS male P60 Taconic\",\n \"6.0\": r\"5d COC test at P90 Bl/6CR\",\n \"7.0\": r\"BDNF met/met Ron tested at P60\",\n \"8.0\": r\"P26 males WT Bl/6CR\"}\n # Color Key Experiment 2 data (focusing on angel's mice and bdnf/trkb manipulations) P40-60 ages\n exp2 = {\"1.0\": r\"Controls VEH/SAL/WT\",\n \"2.0\": r\"acute NMPP1pump\",\n \"3.0\": r\"chronic NMPP1pump\",\n \"4.0\": r\"BDNF Val/Val Ron\",\n \"5.0\": r\"P1-23 NMPP1H20\",\n \"6.0\": r\"P1-40 NMPP1H20\",\n \"7.0\": r\"BDNF Met/Met Ron\"}\n if not ignore_others:\n exp1[\"-1.0\"] = r\"OTHERS\"\n exp2[\"-1.0\"] = r\"OTHERS\"\n\n exp1_params = {'UMAP': {'n_neighbors': 10,\n 'min_dist': 0.8,\n 'n_components': 3,\n 'metric': 'euclidean'}}\n exp2_params = {}\n\n TEST_LABEL_ALIAS = {\n 'exp1_label_FI_AL_M': None if BLIND else exp1,\n 'exp2_Angel': None if BLIND else exp2,\n 'age': None,\n 'RL_age': None,\n 'RL_treat_sex': None,\n 'RL_treat': None,\n 'RL_sex': None\n }\n\n IGNORE_LABELS = {\n 'exp1_label_FI_AL_M': ['-1.0', '0.0', '1.0', '3.0', '4.0', '6.0', '7.0'],\n 'exp2_Angel': ['-1.0', '1.0'],\n 'age': None,\n 'RL_age': None,\n 'RL_treat_sex': ['ALS_F', 'FI_F', 'FR_F'],\n 'RL_treat': None,\n 'RL_sex': None\n }\n\n DIM_PARAMS = {\n 'exp1_label_FI_AL_M': exp1_params,\n 'exp2_Angel': exp2_params,\n 'age': {},\n 'RL_age': {},\n 'RL_treat_sex': {},\n 'RL_treat': {},\n 'RL_sex': {}\n }\n\n return TEST_LABEL_ALIAS[test], 
IGNORE_LABELS[test] if ignore_labels is None else ignore_labels,\\\n DIM_PARAMS[test]", "def register_opts(self, opts):\n for opt in opts:\n self.register_opt(opt)", "def parse_options(self,arg_str,opt_str,*long_opts,**kw):\n\n mode = kw.get('mode','string')\n list_all = kw.get('list_all',0)\n\n opts,args = getopt(arg_str.split(),opt_str,*long_opts)\n odict = {}\n for o,a in opts:\n if o.startswith('--'):\n o = o[2:]\n else:\n o = o[1:]\n try:\n odict[o].append(a)\n except AttributeError:\n odict[o] = [odict[o],a]\n except KeyError:\n if list_all:\n odict[o] = [a]\n else:\n odict[o] = a\n opts = Struct(odict)\n\n if mode == 'string':\n args = ' '.join(args)\n elif mode == 'list':\n pass\n else:\n raise ValueError,'incorrect mode given:'+`mode`\n return opts,args", "def opts_render(opts, get_aw=True, get_kw=True, get_ke=False):\n attr_words, kv_words, kv_exprs = opts_load(opts)\n result = []\n if get_aw and attr_words:\n result.extend(attr_words)\n if get_kw and kv_words:\n result.extend(kv_words)\n if get_ke and kv_exprs:\n kve_strs = []\n for item in kv_exprs.items():\n kve_strs.append(unicode(item[0]) + \"=\" + \"'\" + unicode(item[1]) + \"'\")\n result.append(\" \".join(kve_strs))\n return \" \".join(result)", "def parse_opts(opts0):\n opts = {}\n # parse the stuff in \"opts\"\n for opt in opts0:\n parsed = opt.split('=')\n key = parsed[0].strip()\n if len(parsed) > 1:\n # OLD: cmd = parsed[1].strip()\n cmd = '='.join(parsed[1:]).strip()\n else:\n cmd = ''\n opts[key] = cmd\n\n return opts", "def _make_opt_list(opts, group):\n import copy\n import itertools\n\n _opts = [(group, list(itertools.chain(*opts)))]\n return [(g, copy.deepcopy(o)) for g, o in _opts]", "def define_options(self) -> Optional[Any]:\n return {\n 'basename': OptionDef(required=True, default_value='promtail', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='monitoring', allowed_types=[str]),\n 'config': {\n 'prometheus_annotation': OptionDef(required=True, default_value=False, allowed_types=[bool]),\n 'promtail_config': OptionDef(allowed_types=[str, ConfigFile]),\n 'loki_url': OptionDef(allowed_types=[str]),\n 'authorization': {\n 'serviceaccount_create': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n 'serviceaccount_use': OptionDef(allowed_types=[str]),\n 'roles_create': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n 'roles_bind': OptionDef(required=True, default_value=True, allowed_types=[bool]),\n },\n },\n 'container': {\n 'promtail': OptionDef(required=True, default_value='grafana/promtail:2.0.0', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'daemonset': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def get_options_and_reporter(method, init_points, init_vals):\n reporter = get_reporter('default')\n if method in ['GA', 'randGA']:\n options = load_options(ga_optimiser.ga_opt_args, reporter=reporter)\n else:\n raise ValueError('Unknown method %s.'%(method))\n options.pre_eval_points = init_points\n options.pre_eval_vals = init_vals\n options.pre_eval_true_vals = init_vals\n return options, reporter", "def getOptionHashes(options):\n positionalArgs={}\n flaggedArgs={}\n #if options.inputFlag is None and options.taskType is not None:\n # options.inputFlag=programOptionMap[options.taskType].get('in',None)\n if options.inputFlag is not None:\n try:\n positionalArgs[int(options.inputFlag)]='in'\n except ValueError:\n flaggedArgs[options.inputFlag]='in'\n except TypeError:\n for flag in options.inputFlag:\n 
flaggedArgs[flag]='in'\n #if not(options.outputFlags) and options.taskType is not None:\n # options.outputFlags=programOptionMap[options.taskType].get('out',[])\n if options.outputFlags is not None:\n for outputFlag in options.outputFlags:\n try:\n positionalArgs[int(outputFlag)]='out'\n except ValueError:\n flaggedArgs[outputFlag]='out'\n except TypeError:\n for flag in outputFlag:\n flaggedArgs[flag]='out'\n #if not(options.threadsFlag) and options.taskType is not None:\n # options.threadsFlag=programOptionMap[options.taskType].get('threads',None)\n if options.threadsFlag is not None:\n try:\n positionalArgs[int(options.threadsFlag)]='threads'\n except ValueError:\n flaggedArgs[options.threadsFlag]='threads'\n except TypeError:\n for flag in options.threadsFlag:\n flaggedArgs[flag]='threads'\n if options.prefixFlag is not None:\n try:\n positionalArgs[int(options.prefixFlag)]='prefix'\n except ValueError:\n flaggedArgs[options.prefixFlag]='prefix'\n except TypeError:\n for flag in options.prefixFlag:\n flaggedArgs[flag]='prefix'\n if options.rel_paths is not None:\n for rel_path_flag in options.rel_paths:\n try:\n positionalArgs[int(rel_path_flag)]='rel'\n except ValueError:\n flaggedArgs[rel_path_flag]='rel'\n \n return (positionalArgs,flaggedArgs)", "def getCmdOptions():\n #print( \"getCmdOptions() entered...\\n )\"\n my_opts = {}\n err = None\n required_opts = { 'number': True, 'host': True,'port': True, 'help': True, 'debug': True, 'stdout': True, 'logfile': True }\n rc = 1\n\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hdsn:H:p:l:\", [\"help\", \"debug\", \"stdout\", \"number=\", \"host=\", \"port=\", \"logfile=\"]) #@UnusedVariable\n except(getopt.GetoptError, err):\n # print help information and exit:\n print(str(err)) # will print something like \"option -a not recognized\"\n usage()\n sys.exit(2)\n\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n sys.exit()\n elif o in (\"-n\", \"--number\"):\n my_opts['number'] = a\n elif o in (\"-H\", \"--host\"):\n my_opts['host'] = a\n elif o in (\"-p\", \"--port\"):\n my_opts['port'] = a\n required_opts['port'] = True\n elif o in (\"-l\", \"--logfile\"):\n my_opts['logfile'] = a\n elif o in (\"-s\", \"--stdout\"):\n my_opts['stdout'] = True\n elif o in (\"-d\", \"--debug\"):\n my_opts['debug'] = True\n else:\n rc = 0\n assert False, \"unhandled option\"\n #Endif\n #Endfor\n\n if(rc == 0):\n usage()\n\n #for k, v in required_opts.iteritem():\n for k, v in required_opts.items(): #@UnusedVariable\n if(required_opts[k] == False):\n msg = sys.argv[0] + \" Must provide: \" + \"--\" + str(k)\n print(msg)\n rc = 0\n #Endif\n #Endfor\n\n if(rc == 0):\n usage()\n sys.exit(2)\n #Endif\n\n resetInit(my_opts)", "def list_opts():\n return [(constants.MLNX_BAREMETAL_DRIVER_GROUP_NAME, DRIVER_OPTS)]", "def register_cli_opts(self, opts, group=None):\n for opt in opts:\n self.register_cli_opt(opt, group, clear_cache=False)", "def load_all_options(cls, inst):\n for attrname in cls.get_option_names(inst):\n getattr(inst, attrname)", "def build_options(self):\n opts = [\n \"-k rpm.rpmva=off\",\n \"-k apache.log=True\",\n ]\n\n sensitive_keys = {\n self._engine_plugin: 'sensitive_keys',\n 'ovirt_engine_dwh': 'dwh_sensitive_keys',\n }\n if self.configuration['include_sensitive_data']:\n for plugin in sensitive_keys:\n self.configuration[sensitive_keys[plugin]] = ':'\n\n for plugin in sensitive_keys:\n if self.configuration.get(sensitive_keys[plugin]):\n opts.append(\n '-k {plugin}.sensitive_keys={keys}'.format(\n plugin=plugin,\n 
keys=self.configuration.get(sensitive_keys[plugin]),\n )\n )\n\n if self.configuration.get(\"ticket_number\"):\n opts.append(\n \"--ticket-number=%s\" % self.configuration.get(\"ticket_number\")\n )\n\n if self.sos_version < '30':\n opts.append('--report')\n\n if self.configuration.get(\"log_size\"):\n opts.append(\n \"--log-size=%s\" %\n self.configuration.get('log_size')\n )\n else:\n if self.sos_version < '30':\n opts.append('--report')\n opts.append(\"-k general.all_logs=True\")\n elif self.sos_version < '32':\n opts.append(\"-k logs.all_logs=True\")\n else:\n opts.append(\"--all-logs\")\n\n if self.configuration.get(\"upload\"):\n opts.append(\"--upload=%s\" % self.configuration.get(\"upload\"))\n return \" \".join(opts)", "def _parse(self, options):\n\n '''Start by considering all registered options, and validating them\n if they are in the incoming options dict'''\n self.results = {}\n wanted = self.wanted.copy()\n for opt in wanted.keys():\n if opt in options:\n self.results[opt] = self._access(wanted, opt, options[opt])\n\n '''As all registered options, in trac.ini, have composite names,\n consisting of a prefix and the option name separated by a dot,\n now find the starting list of prefixes to consider. Either use\n the value of incoming option of the name found in self.config,\n or use the fixed default prefix from self.prefix'''\n if self.config in options:\n parents = self._parents_to_list(options[self.config])\n del options[self.config]\n else:\n parents = [ self.prefix ]\n\n '''Look up these composite options'''\n if len(wanted) > 0:\n self._inherit(options, parents, wanted, {})\n\n '''Set all still unresolved registered options, to their defaults'''\n for opt in wanted.keys():\n self.results[opt] = (\n wanted[opt].default,\n self._is_default,\n wanted[opt]\n )\n\n '''Move over all UNregistered options as they were passed in.'''\n for opt in options.keys():\n if not opt in self.results:\n self.results[opt] = (\n options[opt],\n self._is_extra,\n None\n )", "def plot_options(cls, obj, percent_size):\n raise NotImplementedError", "def build_options(slot, snacks):\n \n if slot == 'Fast':\n return [\n {'text': 'Pizza', 'value': 'Pizza'},\n {'text': 'Fries', 'value': 'Fries'},\n {'text': 'Franky', 'value': 'Franky'},\n {'text': 'Burger', 'value': 'Burger'},\n {'text': 'Sandwich', 'value': 'Sandwich'}\n \n \n ]\n elif slot == 'drink':\n return [\n {'text': 'Coca-Cola', 'value': 'Coca-cola'},\n {'text': 'Appy', 'value': 'Appy'},\n \n {'text': 'Beer', 'value': 'Beer'},\n {'text': 'Frooti', 'value': 'Frooti'},\n {'text': 'Pepsi', 'value': 'Pepsi'}\n \n ]", "def _extract_options(config, options, *args):\n extract = {}\n for key in args:\n if key not in args:\n continue\n extract[key] = config[key]\n option = getattr(options, key, None)\n if option is not None:\n extract[key] = option\n return extract", "def default_options(cls):\n options_to_report = dict()\n for c in inspect.getmro(cls):\n parameter_names, _, _, defaults, _, _, _ = inspect.getfullargspec(c.__init__)\n if defaults:\n class_options = {parameter_name: defaults[index] for (index, parameter_name) in\n enumerate(parameter_names[-len(defaults):])}\n options_to_report.update(class_options)\n options_to_report.pop('mcmc_moves')\n return options_to_report", "def parse_options(site, darkness, date, plan_range):\n\n # parse targets\n opts = {}\n opts.update(parse_date(date))\n opts.update(parse_range(plan_range))\n opts.update(parse_site(site))\n opts.update(parse_darkness(darkness))\n\n return opts", "def consume_options(cls, 
data, hittype, args):\n opt_position = 0\n data[\"t\"] = hittype # integrate hit type parameter\n if hittype in cls.option_sequence:\n for expected_type, optname in cls.option_sequence[hittype]:\n if opt_position < len(args) and isinstance(args[opt_position],\n expected_type):\n data[optname] = args[opt_position]\n opt_position += 1", "def _collect_options(self, option_index):\n input_option = list()\n if not option_index:\n for k in self._options.keys():\n input_option.append(self._options.get(k))\n else:\n for index in option_index:\n input_option.append(self._options.get(index))\n return input_option", "def _calculate_options(self, options, option_overrides):\n _options = {}\n _options.update(WidgetSettings.OPTIONS)\n _options.update(options if isinstance(options, dict) else {})\n if 'dateFormat' in _options and 'altFormat' not in _options:\n _options['altFormat'] = _options.pop('dateFormat')\n _options.update(option_overrides)\n self.options = _options", "def MakeOpts():\n opt_parser = OptionParser()\n opt_parser.add_option(\"-s\", \"--thermodynamics_source\",\n dest=\"thermodynamics_source\",\n type=\"choice\",\n choices=['observed_only',\n 'hatzi_only',\n 'milo_only',\n 'milo_merged'],\n default=\"milo_merged\",\n help=\"The thermodynamic data to use\")\n opt_parser.add_option(\"-k\", \"--kegg_database_location\", \n dest=\"kegg_db_filename\",\n default=\"../data/public_data.sqlite\",\n help=\"The KEGG database location\")\n opt_parser.add_option(\"-d\", \"--database_location\", \n dest=\"db_filename\",\n default=\"../res/gibbs.sqlite\",\n help=\"The Thermodynamic database location\")\n opt_parser.add_option(\"-t\", \"--thermodynamics_filename\",\n dest=\"thermodynamics_filename\",\n default='../data/thermodynamics/dG0.csv',\n help=\"The name of the thermodynamics file to load.\")\n opt_parser.add_option(\"-i\", \"--input_filename\",\n dest=\"input_filename\",\n default=\"../data/thermodynamics/pathways.txt\",\n help=\"The file to read for pathways to analyze.\")\n opt_parser.add_option(\"-o\", \"--output_filename\",\n dest=\"output_filename\",\n default='../res/thermo_comparison/report.html',\n help=\"Where to write output to.\")\n return opt_parser", "def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')", "def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')", "def list_plugin_options(request):\n options = {}\n options.update(plugin.get_plugin_options(request.matchdict['plugin']))\n options.update(plugin.get_plugin_vizoptions(request.matchdict['plugin']))\n return options", "def get_options(self, key):\n # Get the set of options unique to the Workflow data model\n if key not in self.__workflow_options:\n raise KeyError(\"Key `{}` not understood.\".format(key))\n\n return copy.deepcopy(getattr(self.data, key))", 
"def parse_options(option_list: List[str]) -> Dict[str, Union[int, float, str]]:\n d = dict()\n for o in option_list:\n o = o.split('=')\n if len(o) != 3:\n raise OptionParsingError(\"Not enough elements in the parsed options. Need 3 elements.\")\n key = o[0]\n val = o[1]\n if o[2] not in type_mappings:\n raise OptionParsingError(f\"Unknown option type {o[2]}.\")\n type_func = type_mappings[o[2]]\n d.update({key: type_func(val)})\n return d", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(dqpsk_mod.__init__,\n ('self',), options)", "def to_dict(self):\n import tc\n opts_dict = {}\n for k, v in self.__class__.__dict__.iteritems():\n if isinstance(v, tc.TC):\n opts_dict.update({k: self.__getattribute__(k)})\n return opts_dict", "def create_options(self, saving=False):\n self.get_filters(saving)\n\n options = {}\n if(self.calibration_points):\n options['begin_calibration_index'] = self.begin_ind_calibration_entry.get()\n options['end_calibration_index'] = self.end_ind_calibration_entry.get()\n options['known_distance'] = self.known_distance_entry.get()\n else:\n options['begin_calibration_index']=''\n options['end_calibration_index']=''\n options['known_distance']=''\n options['unit_type'] = (re.sub(r'[^A-Za-z0-9_]', '', self.unit_type_entry.get())).capitalize()\n options['begin_index'] = self.begin_ind_entry.get()\n options['end_index'] = self.end_ind_entry.get()\n options['names_list'] = self.names_list_entry.get()\n options['name_column'] = self.name_column_var.get()\n options['x_column'] = self.x_column_var.get()\n options['y_column'] = self.y_column_var.get()\n options['z_column'] = self.z_column_var.get()\n options['filters'] = self.filter_entry_dict\n options['habitat_image'] = self.habitat_image\n options['x_ratio']=self.x_ratio_entry.get()\n options['y_ratio']=self.y_ratio_entry.get()\n options['z_ratio']=self.z_ratio_entry.get()\n options['sheet_name']=self.sheet_name_var.get()\n\n return options", "def processCmdlineOpts(cmdOpts):\n global opts\n opts = {}\n for i in range(1,len(cmdOpts)):\n if re.match('-i', cmdOpts[i]):\n opts['i'] = cmdOpts[i+1]\n if i not in opts: \n opts['i']='awn.xml'\n return opts", "def get_combinations(self):\n all_steps = self.do_steps()\n self.option = [k for k, v in all_steps.items()]\n result = itertools.product(*(v for k, v in all_steps.items()))\n return result", "def _get_options(ret):\n attrs = {\"host\": \"host\", \"port\": \"port\", \"skip\": \"skip_on_error\", \"mode\": \"mode\"}\n\n _options = salt.returners.get_returner_options(\n __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__\n )\n return _options", "def get_all_options(self): \n return self._options.items()", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(gfsk_mod.__init__,\n ('self',), options)\n extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)", "def getOptionsDict(self, sectionPath):\n\n opts = self.getOptions(sectionPath)\n pathDict = dict( [ ( o, self.getValue( \"%s/%s\" % ( sectionPath, o\n ) ) ) for o in opts ] )\n return pathDict", "def sorted_options(sort_options):\n return [\n {\n \"title\": v[\"title\"],\n \"value\": (\n \"-{0}\".format(k)\n if v.get(\"default_order\", \"asc\") == \"desc\"\n else k\n ),\n }\n for k, v in sorted(\n sort_options.items(), key=lambda x: x[1].get(\"order\", 0)\n )\n ]", "def sorted_options(sort_options):\n return [\n dict(\n title=v['title'],\n value=('-{0}'.format(k)\n if v.get('default_order', 'asc') == 'desc' 
else k),\n )\n for k, v in\n sorted(sort_options.items(), key=lambda x: x[1].get('order', 0))\n ]", "def getclsoptions(cls, tmpcls, session=None):\n if session is not None:\n cls.setclsoptions(tmpcls, session)\n return optionsdict[tmpcls]['OPTIONS']", "def options(self, section):\n try:\n return list(self._dict[section])\n except KeyError as e:\n raise NoSectionError(str(e)) from None", "def find_opts_linux(soup, header):\n\n # Get the source line of the header\n header_el = soup.find(id=header)\n if header_el is None:\n return set()\n header_source_line = soup.find(id=header).sourceline\n\n # Get the element where the options are described\n opts_el = [pre for pre in soup.find_all('pre') if pre.sourceline == header_source_line][0]\n\n opts_lines = opts_el.text.split('\\n')\n opts_lines = [line.lstrip().split(maxsplit=1)[0] for line in opts_lines if line]\n opts = [line for line in opts_lines if line[0] == '-' and line != '-']\n\n # Remove false positives\n opts = {o for o in opts if not o[-1] in NON_OPTS_CHARS}\n\n return opts", "def get_option_chains(\n self, symbol: str, expiration: date, greeks: bool = False\n ) -> List[Quote]:\n url = \"/v1/markets/options/chains\"\n params = {\n \"symbol\": symbol,\n \"expiration\": expiration,\n \"greeks\": greeks,\n }\n\n data = self.get(url, params)\n res = MarketsAPIResponse(**data)\n return res.options.option", "def _get_run_options(self, cmdp, exec_engine=None):\n cmdp.declare_options(\"-v= -e= -w= -u= -p= -i -t -a -P\")\n cmd_options = {\n \"netcoop\": {\n \"fl\": (\"-P\", \"--publish-all\", \"--netcoop\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"portsmap\": {\n \"fl\": (\"-p=\", \"--publish=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"novol\": {\n \"fl\": (\"--novol=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"vol\": {\n \"fl\": (\"-v=\", \"--volume=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"env\": {\n \"fl\": (\"-e=\", \"--env=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"envfile\": {\n \"fl\": (\"--env-file=\",), \"act\": 'E',\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"user\": {\n \"fl\": (\"-u=\", \"--user=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cwd\": {\n \"fl\": (\"-w=\", \"--workdir=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"entryp\": {\n \"fl\": (\"--entrypoint=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cpuset\": {\n \"fl\": (\"--cpuset-cpus=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"hostauth\": {\n \"fl\": (\"--hostauth\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"containerauth\": {\n \"fl\": (\"--containerauth\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"nosysdirs\": {\n \"fl\": (\"--nosysdirs\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"hostenv\": {\n \"fl\": (\"--hostenv\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"bindhome\": {\n \"fl\": (\"--bindhome\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"nometa\": {\n \"fl\": (\"--nometa\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dri\": {\n \"fl\": (\"--dri\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"cmd\": {\n \"fl\": (\"P+\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"volfrom\": {\n \"fl\": (\"--volumes-from=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dns\": {\n \"fl\": 
(\"--dns=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"dnssearch\": {\n \"fl\": (\"--dns-search=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"kernel\": {\n \"fl\": (\"--kernel=\",), \"act\": \"R\",\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"devices\": {\n \"fl\": (\"--device=\",), \"act\": \"E\",\n \"p2\": \"CMD_OPT\", \"p3\": True\n },\n \"nobanner\": {\n \"fl\": (\"--nobanner\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"platform\": {\n \"fl\": (\"--platform=\",), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n },\n \"pull\": {\n \"fl\": (\"--pull=\"), \"act\": 'R',\n \"p2\": \"CMD_OPT\", \"p3\": False\n }\n }\n for option, cmdp_args in list(cmd_options.items()):\n last_value = None\n for cmdp_fl in cmdp_args[\"fl\"]:\n option_value = cmdp.get(cmdp_fl, cmdp_args[\"p2\"],\n cmdp_args[\"p3\"])\n if not exec_engine:\n continue\n if cmdp_args[\"act\"] == \"R\": # action is replace\n if option_value or last_value is None:\n exec_engine.opt[option] = option_value\n elif cmdp_args[\"act\"] == \"E\": # action is extend\n # if option == \"env\":\n # print (type(option_value))\n # print (option_value)\n exec_engine.opt[option].extend(option_value)\n last_value = option_value", "def generate_option_combos(self):\n available_options = list()\n for option in self.options:\n # generate a list of dicts for every value of the option\n tmp = list()\n for value in option.values:\n tmp.append({option.name: value})\n\n available_options.append(tmp)\n\n # generate a list of tuples for each product option combination\n option_combos = list(itertools.product(*available_options))\n\n return option_combos", "def get_hive_options(hive_label, opts):\n section_header = \"{}:{}\".format(PACKAGE_NAME, hive_label)\n if not opts.get(section_header):\n LOG.warning(\"Unable to find section header: %s\", section_header)\n return None\n\n return opts.get(section_header)", "def _parse_cli_options(func):\n options = []\n for param in inspect.signature(func).parameters.values():\n if param.kind not in {param.POSITIONAL_OR_KEYWORD, param.KEYWORD_ONLY}:\n # Only keyword arguments are currently supported\n continue\n\n option_name = '--' + param.name.lower().replace('_', '-').strip('-')\n kwargs = {}\n if param.annotation in {str, int, float, bool}:\n # Only basic types are currently supported\n kwargs['type'] = param.annotation\n\n if param.default != param.empty:\n kwargs['default'] = param.default\n else:\n # If the param doesn't have a default, then it's required\n kwargs['required'] = True\n\n if param.annotation == bool or isinstance(param.default, bool):\n if param.default is True:\n # If the default of a boolean option is ``True``, then add a\n # ``--no-x` off switch\n option_name += '/--no-' + option_name.lstrip('-')\n else:\n # If the default is ``False``, just make it a basic flag\n kwargs['is_flag'] = True\n\n args = (option_name, param.name)\n\n options.append((args, kwargs))\n\n # Reverse it so the decorators are applied in the correct order\n return options[::-1]", "def parse_request_export_options(params): # noqa C901\n boolean_list = [\"true\", \"false\"]\n options = {}\n remove_group_name = (\n params.get(\"remove_group_name\") and params.get(\"remove_group_name\").lower()\n )\n binary_select_multiples = (\n params.get(\"binary_select_multiples\")\n and params.get(\"binary_select_multiples\").lower()\n )\n do_not_split_select_multiples = params.get(\"do_not_split_select_multiples\")\n include_labels = params.get(\"include_labels\", False)\n 
include_reviews = params.get(\"include_reviews\", False)\n include_labels_only = params.get(\"include_labels_only\", False)\n include_hxl = params.get(\"include_hxl\", True)\n value_select_multiples = (\n params.get(\"value_select_multiples\")\n and params.get(\"value_select_multiples\").lower()\n )\n show_choice_labels = (\n params.get(\"show_choice_labels\") and params.get(\"show_choice_labels\").lower()\n )\n\n if include_labels is not None:\n options[\"include_labels\"] = str_to_bool(include_labels)\n\n if include_reviews is not None:\n options[\"include_reviews\"] = str_to_bool(include_reviews)\n\n if include_labels_only is not None:\n options[\"include_labels_only\"] = str_to_bool(include_labels_only)\n\n if include_hxl is not None:\n options[\"include_hxl\"] = str_to_bool(include_hxl)\n\n if remove_group_name in boolean_list:\n options[\"remove_group_name\"] = str_to_bool(remove_group_name)\n else:\n options[\"remove_group_name\"] = False\n\n if params.get(\"group_delimiter\") in [\".\", DEFAULT_GROUP_DELIMITER]:\n options[\"group_delimiter\"] = params.get(\"group_delimiter\")\n else:\n options[\"group_delimiter\"] = DEFAULT_GROUP_DELIMITER\n\n options[\"split_select_multiples\"] = not str_to_bool(do_not_split_select_multiples)\n if binary_select_multiples and binary_select_multiples in boolean_list:\n options[\"binary_select_multiples\"] = str_to_bool(binary_select_multiples)\n\n if \"include_images\" in params:\n options[\"include_images\"] = str_to_bool(params.get(\"include_images\"))\n else:\n options[\"include_images\"] = settings.EXPORT_WITH_IMAGE_DEFAULT\n\n options[\"win_excel_utf8\"] = str_to_bool(params.get(\"win_excel_utf8\"))\n\n if value_select_multiples and value_select_multiples in boolean_list:\n options[\"value_select_multiples\"] = str_to_bool(value_select_multiples)\n\n if show_choice_labels and show_choice_labels in boolean_list:\n options[\"show_choice_labels\"] = str_to_bool(show_choice_labels)\n\n index_tags = get_repeat_index_tags(params.get(\"repeat_index_tags\"))\n if index_tags:\n options[\"repeat_index_tags\"] = index_tags\n\n if \"language\" in params:\n options[\"language\"] = params.get(\"language\")\n\n return options", "def _get_options(self):\n return self.options", "def prune_option_list(opts, keys):\n opt_d = opt_to_dict(opts)\n for k in keys:\n if k in opt_d:\n del opt_d[k]\n return [k for item in opt_d.iteritems() for k in item]" ]
[ "0.5892671", "0.5606828", "0.56063414", "0.550141", "0.54399914", "0.5378535", "0.5304472", "0.52757764", "0.52567726", "0.52553415", "0.52336997", "0.5221113", "0.5217911", "0.51816285", "0.5152726", "0.51514137", "0.5142801", "0.51401424", "0.5134124", "0.5131874", "0.51075584", "0.51026165", "0.5091025", "0.50848156", "0.50646865", "0.50575083", "0.5042416", "0.5036603", "0.5033443", "0.5019094", "0.5002927", "0.49837875", "0.49688855", "0.4965133", "0.49391428", "0.49207357", "0.49176574", "0.49135715", "0.4901087", "0.48798376", "0.48619986", "0.48531368", "0.4852496", "0.48470426", "0.48401418", "0.48264793", "0.48263368", "0.4817896", "0.47939062", "0.47843394", "0.47764224", "0.47704437", "0.47700447", "0.4765911", "0.47657135", "0.476458", "0.4763726", "0.4749087", "0.47421238", "0.47380382", "0.47311172", "0.47238514", "0.47045967", "0.46992978", "0.46992815", "0.46989116", "0.469841", "0.4690082", "0.46827012", "0.46764967", "0.46764624", "0.46563277", "0.4653522", "0.4647298", "0.4647298", "0.46459267", "0.46309862", "0.46304506", "0.46210745", "0.46204418", "0.4613769", "0.4605514", "0.46052623", "0.4588375", "0.45867643", "0.45842308", "0.4584145", "0.4580183", "0.45714918", "0.45683426", "0.4561845", "0.4559259", "0.4552258", "0.45518604", "0.45471585", "0.45389208", "0.4527889", "0.45269138", "0.4526026", "0.4522453" ]
0.86038965
0
Looks up options for an object, including plot defaults; keyfn determines the returned key, otherwise a None key is used.
def lookup(x): options = cls.lookup_options(x, opt_type) selected = {o: options.options[o] for o in opts if o in options.options} if opt_type == 'plot' and defaults: plot = Store.registry[cls.backend].get(type(x)) selected['defaults'] = {o: getattr(plot, o) for o in opts if o not in selected and hasattr(plot, o)} key = keyfn(x) if keyfn else None return (key, selected)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _traverse_options(cls, obj, opt_type, opts, specs=None, keyfn=None, defaults=True):\n def lookup(x):\n \"\"\"\n Looks up options for object, including plot defaults,\n keyfn determines returned key otherwise None key is used.\n \"\"\"\n options = cls.lookup_options(x, opt_type)\n selected = {o: options.options[o]\n for o in opts if o in options.options}\n if opt_type == 'plot' and defaults:\n plot = Store.registry[cls.backend].get(type(x))\n selected['defaults'] = {o: getattr(plot, o) for o in opts\n if o not in selected and hasattr(plot, o)}\n key = keyfn(x) if keyfn else None\n return (key, selected)\n\n # Traverse object and accumulate options by key\n traversed = obj.traverse(lookup, specs)\n options = defaultdict(lambda: defaultdict(list))\n default_opts = defaultdict(lambda: defaultdict(list)) \n for key, opts in traversed:\n defaults = opts.pop('defaults', {})\n for opt, v in opts.items():\n options[key][opt].append(v)\n for opt, v in defaults.items():\n default_opts[key][opt].append(v)\n\n # Merge defaults into dictionary if not explicitly specified\n for key, opts in default_opts.items():\n for opt, v in opts.items():\n if opt not in options[key]:\n options[key][opt] = v\n return options if keyfn else options[None]", "def get_plot_kwargs(cfg, option, key=None):\n plot_kwargs = cfg.get(option, {}).get('plot_kwargs', {})\n if key is None:\n return plot_kwargs\n if '_xy' in option:\n additional_plot_kwargs = cfg.get('additional_plot_kwargs_xy_plots', {})\n if key in additional_plot_kwargs:\n return {**plot_kwargs, **additional_plot_kwargs[key]}\n subkey = key.split(SEP)[-1]\n if subkey in additional_plot_kwargs:\n return {**plot_kwargs, **additional_plot_kwargs[subkey]}\n return deepcopy(plot_kwargs)", "def _plot_option_logic(plot_options_from_args):\n default_plot_options = copy.deepcopy(DEFAULT_PLOT_OPTIONS)\n file_options = tools.get_config_file()\n session_options = session.get_session_plot_options()\n plot_options_from_args = copy.deepcopy(plot_options_from_args)\n\n # Validate options and fill in defaults w world_readable and sharing\n for option_set in [plot_options_from_args, session_options, file_options]:\n utils.validate_world_readable_and_sharing_settings(option_set)\n utils.set_sharing_and_world_readable(option_set)\n\n user_plot_options = {}\n user_plot_options.update(default_plot_options)\n user_plot_options.update(file_options)\n user_plot_options.update(session_options)\n user_plot_options.update(plot_options_from_args)\n user_plot_options = {\n k: v\n for k, v in user_plot_options.items()\n if k in default_plot_options or k == \"filename\"\n }\n\n return user_plot_options", "def __getitem__(self, key):\n if hasattr(self, key):\n return getattr(self, key)\n else:\n raise KeyError('No such option `{}`.'.format(key))", "def gen_default_plot_options(plot_type, fa_label, fn_label,\r\n plot_title=None):\r\n\r\n plot_opts = OrderedDict([\r\n ('title', \"Performance\" if plot_title is None else plot_title),\r\n ('suptitle', ''),\r\n ('figsize', (8, 6)),\r\n ('title_fontsize', 13),\r\n ('suptitle_fontsize', 11),\r\n ('xlim', [0, 1]),\r\n ('ylim', [0, 1]),\r\n ('xticks_label_size', 'medium'),\r\n ('yticks_label_size', 'medium'),\r\n ('xlabel', \"False Alarm Rate [%]\"),\r\n ('xlabel_fontsize', 11),\r\n ('ylabel_fontsize', 11)])\r\n\r\n if plot_type.lower() == \"det\" or plot_type.lower() == \"detpmthr\":\r\n if plot_type.lower() == \"detpmthr\": ### X-axis is the threshold\r\n plot_opts[\"xscale\"] = \"linear\"\r\n plot_opts[\"xlabel\"] = \"PresenceConf Value\"\r\n 
plot_opts[\"xticks\"] = [0.0, 0.2, 0.4, 0.6, 0.8, 1]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.0\", \"0.2\", \"0.4\", \"0.6\", \"0.8\", \"1.0\"]\r\n \r\n elif (fa_label == \"TFA\"):\r\n plot_opts[\"xscale\"] = \"log\"\r\n plot_opts[\"xlabel\"] = \"Time-based False Alarm\"\r\n plot_opts[\"xticks\"] = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.01\", \"0.02\", \"0.05\", \"0.1\", \"0.2\", \"0.5\", \"1.0\"]\r\n elif (fa_label == \"RFA\"):\r\n plot_opts[\"xscale\"] = \"log\"\r\n plot_opts[\"xlabel\"] = \"Rate of False Alarms (#FAs/minute)\"\r\n plot_opts[\"xticks\"] = [\r\n 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.01\", \"0.02\", \"0.05\", \"0.1\", \"0.2\", \"0.5\", \"1.0\", \"2.0\",\r\n \"5.0\", \"10.0\"]\r\n else:\r\n plot_opts[\"xscale\"] = \"log\"\r\n plot_opts[\"xlabel\"] = \"Prob. of False Alarm\"\r\n plot_opts[\"xticks\"] = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1]\r\n plot_opts[\"xticks_labels\"] = [\r\n \"0.01\", \"0.02\", \"0.05\", \"0.1\", \"0.2\", \"0.5\", \"1.0\"]\r\n\r\n # Default\r\n plot_opts[\"xlim\"] = (plot_opts[\"xticks\"][0],\r\n plot_opts[\"xticks\"][-1])\r\n plot_opts[\"ylabel\"] = \"Prob. of Miss Detection\"\r\n\r\n plot_opts[\"yticks\"] = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6,\r\n 0.7, 0.8, 0.9, 1.0]\r\n plot_opts[\"yticks_labels\"] = [\r\n '0', '0.1', '0.2', '0.3', '0.4', '0.5', '0.6', '0.7',\r\n '0.8', '0.9', '1.0']\r\n plot_opts[\"ylim\"] = (plot_opts[\"yticks\"][0],\r\n plot_opts[\"yticks\"][-1])\r\n \r\n elif plot_type.lower() == \"roc\":\r\n plot_opts[\"xscale\"] = \"linear\"\r\n plot_opts[\"ylabel\"] = \"Correct Detection Rate [%]\"\r\n plot_opts[\"xticks\"] = [\r\n 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\r\n plot_opts[\"yticks\"] = [\r\n 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\r\n plot_opts[\"yticks_labels\"] = ['0', '10', '20', '30', '40', '50',\r\n '60', '70', '80', '90', '100']\r\n plot_opts[\"xticks_labels\"] = ['0', '10', '20', '30', '40', '50',\r\n '60', '70', '80', '90', '100']\r\n\r\n return plot_opts", "def plot_options(cls, obj, percent_size):\n raise NotImplementedError", "def __getitem__(self, option):\n if option not in self.__dict__.keys():\n raise KeyError(\"Option '{}' not found.\".format(option))\n\n return self.__dict__[option]", "def opt(self, key, default=False):\n if key not in self.options:\n return default\n return self.options.get(key)", "def get_options(self, key):\n if key in self.options.get_option_names():\n return self.options\n\n try:\n scope, scoped_key = key.split('.')\n except ValueError:\n return None\n\n if scope == 'input' and scoped_key in self.input.options.get_option_names():\n return self.input.options\n elif scope == 'output' and scoped_key in self.output.options.get_option_names():\n return self.output.options\n elif scope == 'exploit' and scoped_key in self.exploit.options.get_option_names():\n return self.exploit.options\n else:\n return None", "def get_options():\n user_options = {}\n user_options['surface'] = {'label': 'Surface',\n 'type': 'stringList',\n 'default': 'bcc100',\n 'values': surface_selections}\n\n user_options['metal'] = {'label': 'Metal',\n 'type': 'string',\n 'default': 'Au'}\n\n user_options['a'] = {'label': 'Lattice Constant',\n 'type': 'float',\n 'precision': 3,\n 'suffix': 'Å'}\n\n user_options['size-x'] = {'label': 'Size X',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-y'] = {'label': 'Size Y',\n 'type': 'integer',\n 'default': 5}\n\n user_options['size-z'] = {'label': 
'Size Z',\n 'type': 'integer',\n 'default': 3}\n\n user_options['vacuum'] = {'label': 'Vacuum distance',\n 'type': 'float',\n 'precision': 1,\n 'suffix': 'Å'}\n\n user_options['orthogonal'] = {'label': 'Orthogonal',\n 'type': 'stringList',\n 'default': 'True',\n 'values': ['True', 'False']}\n\n return {'userOptions': user_options }", "def _default_options(cls) -> Options:\n options = super()._default_options()\n\n options.curve_fitter = multi_curve_fit\n options.data_processor = None\n options.normalization = False\n options.x_key = \"xval\"\n options.plot = True\n options.axis = None\n options.xlabel = None\n options.ylabel = None\n options.xlim = None\n options.ylim = None\n options.xval_unit = None\n options.yval_unit = None\n options.result_parameters = None\n options.return_data_points = False\n options.curve_plotter = \"mpl_single_canvas\"\n options.style = PlotterStyle()\n\n # automatically populate initial guess and boundary\n fit_params = cls._fit_params()\n options.p0 = {par_name: None for par_name in fit_params}\n options.bounds = {par_name: None for par_name in fit_params}\n\n return options", "def get_option_key(self, iprop, ioption):\n key = _pychidg.f90wrap_get_option_key(self=self._handle, iprop=iprop, \\\n ioption=ioption)\n return key", "def _get_options(self) -> Dict[str, Any]:\n # TODO: handle holidays as well\n return {\n \"growth\": self.growth,\n \"changepoints\": self.changepoints and list(self.changepoints.astype('str')),\n \"n_changepoints\": self.n_changepoints,\n \"changepoint_range\": self.changepoint_range,\n \"changepoint_prior_scale\": self.changepoint_prior_scale,\n \"mcmc_samples\": self.mcmc_samples,\n \"interval_width\": self.interval_width,\n \"uncertainty_samples\": self.uncertainty_samples,\n \"yearly_seasonality\": self.yearly_seasonality,\n \"weekly_seasonality\": self.weekly_seasonality,\n \"daily_seasonality\": self.daily_seasonality,\n \"seasonality_mode\": self.seasonality_mode,\n \"seasonality_prior_scale\": self.seasonality_prior_scale,\n\n \"seasonalities\": self.seasonalities,\n \"extra_regressors\": self.extra_regressors\n }", "def _get_option(self, arg_name: str) -> Any:\n try:\n return getattr(self, f\"__{arg_name}\")\n except AttributeError as ex:\n raise AnalysisError(\n f\"The argument {arg_name} is selected but not defined. 
\"\n \"This key-value pair should be defined in the analysis option.\"\n ) from ex", "def get_option(self, key):\n return self.options[key]", "def get(self, **kws):\n assert len (kws)==1,`kws`\n key, default = kws.items()[0]\n if key not in self.__dict__:\n if VERBOSE:\n print 'Options.get: adding new option: %s=%r' % (key, default)\n self.__dict__[key] = default\n value = self.__dict__[key]\n if value is None:\n value = self.__dict__[key] = default\n return value", "def get_opt(self, widget):\r\n opt = dict()\r\n opt[\"state\"] = widget[\"state\"]\r\n opt[\"fg\"] = widget[\"fg\"]\r\n opt[\"bg\"] = widget[\"bg\"]\r\n return opt", "def get_option_descriptor(self, key):\n return self._options.get(key)", "def translate_options(cls, options):\n used_options = cls.default_options()\n if options is None:\n options = {}\n for key, val in options.items():\n if key not in used_options:\n raise KeyError(f\"Cannot handle key {key}.\")\n used_options[key] = val\n return used_options", "def __getattr__(self, name):\n if name in PLOTTER_NAMES:\n self._plotter = name\n return self\n if name in self.opts:\n return self.opts[name]\n raise AttributeError(sub(\n \"No plotting option or attribute '{}'\", name))", "def get_options(self, panel=\"\"):\n return dict()", "def _generate_options(self, **kwargs: Any) -> dict:\n raise NotImplementedError", "def create_options(self, saving=False):\n self.get_filters(saving)\n\n options = {}\n if(self.calibration_points):\n options['begin_calibration_index'] = self.begin_ind_calibration_entry.get()\n options['end_calibration_index'] = self.end_ind_calibration_entry.get()\n options['known_distance'] = self.known_distance_entry.get()\n else:\n options['begin_calibration_index']=''\n options['end_calibration_index']=''\n options['known_distance']=''\n options['unit_type'] = (re.sub(r'[^A-Za-z0-9_]', '', self.unit_type_entry.get())).capitalize()\n options['begin_index'] = self.begin_ind_entry.get()\n options['end_index'] = self.end_ind_entry.get()\n options['names_list'] = self.names_list_entry.get()\n options['name_column'] = self.name_column_var.get()\n options['x_column'] = self.x_column_var.get()\n options['y_column'] = self.y_column_var.get()\n options['z_column'] = self.z_column_var.get()\n options['filters'] = self.filter_entry_dict\n options['habitat_image'] = self.habitat_image\n options['x_ratio']=self.x_ratio_entry.get()\n options['y_ratio']=self.y_ratio_entry.get()\n options['z_ratio']=self.z_ratio_entry.get()\n options['sheet_name']=self.sheet_name_var.get()\n\n return options", "def _default_options(cls):\n default_options = super()._default_options()\n default_options.data_processor = dp.DataProcessor(\n input_key=\"counts\",\n data_actions=[dp.Probability(\"1\"), dp.BasisExpectationValue()],\n )\n default_options.plotter.set_figure_options(\n xlabel=\"Flat top width\",\n ylabel=\"Pauli expectation values\",\n xval_unit=\"s\",\n ylim=(-1, 1),\n )\n default_options.data_subfit_map = {\n \"x\": {\"meas_basis\": \"x\"},\n \"y\": {\"meas_basis\": \"y\"},\n \"z\": {\"meas_basis\": \"z\"},\n }\n\n return default_options", "def do_opt(self, *_args, **kwargs):\n args = list(_args)\n if not args:\n largest = 0\n keys = [key for key in self.conf if not key.startswith(\"_\")]\n for key in keys:\n largest = max(largest, len(key))\n for key in keys:\n print(\"%s : %s\" % (key.rjust(largest), self.conf[key]))\n return\n option = args.pop(0)\n if not args and not kwargs:\n method = getattr(self, \"getopt_\" + option, None)\n if method is None:\n 
self.getopt_default(option)\n else:\n method()\n else:\n method = getattr(self, \"opt_\" + option, None)\n if method is None:\n print(\"Unrecognized option %r\" % option)\n else:\n method(*args, **kwargs)\n self.save_config()", "def prompt_options_dict(options=None,\n by_descr=True,\n prompt=\"Select from the following options\"):\n if 'Bullet' not in globals():\n raise RuntimeError(\"[-] can't use Bullet on Windows\")\n if options is None:\n raise RuntimeError('[-] no options specified')\n if not isinstance(options[0], dict):\n raise RuntimeError('[-] options is not a list of dictionaries')\n choices = ['<CANCEL>'] + [\n opt['descr']\n if by_descr\n else opt['ident']\n for opt in options\n ]\n cli = Bullet(prompt=f'\\n{prompt}',\n choices=choices,\n indent=0,\n align=2,\n margin=1,\n shift=0,\n bullet=\"→\",\n pad_right=5)\n choice = cli.launch()\n if choice == \"<CANCEL>\":\n logger.info('[-] cancelled selection of choice')\n return None\n selected = find(options,\n 'descr' if by_descr else 'ident',\n choice)\n try:\n return options[selected]['ident']\n except Exception as exc: # noqa\n return None", "def option(self, key):\n if self.integration is None:\n return None\n return self.configuration.get(f'{self.get_config_name()}.{key}')", "def get_option(self, key, default=None):\n current_profile = \"profiles.{}.{}\".format(self.get_profile(), key)\n global_profile = \"profiles.global.{}\".format(key)\n return self.__get_option__(current_profile, self.__get_option__(global_profile, default))", "def get(self, key):\n try:\n if key == key.upper():\n return self.config[key]\n return self.options[key]\n except KeyError:\n return None", "def options(name, option=None, value=None, opt_dict=None):\n\n if isinstance(name, int):\n name = list(pytplot.data_quants.keys())[name]\n\n if opt_dict is None:\n opt_dict = {option: value}\n else:\n if not isinstance(opt_dict,dict):\n print(\"dict must be a dictionary object. Returning.\")\n return\n\n if not isinstance(name, list):\n name = [name]\n\n for i in name:\n\n for option, value in opt_dict.items():\n\n # Lower case option for consistency\n option = option.lower()\n\n if i not in pytplot.data_quants.keys():\n print(str(i) + \" is currently not in pytplot.\")\n return\n\n if option == 'color':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['extras']['line_color'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['line_color'] = [value]\n\n if option == 'link':\n if isinstance(value, list):\n pytplot.link(i, value[1], value[0])\n\n if option == 'colormap':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['extras']['colormap'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['colormap'] = [value]\n\n if option == 'spec':\n _reset_plots(i)\n if value:\n if 'spec_bins' not in pytplot.data_quants[i].coords:\n print(f\"{i} does not contain coordinates for spectrogram plotting. Continuing...\")\n continue\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec'] = value\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec'] = value\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n # Set the default dimension to plot by. 
All others will be summed over.\n if 'spec_dim_to_plot' not in pytplot.data_quants[i].attrs['plot_options']['extras']:\n if 'v' in pytplot.data_quants[i].coords:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v'\n elif 'v2' in pytplot.data_quants[i].coords:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v2'\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = 'v1'\n\n if option == 'alt':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['alt'] = value\n\n if option == 'map':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['map'] = value\n\n if option == 'legend_names':\n if isinstance(value, list):\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['legend_names'] = value\n else:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['legend_names'] = [value]\n\n if option == 'xlog_slice':\n if value:\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_axis_type'] = 'linear'\n\n if option == 'ylog':\n negflag = 0 # _ylog_check(data_quants, value, i)\n if negflag == 0 and value:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_axis_type'] = 'linear'\n\n if option == 'ylog_slice':\n if value:\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_axis_type'] = 'linear'\n\n if option == 'zlog':\n # check for negative values and warn the user that they will be ignored\n negflag = _zlog_check(pytplot.data_quants, value, i)\n if negflag != 0 and value:\n print(str(i) + ' contains negative values; setting the z-axis to log scale will cause the negative values to be ignored on figures.')\n\n if value:\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_axis_type'] = 'log'\n else:\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_axis_type'] = 'linear'\n\n if option == 'nodata':\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['visible'] = value\n\n if option == 'line_style':\n if value == 0 or value == 'solid_line':\n to_be = []\n elif value == 1 or value == 'dot':\n to_be = [2, 4]\n elif value == 2 or value == 'dash':\n to_be = [6]\n elif value == 3 or value == 'dash_dot':\n to_be = [6, 4, 2, 4]\n elif value == 4 or value == 'dash_dot_dot_dot':\n to_be = [6, 4, 2, 4, 2, 4, 2, 4]\n elif value == 5 or value == 'long_dash':\n to_be = [10]\n else:\n to_be=value\n\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['line_style'] = to_be\n\n if(value == 6 or value == 'none'):\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['visible'] = False\n\n if option == 'char_size':\n pytplot.data_quants[i].attrs['plot_options']['extras']['char_size'] = value\n\n if option == 'name':\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['name'] = value\n\n if option == \"panel_size\":\n if value > 1 or value <= 0:\n print(\"Invalid value. Should be (0, 1]\")\n return\n pytplot.data_quants[i].attrs['plot_options']['extras']['panel_size'] = value\n\n if option == 'basemap':\n pytplot.data_quants[i].attrs['plot_options']['extras']['basemap'] = value\n\n if option == 'alpha':\n if value > 1 or value < 0:\n print(\"Invalid value. 
Should be [0, 1]\")\n return\n pytplot.data_quants[i].attrs['plot_options']['extras']['alpha'] = value\n\n if option == 'thick':\n pytplot.data_quants[i].attrs['plot_options']['line_opt']['line_width'] = value\n\n if option == 'yrange' or option == 'y_range':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = [value[0], value[1]]\n\n if option == 'zrange' or option == 'z_range':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['z_range'] = [value[0], value[1]]\n\n if option == 'xrange_slice':\n pytplot.data_quants[i].attrs['plot_options']['slice_xaxis_opt']['xi_range'] = [value[0], value[1]]\n\n if option == 'yrange_slice':\n pytplot.data_quants[i].attrs['plot_options']['slice_yaxis_opt']['yi_range'] = [value[0], value[1]]\n\n if option == 'xtitle':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['axis_label'] = value\n\n if option == 'ytitle':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['axis_label'] = value\n\n if option == 'ztitle':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['axis_label'] = value\n\n if option == 'xsubtitle':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['axis_subtitle'] = value\n\n if option == 'ysubtitle':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['axis_subtitle'] = value\n\n if option == 'zsubtitle':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['axis_subtitle'] = value\n\n if option == 'ybar':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ybar'] = value\n\n if option == 'ybar_color':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ybar'] = value\n\n if option == 'ybar_size':\n pytplot.data_quants[i].attrs['plot_options']['extras']['ysize'] = value\n\n if option == 'plotter':\n _reset_plots(i)\n pytplot.data_quants[i].attrs['plot_options']['extras']['plotter'] = value\n\n if option == 'crosshair_x':\n pytplot.data_quants[i].attrs['plot_options']['xaxis_opt']['crosshair'] = value\n\n if option == 'crosshair_y':\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['crosshair'] = value\n\n if option == 'crosshair_z':\n pytplot.data_quants[i].attrs['plot_options']['zaxis_opt']['crosshair'] = value\n\n if option == 'static':\n pytplot.data_quants[i].attrs['plot_options']['extras']['static'] = value\n\n if option == 'static_tavg':\n pytplot.data_quants[i].attrs['plot_options']['extras']['static_tavg'] = [value[0], value[1]]\n\n if option == 't_average':\n pytplot.data_quants[i].attrs['plot_options']['extras']['t_average'] = value\n\n if option == 'spec_dim_to_plot' or option == 'spec_plot_dim':\n if len(pytplot.data_quants[i].values.shape) <= 2:\n print(f\"Must have more than 2 coordinate dimensions to set spec_coord_to_plot for {pytplot.data_quants[i].name}\")\n continue\n\n # Set the 'spec_dim_to_plot' value to either 'v' or 'v1', 'v2', 'v3', etc.\n if isinstance(value, int):\n coord_to_plot = \"v\" + str(value)\n if coord_to_plot not in pytplot.data_quants[i].coords:\n if value == 1:\n coord_to_plot = \"v\"\n if coord_to_plot not in pytplot.data_quants[i].coords:\n print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n continue\n else:\n print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n continue\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = coord_to_plot\n elif isinstance(value, str):\n coord_to_plot = value\n if coord_to_plot not in pytplot.data_quants[i].coords:\n print(f\"Dimension {value} not found in {pytplot.data_quants[i].name}\")\n 
continue\n else:\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_dim_to_plot'] = value\n\n # If we're plotting against different coordinates, we need to change what we consider the \"spec_bins\"\n pytplot.data_quants[i].coords['spec_bins'] = pytplot.data_quants[i].coords[coord_to_plot]\n pytplot.data_quants[i].attrs['plot_options']['yaxis_opt']['y_range'] = utilities.get_y_range(pytplot.data_quants[i])\n\n if option == 'spec_slices_to_use':\n if not isinstance(value, dict):\n print(\"Must be a dictionary object in the format {'v2':15, 'v3':7}\")\n return\n else:\n for coord in value:\n if coord not in pytplot.data_quants[i].coords:\n print(f\"Dimension {coord} not found in {pytplot.data_quants[i].name}\")\n continue\n\n pytplot.data_quants[i].attrs['plot_options']['extras']['spec_slices_to_use'] = value\n\n if option == 'border':\n pytplot.data_quants[i].attrs['plot_options']['extras']['border'] = value\n\n if option == 'var_label_ticks':\n pytplot.data_quants[i].attrs['plot_options']['var_label_ticks'] = value\n\n\n return", "def __getitem__(self, item):\n if item not in self._moptions:\n raise KeyError(\"Invalid option '%s'.\" % item)\n return self._runopts.get(item)", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(dqpsk_mod.__init__,\n ('self',), options)", "def _get_options(self, struct, field):\n return struct.DESCRIPTOR.fields_by_name[field].GetOptions() if hasattr(struct, \"DESCRIPTOR\") else None", "def test_kpoints_params_auto_file_object(kpoints_parser_auto_file_object):\n\n kpoints = kpoints_parser_auto_file_object.get_dict()\n assert kpoints['mode'] == 'automatic'\n assert kpoints['comment'] == 'Example file'\n assert kpoints['divisions'] == [4, 4, 4]\n assert kpoints['shifts'] == None\n assert kpoints['points'] == None\n assert kpoints['centering'] == 'Gamma'\n assert kpoints['tetra'] == None\n assert kpoints['tetra_volume'] == None\n assert kpoints['num_kpoints'] == 0", "def GetKey(self, obj, keyName):\n\n key = (self._configKey is None and [\"Persistence_Options\"] or [self._configKey])[0]\n\n key += CONFIG_PATH_SEPARATOR + obj.GetKind()\n key += CONFIG_PATH_SEPARATOR + obj.GetName()\n key += CONFIG_PATH_SEPARATOR + keyName\n\n return key", "def common_options(func):\n\n def parse_preset(ctx, param, value):\n return PRESETS.get(value, (None, None))\n\n def parse_private(ctx, param, value):\n return hex_from_b64(value) if value else None\n\n func = click.option('--private', default=None, help='Private.', callback=parse_private)(func)\n\n func = click.option(\n '--preset',\n default=None, help='Preset ID defining prime and generator pair.',\n type=click.Choice(PRESETS.keys()), callback=parse_preset\n )(func)\n\n return func", "def _options(self):\n return", "def _experiment_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"experiment_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None", "def my(d,k):\n try:\n return d[k]\n except KeyError:\n return CONFIG_DEFAULTS[k]", "def get_default(section, option=\"\"):\n\tif not option:\n\t\tif defaults.has_key(section):\n\t\t\treturn defaults[section]\n\telse:\n\t\tif defaults.has_key(section):\n\t\t\tif defaults[section].has_key(option):\n\t\t\t\treturn defaults[section][option]\n\treturn None", "def get_default_options():\n out = _SFrame({'name': ['method', 'feature_model', 'verbose'],\n 'default_value' : 
['lsh', 'auto', 'True'],\n 'lower_bound': [None, None, 0],\n 'upper_bound': [None, None, 1],\n 'description': ['Method for searching reference data',\n 'Trained model for extracting features from raw data objects',\n 'Whether progress output is printed'],\n 'parameter_type': ['string', 'model', 'boolean']})\n\n return out", "def underride(d, **options):\n if d is None:\n d = {}\n\n for key, val in options.items():\n d.setdefault(key, val)\n\n return d", "def fittingOptionGetter(self) -> Optional[FittingOptions]:\n if DEBUG:\n print(\"GUI...: \", 'getter in gui called')\n # get the current model selected\n model = self.getCurrentModel()\n if model is None:\n return None\n # get the parameters for current model\n parameters = lmParameters()\n for i in range(self.param_table.rowCount()):\n table_item = self.param_table.verticalHeaderItem(i)\n assert isinstance(table_item, QtWidgets.QTableWidgetItem)\n param_name = table_item.text()\n param = lmParameter(param_name)\n item0 = self.param_table.cellWidget(i, 0)\n item1 = self.param_table.cellWidget(i, 1)\n item2 = self.param_table.cellWidget(i, 2)\n item3 = self.param_table.cellWidget(i, 3)\n assert isinstance(item0, QtWidgets.QCheckBox)\n assert isinstance(item1, OptionSpinbox)\n assert isinstance(item2, NumberInput)\n assert isinstance(item3, NumberInput)\n param.vary = not item0.isChecked()\n param.value = item1.value()\n param.min = item2.value()\n param.max = item3.value()\n parameters[param_name] = param\n\n fitting_options = FittingOptions(model, parameters, self.dry_run)\n if DEBUG:\n print(\"GUI...: \", 'getter in gui got', fitting_options)\n return fitting_options", "def _widget_cget(self, option, **kwargs):\n cook_b = kwargs.get(\"cook\", True)\n default_b = kwargs.get(default_s, False)\n if default_b:\n value = self.widget.config(option)[-2]\n return value\n try: # if True: #\n value = self.widget.cget(option)\n except tk.TclError: # else: #\n value = None\n if cook_b:\n # <border object: >\n if option in (\n activebackground_s,\n background_s,\n font_s,\n highlightbackground_s,\n ):\n value = str(value)\n # <color object: >\n elif option in (\n activeforeground_s,\n disabledforeground_s,\n foreground_s,\n highlightcolor_s,\n ):\n value = str(value)\n # <font object: >\n elif option in (font_s,):\n value = str(value)\n # <index object: >\n elif option in (\n anchor_s,\n compound_s,\n default_s,\n justify_s,\n relief_s,\n state_s,\n ):\n value = str(value)\n # <pixel object: >\n elif option in (\n borderwidth_s,\n highlightthickness_s,\n padx_s,\n pady_s,\n wraplength_s,\n ):\n value = str(value)\n # int\n elif option in (\n height_s,\n repeatinterval_s,\n repeatdelay_s,\n underline_s,\n width_s,\n ):\n value = int(value)\n # str\n elif option in (\n bitmap_s,\n command_s,\n cursor_s,\n image_s,\n overrelief_s,\n takefocus_s,\n text_s,\n textvariable_s,\n ):\n if option == command_s:\n if self.command:\n value = self.command\n elif \"<lambda>\" not in value:\n value = eval(value.lstrip(string.digits))\n else:\n value = \"<lambda>\" # NotImplemented #\n elif option == textvariable_s:\n if self.textvariable:\n value = self.textvariable\n elif value:\n try:\n value = eval(str(value))\n except NameError:\n pass\n else:\n value = str(value)\n else:\n pass\n return value", "def get_defaults(cls, mode):\n mode_defaults = getattr(cls, f\"{mode.upper()}_DEFAULTS\")\n defaults = PlotConfig({**cls.COMMON_DEFAULTS, **mode_defaults})\n return defaults", "def get_defaults(cls, mode):\n mode_defaults = getattr(cls, f\"{mode.upper()}_DEFAULTS\")\n 
defaults = PlotConfig({**cls.COMMON_DEFAULTS, **mode_defaults})\n return defaults", "def __init__(self, s=None, unchecked=False):\r\n # if not Options.defaults: # this is different from self.defaults!!!\r\n # Options.defaults = fmin([],[])\r\n if s is None:\r\n super(Options, self).__init__(Options.defaults())\r\n # self = Options.defaults()\r\n elif type(s) is str:\r\n super(Options, self).__init__(Options().match(s))\r\n # we could return here\r\n else:\r\n super(Options, self).__init__(s)\r\n\r\n if not unchecked:\r\n for key in list(self.keys()):\r\n if key not in Options.defaults():\r\n print('Warning in cma.Options.__init__(): invalid key ``' + str(key) + '`` popped')\r\n self.pop(key)\r\n # self.evaluated = False # would become an option entry\r", "def get_hive_options(hive_label, opts):\n section_header = \"{}:{}\".format(PACKAGE_NAME, hive_label)\n if not opts.get(section_header):\n LOG.warning(\"Unable to find section header: %s\", section_header)\n return None\n\n return opts.get(section_header)", "def get_options(cls, mode):\n return dict(\n (key, properties[mode])\n for key, properties in cls.__register.items()\n if mode in properties\n )", "def fmt_option_key(key, value):\n if value is None:\n return \"\"\n return f\"{key}={value}\"", "def _parse_options(options):\n opts = dict()\n for attr in dir(options):\n if attr.startswith(\"__\"):\n continue\n opts[attr] = getattr(options, attr)\n return opts", "def validate_plot_options(plot_options):\n\n class PlotOptionValidationError(Exception):\n \"\"\"Custom Exception raised for errors in the global plot option json\n file\n Attributes:\n msg (str): explanation message of the error\n \"\"\"\n def __init__(self, msg):\n self.msg = msg\n\n logger = logging.getLogger(\"DMlog\")\n logger.info(\"Validating global plot options...\")\n valid_options = [\n 'title', 'suptitle', 'title_fontsize', 'suptitle_fontsize', 'xlim',\n 'ylim', 'xticks', 'yticks', 'xticks_labels', 'yticks_labels',\n 'xticks_size', 'yticks_size', 'xticks_label_size', 'yticks_label_size',\n 'xlabel', 'xlabel_fontsize', 'ylabel', 'ylabel_fontsize', 'xscale',\n 'yscale']\n try:\n # Handle plot options validation here\n for opt in plot_options.keys():\n if opt not in valid_options:\n raise PlotOptionValidationError(\"Invalid option `{}` detected\\\n \".format(opt))\n except PlotOptionValidationError as e:\n logging.error(\"PlotOptionValidationError: {}\".format(e.msg))\n DMRenderExit(logger)", "def key_callback(option,opt_str,value,parser):\n if \"--epi-key\" in opt_str:\n parser.values.epi_keys.append(value)\n elif \"--exclude\" in opt_str:\n parser.values.exclude_paths.append(value)", "def options(self) -> Mapping[str, str]:\n return pulumi.get(self, \"options\")", "def _getDefaultSettings(cls):\n return {'minimumROIDimensions': 1,\n 'minimumROISize': None, # Skip testing the ROI size by default\n 'normalize': False,\n 'normalizeScale': 1,\n 'removeOutliers': None,\n 'resampledPixelSpacing': None, # No resampling by default\n 'interpolator': 'sitkBSpline', # Alternative: sitk.sitkBSpline,\n 'padDistance': 5,\n 'distances': [1],\n 'force2D': False,\n 'force2Ddimension': 0,\n 'label': 1,\n 'enableCExtensions': True,\n 'additionalInfo': True}", "def get_options(filepath):\n options = {}\n with open(filepath, \"r\") as f:\n reader = csv.DictReader(f, delimiter=';')\n for row in reader:\n function = row[\"Option\"]\n options.setdefault(function, {})\n options[function][\"description\"] = row[\"Description\"]\n return options", "def extract_kwargs_from_options(options):\n 
return modulation_utils.extract_kwargs_from_options(\n dqpsk_demod.__init__, ('self',), options)", "def get_options(self, key):\n # Get the set of options unique to the Workflow data model\n if key not in self.__workflow_options:\n raise KeyError(\"Key `{}` not understood.\".format(key))\n\n return copy.deepcopy(getattr(self.data, key))", "def extract_kwargs_from_options(options):\n return modulation_utils.extract_kwargs_from_options(gfsk_mod.__init__,\n ('self',), options)\n extract_kwargs_from_options=staticmethod(extract_kwargs_from_options)", "def default_options(cls) -> Dict:\n return {}", "def get_option(self, option):\n if not self._options.has_key(option):\n raise KeyError, \"Invalid option: \" + option\n else:\n return self._options.get(option)", "def get_or_prompt(self, key, options, title=\"Prompt\", descr=\"\"):\n \n # First check if we have a remembered setting.\n val = self[key]\n if val is not None:\n return val\n \n # Setup the dialog\n mb = ExRememberPrompt(QMessageBox.Question, title, descr)\n if len(options) < 5:\n buttons = []\n opt = options[0] # Make first option default (accept)\n buttons.append(mb.addButton(opt[1], QMessageBox.AcceptRole))\n for opt in options[1:]:\n buttons.append(mb.addButton(opt[1], QMessageBox.RejectRole))\n else:\n pass #TODO: Make list selection\n mb.addButton(QMessageBox.Cancel)\n \n # Show the dialog\n mb.exec_()\n btn = mb.clickedButton()\n if btn not in buttons:\n # The user did not make a valid selection = cancelled\n return None\n sel = btn.text()\n idx = [o[1] for o in options].index(sel)\n ret = options[idx][0]\n if mb.isChecked():\n self[key] = ret\n return ret", "def options_by_name(self):\n pass", "def initialize_options(self):", "def subselect(self, obj):\n return dict(\n (key, value) for (key, value)\n in obj.items()\n if key in self.defaults)", "def __check_option(self,name):\n # Check if option exists\n if not self.__options.has_key(name):\n raise AttributeError('(EVOGTK - Preferences Helper) Preferences object has no attribute \\'%s\\'' % name)\n # Check for option type\n if self.__options[name][0] not in self.__supported_types:\n raise TypeError('(EVOGTK - Preferences Helper) Inconsistent data type \\'%s\\' for option \\'%s\\'' % (type,name)) \n return self.__options[name]", "def base_plot_keys(self):\n plot_keys = [\"loss\", \"l1_loss\", \"duration_loss\"]\n if self.use_scaled_pos_enc:\n plot_keys += [\"encoder_alpha\", \"decoder_alpha\"]\n\n return plot_keys", "def __getattr__(self,name):\n # Check if we are getting an option\n if name not in ['_PreferencesSection__section','_PreferencesSection__options',\n '_PreferencesSection__config','_PreferencesSection__initialised','_PreferencesSection__get_option','_PreferencesSection__set_option']:\n if not self.__options.has_key(name):\n raise AttributeError('(EVOGTK - Preferences Helper) Preferences object has no attribute \\'%s\\'' % name)\n # Get option value\n return self.get_option(name)\n else:\n # Call original __getattr__ method\n return super(_PreferencesSection,self).__getattr__(name)", "def optionHelp(self):\n return {}", "def _transpile_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"transpile_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None", "def getdict(self, section, option, default=None):\r\n return self.get(section, option, type=dict, default=default)", "def base_plot_keys(self):\r\n plot_keys = 
[\"loss\", \"l1_loss\", \"mse_loss\", \"dur_loss\"]\r\n \r\n if self.use_fe_condition:\r\n plot_keys += [\"pitch_loss\", \"energy_loss\"]\r\n return plot_keys", "def _get_default_options():\n return {\n \"library_folders\": [],\n \"verbose\": False,\n \"check_balanced\": True,\n \"mtime_check\": True,\n \"cache\": False,\n \"codegen\": False,\n \"expand_mx\": False,\n \"unroll_loops\": True,\n \"inline_functions\": True,\n \"expand_vectors\": False,\n \"resolve_parameter_values\": False,\n \"replace_parameter_expressions\": False,\n \"replace_constant_expressions\": False,\n \"eliminate_constant_assignments\": False,\n \"replace_parameter_values\": False,\n \"replace_constant_values\": False,\n \"eliminable_variable_expression\": None,\n \"factor_and_simplify_equations\": False,\n \"detect_aliases\": False,\n \"allow_derivative_aliases\": True,\n \"reduce_affine_expression\": False,\n }", "def plot_params(variable,context, custom_plot_params=None) :\n\n defaults = { \n 'contours' : 1 ,\n 'color' :'temp_19lev',\n }\n\n per_variable = {}\n # --> Adding the default plot params\n per_variable.update(atmos_plot_params.dict_plot_params)\n per_variable.update(ocean_plot_params.dict_plot_params)\n if centerspecs : \n # --> Then, add the plot params specific to the centers\n per_variable.update(atmos_plot_params_centerspecs.dict_plot_params)\n per_variable.update(ocean_plot_params_centerspecs.dict_plot_params)\n # --> If needed, adding a custom dictionnary of plot params\n if custom_plot_params:\n per_variable.update(custom_plot_params)\n #\n rep=defaults.copy()\n if variable in per_variable : \n var_entry=per_variable[variable]\n for cont in [ 'default', context ] :\n if cont in var_entry : rep.update(var_entry[cont])\n return rep", "def __init__(self, **options):\n self.__dict__.update(\n (k, v) for (k, v) in options.items() if not k.startswith('__'))", "def send_options_to_dict(self, out_dict, data_frame):\n options = self.create_options()\n flag, errCode = self.check_options(options, data_frame)\n if flag:\n options = self.adjust_options(options)\n d, key = out_dict\n d[key] = options\n self.destroy()\n else:\n self.attributes('-topmost', 'false')\n self.input_error(errCode)\n self.attributes('-topmost', 'true')", "def options(self): # 获取火车票查询选项 ex: iquary -dgktz 上海 北京 返回dgktz\n arg = self.get(0) # -dgktz\n if arg.startswith('-') and not self.is_asking_for_help:\n return arg[1:] # dgktz\n return ''.join(x for x in arg if x in 'dgktz')", "def config(self, **kwargs):\n\n logger.debug(\"LabeledWidget options\")\n # The options that we deal with this\n label = options[\"label\"]\n\n # cannot modify kwargs while iterating over it...\n keys = [*kwargs.keys()]\n for k in keys:\n if k in label:\n v = kwargs.pop(k)\n logger.debug(\" {} --> {}: {}\".format(k, label[k], v))\n self.label.config(**{label[k]: v})\n elif k == \"labelpos\":\n self.labelpos = kwargs.pop(k)\n else:\n # Since this is the base class, raise an error force\n # unrecognized options\n raise RuntimeError(\"Unknown option '{}'\".format(k))", "def get_plot_options(self):\n plot_options = []\n # Get drawdown plot options\n op = self.drawdown.get_plot_options()\n if op['visible']:\n plot_options.append(op)\n # Get associated data options\n for i in range(self.data_count()):\n op = self.data[i].get_plot_options()\n if op['visible']:\n plot_options.append(op)\n return(plot_options)", "def get_export_options(options):\n export_options = {\n key: value\n for (key, value) in iteritems(options)\n if key in Export.EXPORT_OPTION_FIELDS\n }\n\n 
return export_options", "def _get_options(self):\n return self.options", "def get_commandlinearg(self, keyname, defaultval=None):\n if (hasattr(self.commandlineargs,keyname)):\n val = getattr(self.commandlineargs,keyname)\n if (val != None):\n return val\n try:\n # try to access commandline args as dictionary\n return self.commandlineargs[keyname]\n except:\n pass\n # return default val\n return defaultval", "def __get_option(self, option):\n if option in Config.OPTIONS.keys():\n _default = Config.OPTIONS[option]\n elif option in Config.FILE_OPTIONS.keys():\n _default = Config.FILE_OPTIONS[option]\n elif option in Config.PATH_OPTIONS.keys():\n _default = Config.PATH_OPTIONS[option]\n else:\n _default = None # XXX ??\n \n _val = self.__get(option)\n\n if _val: \n return _val\n else:\n return _default", "def define_options(self):\n return {\n 'basename': OptionDef(required=True, default_value='keycloak', allowed_types=[str]),\n 'namespace': OptionDef(required=True, default_value='default', allowed_types=[str]),\n 'config': {\n 'service_port': OptionDef(required=True, default_value=8080, allowed_types=[int]),\n 'realm_import': OptionDef(format=OptionDefFormat.KDATA_VOLUME, allowed_types=[str, bytes, KData_Secret]),\n 'proxy_address_forwarding': OptionDef(format=OptionDefFormat.KDATA_ENV,\n allowed_types=[bool, *KDataHelper_Env.allowed_kdata()]),\n 'frontend_url': OptionDef(allowed_types=[str]),\n 'admin': {\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n 'db': {\n 'vendor': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'addr': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'port': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[int, *KDataHelper_Env.allowed_kdata()]),\n 'database': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'schema': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'user': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, *KDataHelper_Env.allowed_kdata()]),\n 'password': OptionDef(format=OptionDefFormat.KDATA_ENV, allowed_types=[str, KData_Secret]),\n },\n },\n 'container': {\n 'keycloak': OptionDef(required=True, default_value='quay.io/keycloak/keycloak:11.0.2', allowed_types=[str]),\n },\n 'kubernetes': {\n 'resources': {\n 'deployment': OptionDef(allowed_types=[Mapping]),\n }\n },\n }", "def set_option(self, key, value):\r\n return self.set_options([(key, value)])", "def _scaling_model_from_dict(obj):\n for entry_point in pkg_resources.iter_entry_points(\"dxtbx.scaling_model_ext\"):\n if entry_point.name == obj[\"__id__\"]:\n return entry_point.load().from_dict(obj)", "def config(self, key, default='', astype='auto'):\n if hasattr(self, '_config') and self._config.length() > 0:\n out = self._config.curve(0).getAttribute(key, default=default)\n if astype in [str, 'str']:\n return str(out)\n else:\n return strToVar(out)\n return default", "def get(self, section, option):\n if self._dict.has_key(section):\n return self._dict[section].get(option, None)\n return None", "def get_option(self, option):\n\t\treturn self.options[option]", "def __assert_option(self, key):\n\n if not self.has_option(key):\n raise KeyError(\"No such option.\")", "def options(self):\n options = {\n 
o.name: getattr(self, o.name)\n for o in _OPTIONS\n }\n return options", "def getKey(kwargs,key,default=None):\n value = kwarg.get(key,default)\n if value==None:\n return default\n return value", "def _analysis_options(self, index: int = -1) -> Dict[str, Any]:\n try:\n return self.__experiment_metadata[\"job_metadata\"][index][\"analysis_options\"]\n except (TypeError, KeyError, IndexError):\n # Ignore experiment metadata or job metadata is not set or key is not found\n return None", "def _option_key(self, option_id, entity=None):\n parts = [] if option_id.namespace is None else [option_id.namespace]\n if option_id.at is not None:\n if entity is None:\n raise ValueError(\n 'Error reading entity config option {option} without entity'.format(\n option=option_id.name))\n parts.append(re.sub(r'[_-]', '.', entity))\n parts.append(re.sub(r'[_-]', '.', option_id.name))\n return '.'.join(parts)", "def default_options(cls) -> Dict:\n options = super().default_options()\n # scaling factor for temperature adaptation\n options['eta'] = 100\n # controls the adaptation degeneration velocity of the temperature\n # adaption.\n options['nu'] = 1e3\n\n return options", "def test_kpoints_params_auto(kpoints_parser_auto):\n\n kpoints = kpoints_parser_auto.get_dict()\n assert kpoints['mode'] == 'automatic'\n assert kpoints['comment'] == 'Example file'\n assert kpoints['divisions'] == [4, 4, 4]\n assert kpoints['shifts'] == None\n assert kpoints['points'] == None\n assert kpoints['centering'] == 'Gamma'\n assert kpoints['tetra'] == None\n assert kpoints['tetra_volume'] == None\n assert kpoints['num_kpoints'] == 0", "def getKey(self):\n return _libsbml.ConversionOption_getKey(self)", "def default(self, o):\n return o.__dict__", "def dict_to_kwds(inobject, kwdstr='', overwrite=True):\n if not isinstance(kwdstr, string_types):\n raise ValueError(\"kwdstr '{}' not a string\".format(kwdstr))\n\n optdict = {}\n for item in kwdstr.split(\",\"):\n if item == \"\":\n continue\n ikey, ival = _split_option(item, kwdstr)\n if ikey in optdict:\n raise ValueError(\n \"kwdstr '{}' contain multiple references to '{}'\".format(\n kwdstr, ikey\n ))\n optdict[ikey] = ival\n\n if isinstance(inobject, (list, tuple)):\n for item in inobject:\n if item == \"\":\n continue\n if not isinstance(item, string_types):\n raise ValueError(\n \"option '{}' from option list is not a string: {}\".format(\n item, kwdstr))\n okey, oval = _split_option(item, inobject)\n if okey not in optdict or overwrite:\n optdict[okey] = oval\n else:\n for kkey in sorted(inobject.keys()):\n keystr = str(kkey)\n if keystr not in optdict or overwrite:\n optdict[kkey] = str(inobject[kkey])\n\n outstring1 = []\n outstring2 = []\n for skey in sorted(optdict.keys()):\n if optdict[skey] is None:\n outstring1.append(skey)\n else:\n outstring2.append(\"{}={}\".format(skey, optdict[skey]))\n\n outstring = outstring1 + outstring2\n return \",\".join(outstring)", "def parse(self, section):\n # try to find alternatives if they exist\n alternatives = deepcopy(self.alternatives)\n while len(alternatives) != 0 and self.name not in section.dict:\n other_name = alternatives.pop(0)\n if other_name in section.dict:\n section.dict[self.name] = section.dict[other_name]\n del section.dict[other_name]\n break\n if not self.optional:\n assert_exists(self.name, section.dict, section.name)\n if self.name not in section.dict:\n return self.default\n else:\n if self.dtype != list:\n if self.dtype == bool:\n # this is necessary since ``bool(\"False\")`` returns ``True``.\n value = 
parse_bool(section, self.name)\n else:\n value = self.dtype(section.dict[self.name])\n if not self.validation_func(value):\n raise ValueError('Invalid input for option ' + self.name +\n ' in section ' + section.name)\n return value\n else:\n\n value = parse_list(section.dict[self.name], self.datatype)\n\n # value validation\n if not all_true(self.validation_func, value):\n raise ValueError('Invalid input for option ' + self.name +\n ' in section ' + section.name)\n\n shape = deepcopy(self.shape)\n\n # now we need to get the correct shape\n if shape == -1:\n # we don't care for the shape of this\n if not isinstance(value, list):\n value = [value]\n return value\n\n if isinstance(shape, str):\n # in this case we simply use the shape of the option with this name\n if shape not in section.dict:\n raise ValueError(self.name + ' in ' + section.name + ' has an invalid ' +\\\n 'shape because the options whose shape it should have ' +\\\n 'does not exist. Check your option definitions!')\n shape = get_shape(section.dict[shape])\n if isinstance(shape, int):\n shape = [shape]\n # shape is now a list, but it might still contain strings\n for i in range(len(shape)):\n if isinstance(shape[i], str):\n shape[i] = len(section.dict[shape[i]])\n\n\n\n # shape is now either a 'flat' shape, i.e. something like [2, 3, 2],\n # or an expanded shape, e.g. [2, [3, 3], [[2, 2, 2],[2, 2, 2]]]\n # if it's flat, it might contain dimensions with -1 that cannot be\n # autoexpanded. We first need to determine the shape of this dimension.\n if is_flat(shape):\n real_shape = get_shape(value)\n if isinstance(real_shape, (list, tuple)):\n # if it's just a single number we can expand it\n # Here I'm trying to find the flat shape of the value that was\n # given in the configuration file.\n flat_shape_value = try_flattening_shape(real_shape)\n # It might happen that we cannot flatten the shape, in this\n # case there are negative values remaining in flat_shape_value.\n # If there are, this means that there is a dimension\n # containing lists of different lengths.\n # In any case I will try to replace any -1 in ``shape``\n # with the value in ``flat_shape_value``.\n shape = get_positive_shape(shape, flat_shape_value)\n # Now we do a test for equality of the asserted shape and\n # the shape of the value found in the config file. Keep in\n # mind that there might be -1 values left.\n if flat_shape_value != shape[-len(flat_shape_value):]:\n raise ShapeError(self.name, section.name)\n # If there are -1's left we must ensure that the \"depth\" of\n # the given value, i.e. the number of dimensions, is higher\n # than the ``number of dimensions after the value preceding\n # the first -1`` + 1 .\n if any(map(lambda x: x == -1, shape)):\n depth = numdim(value)\n mindepth = len(shape) - shape.index(-1) + 1\n if depth < mindepth:\n raise ValueError('Option ' + self.name + ' in section ' +\n section.name + ' can not be expanded!')\n shape = expand_shape(shape)\n\n # Now we have an expanded shape, so only two tasks remain:\n # * auto-expansion\n # * shape validation\n value = expand_to_shape(shape, value)\n if not compare_shapes(shape, get_shape(value)):\n raise ShapeError(self.name, section.name)\n return value" ]
[ "0.6476717", "0.6211503", "0.57079715", "0.5571008", "0.5534396", "0.547191", "0.54293185", "0.5385394", "0.53531367", "0.53213716", "0.5306876", "0.5284712", "0.5159889", "0.51395476", "0.5116941", "0.51114714", "0.50779635", "0.507736", "0.5073758", "0.5022548", "0.5020879", "0.50104797", "0.49683166", "0.49671754", "0.49628994", "0.49560538", "0.4949455", "0.49383163", "0.4930963", "0.49281263", "0.49249035", "0.49188116", "0.4915429", "0.49137393", "0.49132338", "0.490356", "0.49030364", "0.4874224", "0.48574522", "0.48500267", "0.4849538", "0.4846144", "0.48321915", "0.4819824", "0.48171586", "0.48171586", "0.48091823", "0.4807479", "0.47925752", "0.47915822", "0.47855127", "0.4777044", "0.47742033", "0.47488052", "0.47484142", "0.47436848", "0.4742919", "0.4720539", "0.47188413", "0.47180155", "0.47121403", "0.47102422", "0.47075447", "0.47039017", "0.46951237", "0.46837425", "0.46810746", "0.46776798", "0.46775204", "0.4673536", "0.46704096", "0.46676666", "0.46665198", "0.4663469", "0.4658826", "0.4656373", "0.46550885", "0.46534705", "0.46481368", "0.46465847", "0.4644484", "0.46443972", "0.46442553", "0.46430007", "0.46415702", "0.4636991", "0.4622355", "0.46212378", "0.46198192", "0.46104026", "0.46084505", "0.46002635", "0.4598458", "0.45973897", "0.45957345", "0.45938352", "0.45931515", "0.45871294", "0.45861116", "0.45852593" ]
0.6997502
0
Uses traversal to find the appropriate projection for a nested object. Respects projections set on Overlays before considering Element-based settings, before finally looking up the default projection on the plot type. If more than one non-None projection type is found, an exception is raised.
def _get_projection(cls, obj):
    isoverlay = lambda x: isinstance(x, CompositeOverlay)
    opts = cls._traverse_options(obj, 'plot', ['projection'],
                                 [CompositeOverlay, Element],
                                 keyfn=isoverlay)
    from_overlay = not all(p is None for p in opts[True]['projection'])
    projections = opts[from_overlay]['projection']
    custom_projs = [p for p in projections if p is not None]
    if len(set(custom_projs)) > 1:
        raise Exception("An axis may only be assigned one projection type")
    return custom_projs[0] if custom_projs else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def projectionManip(*args, fitBBox: bool=True, projType: int=0, switchType: bool=True, q=True,\n query=True, **kwargs)->Union[None, Any]:\n pass", "def create_projection_from_projector_element(\n self, element, weight, type, projector_id\n ):\n projection = {\n \"id\": self.projection_id_counter,\n \"stable\": element.get(\"stable\", True),\n \"weight\": weight,\n \"options\": {},\n \"current_projector_id\": None,\n \"preview_projector_id\": None,\n \"history_projector_id\": None,\n \"meeting_id\": 1,\n }\n projection[f\"{type}_projector_id\"] = projector_id\n for k, v in element.items():\n if k not in (\"id\", \"name\", \"stable\"):\n projection[\"options\"][k] = v\n\n collection = element[\"name\"]\n if collection in COLLECTION_MAPPING:\n id = self.to_new_id(collection, element[\"id\"])\n collection = COLLECTION_MAPPING[collection]\n projection[\"content_object_id\"] = f\"{collection}/{id}\"\n projection[\"type\"] = None\n elif collection == \"agenda/item-list\":\n collection = \"meeting\"\n id = 1\n projection[\"content_object_id\"] = \"meeting/1\"\n projection[\"type\"] = \"agenda_item_list\"\n elif collection in (\n \"agenda/current-list-of-speakers\",\n \"agenda/current-list-of-speakers-overlay\",\n ):\n collection = \"meeting\"\n id = 1\n projection[\"content_object_id\"] = \"meeting/1\"\n projection[\"type\"] = \"current_list_of_speakers\"\n elif collection == \"agenda/current-speaker-chyron\":\n collection = \"meeting\"\n id = 1\n projection[\"content_object_id\"] = \"meeting/1\"\n projection[\"type\"] = \"current_speaker_chyron\"\n else:\n raise OS4ExporterException(f\"Unknown slide {collection}\")\n\n if collection != \"user\":\n content_object = self.get_model(collection, id)\n content_object[\"projection_ids\"].append(projection[\"id\"])\n else:\n user = self.get_model(collection, id)\n if not user[\"projection_$_ids\"]:\n user[\"projection_$_ids\"] = [\"1\"]\n user[\"projection_$1_ids\"] = []\n user[\"projection_$1_ids\"].append(projection[\"id\"])\n\n self.projection_id_counter += 1\n self.set_model(\"projection\", projection)\n return projection[\"id\"]", "async def test_entity_nested_projection(self):\n test_name = 'test_entity_nested_projection'\n entity_name = 'TestEntityNestedProjection'\n\n corpus = TestHelper.get_local_corpus(self.tests_subpath, test_name)\n expected_output_path = TestHelper.get_expected_output_folder_path(self.tests_subpath, test_name)\n manifest = await corpus.fetch_object_async('local:/default.manifest.cdm.json')\n\n ent_test_entity_nested_projection = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name), manifest)\n self.assertIsNotNone(ent_test_entity_nested_projection)\n resolved_test_entity_nested_projection = await ProjectionTestUtils.get_resolved_entity(corpus, ent_test_entity_nested_projection, [])\n self.assertIsNotNone(resolved_test_entity_nested_projection)\n await AttributeContextUtil.validate_attribute_context(self, corpus, expected_output_path, entity_name, resolved_test_entity_nested_projection)", "def apply_projection(projection, dataset):\n out = DatasetType(name=dataset.name, attributes=dataset.attributes)\n\n for var in projection:\n target, template = out, dataset\n while var:\n name, slice_ = var.pop(0)\n candidate = template[name]\n \n # apply slice\n if slice_:\n if isinstance(candidate, BaseType):\n candidate.data = candidate[slice_]\n elif isinstance(candidate, SequenceType):\n candidate = candidate[slice_[0]]\n elif isinstance(candidate, GridType):\n candidate = candidate[slice_]\n\n # 
handle structures\n if isinstance(candidate, StructureType):\n # add variable to target\n if name not in target.keys():\n if var:\n # if there are more children to add we need to clear the\n # candidate so it has only explicitly added children; \n # also, Grids are degenerated into Structures\n if isinstance(candidate, GridType):\n candidate = StructureType(candidate.name, candidate.attributes)\n candidate._keys = []\n target[name] = candidate\n target, template = target[name], template[name]\n else:\n target[name] = candidate\n\n # fix sequence data, including only variables that are in the sequence\n for seq in walk(out, SequenceType):\n seq.data = get_var(dataset, seq.id)[tuple(seq.keys())].data\n\n return out", "def Projection(W, TYPE_PROJ = proj_l11ball, ETA = 100, AXIS = 0, ETA_STAR = 100, device = \"cpu\" ): \n \n #global TYPE_PROJ, ETA, ETA_STAR, AXIS, device \n if TYPE_PROJ == 'No_proj':\n W_new = W\n if (TYPE_PROJ == proj_l1ball or TYPE_PROJ == proj_l11ball or TYPE_PROJ == proj_l11ball_line ):\n W_new = TYPE_PROJ(W, ETA, device)\n if TYPE_PROJ == proj_l21ball or TYPE_PROJ == proj_l12ball:\n W_new = TYPE_PROJ(W, ETA, AXIS, device = device)\n if TYPE_PROJ == proj_nuclear:\n W_new = TYPE_PROJ(W, ETA_STAR, device=device)\n return W_new", "def _get_projection(el):\n result = None\n if hasattr(el, 'crs'):\n result = (int(el._auxiliary_component), el.crs)\n return result", "def test_compatible_projections(self):\n\n # Read two layers with compatible projections\n hazard_filename = '%s/donut.shp' % TESTDATA\n exposure_filename = ('%s/pop_merapi_prj_problem.asc' % TESTDATA)\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n # Verify that their projection strings are different\n assert H.get_projection() != E.get_projection()\n assert H.get_projection(proj4=True) != E.get_projection(proj4=True)\n\n # But the InaSAFE comparison does pass\n assert H.projection == E.projection", "async def test_entity_attribute_nested_projection(self):\n test_name = 'test_entity_attribute_nested_projection'\n entity_name = 'TestEntityAttributeNestedProjection'\n\n corpus = TestHelper.get_local_corpus(self.tests_subpath, test_name)\n expected_output_path = TestHelper.get_expected_output_folder_path(self.tests_subpath, test_name)\n manifest = await corpus.fetch_object_async('local:/default.manifest.cdm.json')\n\n ent_test_entity_attribute_nested_projection = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name), manifest)\n self.assertIsNotNone(ent_test_entity_attribute_nested_projection)\n resolved_test_entity_attribute_nested_projection = await ProjectionTestUtils.get_resolved_entity(corpus, ent_test_entity_attribute_nested_projection, [])\n self.assertIsNotNone(resolved_test_entity_attribute_nested_projection)\n await AttributeContextUtil.validate_attribute_context(self, corpus, expected_output_path, entity_name, resolved_test_entity_attribute_nested_projection)", "def test_projection_comparisons(self):\n\n # Although the two test datasets have the same projection,\n # this example failed with the message:\n # The reason was that comparison was done with get_projection()\n # rather than the projection objects themselves.\n\n #Projections must be the same: I got\n #GEOGCS[\"GCS_WGS_1984\",DATUM[\"WGS_1984\",\n # SPHEROID[\"WGS_1984\",6378137,298.257223563]],\n # PRIMEM[\"Greenwich\",0],UNIT[\"Degree\",0.017453292519943295]] and\n #GEOGCS[\"WGS 84\",DATUM[\"WGS_1984\",\n # SPHEROID[\"WGS 84\",6378137,298.257223563,\n # 
AUTHORITY[\"EPSG\",\"7030\"]],TOWGS84[0,0,0,0,0,0,0],\n # AUTHORITY[\"EPSG\",\"6326\"]],PRIMEM[\"Greenwich\",0,\n # AUTHORITY[\"EPSG\",\"8901\"]],UNIT[\"degree\",0.01745329251994328,\n # AUTHORITY[\"EPSG\",\"9122\"]],AUTHORITY[\"EPSG\",\"4326\"]]\n\n # Name file names for hazard level and exposure\n hazard_filename = ('%s/rw_jakarta_singlepart.shp' % TESTDATA)\n exposure_filename = ('%s/indonesia_highway_sample.shp' % TESTDATA)\n\n # Read\n H = read_layer(hazard_filename)\n E = read_layer(exposure_filename)\n\n Hp = H.projection\n Ep = E.projection\n msg = 'Projections did not match: %s != %s' % (Hp, Ep)\n assert Hp == Ep, msg", "def projection(self):\n pass", "def _convert_projection(keys_only, gae_projection_fields, index_schema):\n if gae_projection_fields:\n # Process projection_fields\n solr_projection_fields = ['id', 'rank', 'language']\n for gae_name in gae_projection_fields:\n # (1) In GAE fields with different type can have the same name,\n # in Solr they are stored as fields with different name (type suffix).\n try:\n solr_projection_fields += [\n solr_field.solr_name for solr_field in\n index_schema.grouped_fields[gae_name]\n ]\n except KeyError:\n logger.warning('Unknown field \"{}\" in projection'.format(gae_name))\n return solr_projection_fields\n elif keys_only:\n # Skip everything but ID.\n return ['id', 'rank', 'language']\n else:\n # Return all fields.\n return None", "def projection(self, point):\n projected_point = self._iterate_over_factors(\"projection\", {\"point\": point})\n return projected_point", "def test_project(self):\n import itertools\n from numpy import array, dot\n from numpy.linalg import det\n\n # our little magic constant\n magic = 0.33377777373737737777\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 1 + magic, -1 - magic):\n \n s = space(curvature=k)\n\n # test line preserving projection\n # 3 points are colinear when\n # | x1 y1 1 |\n # | x2 y2 1 | = 0\n # | x3 y3 1 |\n # let's test this!\n\n for p, q in itertools.permutations((\n (1, 0),\n (3/5, 4/5),\n (-5/13, 12/13),\n (-8/17, -15/17),\n ), 2):\n p = s.make_point(p, magic)\n q = s.make_point(q, magic)\n u = p.project(projection_types.preserve_lines)\n v = (p+q).project(projection_types.preserve_lines)\n w = (p+(-magic)*q).project(projection_types.preserve_lines)\n d = det([[*u, 1],[*v, 1],[*w, 1]])\n self.assertTrue(abs(d) < 1e-9)\n\n # test angle preserving projection\n # map will be conformal, so we do like a secant test\n\n delta = 1e-9\n vi = s.make_point((1, 0, 0), delta)\n vj = s.make_point((0, 1, 0), delta)\n vk = s.make_point((0, 0, 1), delta)\n for p in (\n (1, 0, 0),\n (0, 3/5, 4/5),\n (-5/13, 12/13, 0),\n (2/11, 6/11, 9/11),\n (3/7, 6/7, 2/7)\n ):\n p = s.make_point(p, magic)\n pp = p.project(projection_types.preserve_angles)\n pi, pj, pk = (array((p+v).project(projection_types.preserve_angles)) - pp for v in (vi, vj, vk))\n # should stay orthogonal and same size\n # note that we're doing a secant thing so it's only approximate\n # thus we set a relatively high tolerance\n self.assertTrue(isclose(\n dot(pi, pi),\n dot(pj, pj),\n rel_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pi),\n dot(pk, pk),\n rel_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pj),\n 0,\n abs_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pi, pk),\n 0,\n abs_tol = 1e-6\n ))\n self.assertTrue(isclose(\n dot(pj, pk),\n 0,\n abs_tol = 1e-6\n ))", "def _solve3D_proj_multires(self, simu):\n if self.do_projection(simu):\n self._project()\n self._solve3D_multires()", "def 
test_validation_get_valid_projections(self):\n self.assertIsInstance(api.validation.fetch_projections(), dict)", "async def test_entity_projection(self):\n test_name = 'test_entity_projection'\n entity_name = 'TestEntityProjection'\n\n corpus = TestHelper.get_local_corpus(self.tests_subpath, test_name)\n expected_output_path = TestHelper.get_expected_output_folder_path(self.tests_subpath, test_name)\n manifest = await corpus.fetch_object_async('local:/default.manifest.cdm.json')\n\n ent_test_entity_projection = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name), manifest)\n self.assertIsNotNone(ent_test_entity_projection)\n resolved_test_entity_projection = await ProjectionTestUtils.get_resolved_entity(corpus, ent_test_entity_projection, [])\n self.assertIsNotNone(resolved_test_entity_projection)\n await AttributeContextUtil.validate_attribute_context(self, corpus, expected_output_path, entity_name, resolved_test_entity_projection)", "def set_projection_type(self, p_type):\n self.scenes[self.current_scene].set_projection_type(p_type)", "def _init_projection(self):\n radius = 6370e3\n \n # Spherical latlon used by WRF\n self.latlon_sphere = pyproj.Proj(proj='latlong',\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n # Lambert Conformal Conic used by WRF\n self.lambert_grid = pyproj.Proj(proj='lcc',\n lat_1=self.truelats[0],\n lat_2=self.truelats[1],\n lat_0=self.ref_latlon[0],\n lon_0=self.stand_lon,\n a=radius, b=radius, towgs84='0,0,0', no_defs=True)\n\n grid_size_i = (self.domain_size[0] - 2) * self.cell_size[0]\n grid_size_j = (self.domain_size[1] - 2) * self.cell_size[1]\n\n grid_center_i, grid_center_j = pyproj.transform(\n self.latlon_sphere, self.lambert_grid,\n self.ref_latlon[1], self.ref_latlon[0])\n \n self.offset_i = grid_center_i - grid_size_i * .5\n self.offset_j = grid_center_j - grid_size_j * .5", "def projectionContext(*args, exists: bool=True, history: bool=True, image1: Union[AnyStr,\n bool]=\"\", image2: Union[AnyStr, bool]=\"\", image3: Union[AnyStr, bool]=\"\",\n name: AnyStr=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def read_projection(fname, element, theta_index):\n\n projections = dxchange.read_hdf5(fname, \"MAPS/XRF_roi\")\n theta = float(dxchange.read_hdf5(fname, \"MAPS/extra_pvs_as_csv\")[theta_index].split(b',')[1])\n elements = read_channel_names(fname)\n\n try:\n if find_index(elements, element) != None:\n return projections[find_index(elements, element),:, :], theta\n else:\n raise TypeError\n except TypeError:\n print(\"**** ERROR: Element %s does exist in the file: %s \" % (element, fname))\n return None", "def _project_elem(self, elem, mapping):\r\n\t\tif isinstance(elem, basestring):\r\n\t\t\treturn elem\r\n\t\telif isinstance(elem, xmlmodel.XmlElem):\r\n\t\t\tcls = mapping.get_class_for(elem)\r\n\t\t\tif cls is None:\r\n\t\t\t\traise TypeError, 'Could not determine object class for \\'{0}\\' element for node type {1}'.format(elem.tag, type(self))\r\n\t\t\tif not isinstance(cls, NodeClass):\r\n\t\t\t\tif callable(cls):\r\n\t\t\t\t\tcls = cls()\r\n\t\t\t\telse:\r\n\t\t\t\t\traise TypeError, 'Object class for \\'{0}\\' element for node type {1} is of type {2}, should be a NodeClass or a callable'.format(elem.tag, type(self), type(cls))\r\n\t\t\tnode = self._projection_table.get(elem, cls)\r\n\t\t\tif node is None:\r\n\t\t\t\tnode = cls(self._projection_table, elem)\r\n\t\t\t\tself._projection_table.put(elem, cls, node)\r\n\t\t\t\tnode.node_init()\r\n\t\t\treturn 
node\r\n\t\telse:\r\n\t\t\traise TypeError, 'elem should be a string or an XmlElem'", "def get_chained_proj(self):\n #non recursive call\n N = self.N_in\n projs = []\n for layer in self.layers:\n proj,N_out = self.get_deprojecter(layer,N)\n projs.append(proj)\n N = N_out\n return projs", "def subdAutoProjection(*args, caching: bool=True, nodeState: Union[int, bool]=0,\n constructionHistory: bool=True, layout: Union[int, bool]=0,\n layoutMethod: Union[int, bool]=0, name: AnyStr=\"\", optimize: Union[int,\n bool]=0, percentageSpace: Union[float, bool]=0.0, planes: Union[int,\n bool]=6, scale: Union[int, bool]=0, skipIntersect: bool=True,\n worldSpace: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def projection(poly1, dim, solver=None, abs_tol=ABS_TOL, verbose=0):\n if isinstance(poly1, Region):\n ret = Polytope()\n for i in range(len(poly1.list_poly)):\n p = projection(\n poly1.list_poly[i], dim,\n solver=solver, abs_tol=abs_tol)\n ret = ret + p\n return ret\n # flat ?\n if (poly1.dim < len(dim)) or is_empty(poly1):\n return poly1\n # `poly1` isn't flat\n poly_dim = poly1.dim\n dim = np.array(dim)\n org_dim = range(poly_dim)\n new_dim = dim.flatten() - 1\n del_dim = np.setdiff1d(org_dim, new_dim) # Index of dimensions to remove\n # logging\n logger.debug('polytope dim = ' + str(poly_dim))\n logger.debug('project on dims = ' + str(new_dim))\n logger.debug('original dims = ' + str(org_dim))\n logger.debug('dims to delete = ' + str(del_dim))\n mA, nA = poly1.A.shape\n # fewer rows than dimensions ?\n if mA < poly_dim:\n msg = 'fewer rows in A: ' + str(mA)\n msg += ', than polytope dimension: ' + str(poly_dim)\n logger.warning(msg)\n # enlarge A, b with zeros\n A = poly1.A.copy()\n poly1.A = np.zeros((poly_dim, poly_dim))\n poly1.A[0:mA, 0:nA] = A\n # stack\n poly1.b = np.hstack([poly1.b, np.zeros(poly_dim - mA)])\n logger.debug('m, n = ' + str((mA, nA)))\n # Compute cheby ball in lower dim to see if projection exists\n norm = np.sum(poly1.A * poly1.A, axis=1).flatten()\n norm[del_dim] = 0\n c = np.zeros(len(org_dim) + 1, dtype=float)\n c[len(org_dim)] = -1\n G = np.hstack([poly1.A, norm.reshape(norm.size, 1)])\n h = poly1.b\n sol = lpsolve(c, G, h)\n if sol['status'] != 0:\n # Projection not fulldim\n return Polytope()\n if sol['x'][-1] < abs_tol:\n return Polytope()\n # select projection solver\n if solver == \"esp\":\n return projection_esp(poly1, new_dim, del_dim)\n elif solver == \"exthull\":\n return projection_exthull(poly1, new_dim)\n elif solver == \"fm\":\n return projection_fm(poly1, new_dim, del_dim)\n elif solver == \"iterhull\":\n return projection_iterhull(poly1, new_dim)\n elif solver is not None:\n logger.warning('unrecognized projection solver \"' +\n str(solver) + '\".')\n # `solver` undefined or unknown\n # select method based on dimension criteria\n if len(del_dim) <= 2:\n logger.debug(\"projection: using Fourier-Motzkin.\")\n return projection_fm(poly1, new_dim, del_dim)\n elif len(org_dim) <= 4:\n logger.debug(\"projection: using exthull.\")\n return projection_exthull(poly1, new_dim)\n else:\n logger.debug(\"projection: using iterative hull.\")\n return projection_iterhull(poly1, new_dim)", "def projections(self):\n # backwards compatiblity for naming_scheme key\n conf = self.module.configuration(self.name)\n if \"naming_scheme\" in conf:\n default = {\"all\": conf[\"naming_scheme\"]}\n else:\n default = self.default_projections\n projections = conf.get(\"projections\", default)\n\n # Ensure the named tokens we are expanding 
are allowed, see\n # issue #2884 for reference\n msg = \"some tokens cannot be part of the module naming scheme\"\n for projection in projections.values():\n _check_tokens_are_valid(projection, message=msg)\n\n return projections", "def test_has_projection(self):\n for klass in Event.__subclasses__():\n self.assertTrue(hasattr(klass, 'project'),\n f'{klass.__name__} is missing project() method')\n self.assertTrue(inspect.isfunction(klass.project),\n f'{klass.__name__} is missing project() method')", "def read_gdal_projection(dataset):\n wkt = dataset.GetProjection()\n srs = osr.SpatialReference()\n srs.ImportFromWkt(wkt)\n # src = None\n return srs", "def projection_depth(X, *, pointwise=False):\n\n depth = outlyingness_to_depth(_stagel_donoho_outlyingness)\n\n return depth(X, pointwise=pointwise)", "def get_projection(self):\n return self.projection", "def projection(self):\n self.projection = Projection(self)\n return self.projection", "def _project(self):\n ghosts_w = self.input_field.topology.ghosts()\n self.input_field.data[0], self.input_field.data[1], \\\n self.input_field.data[2] = \\\n fftw2py.projection_om_3d(self.input_field.data[0],\n self.input_field.data[1],\n self.input_field.data[2], ghosts_w)", "def polyAutoProjection(*args, layoutMethod: Union[int, bool]=0, pivot: Union[List[float, float,\n float], bool]=None, pivotX: Union[float, bool]=0.0, pivotY: Union[float,\n bool]=0.0, pivotZ: Union[float, bool]=0.0, rotate: Union[List[float,\n float, float], bool]=None, rotateX: Union[float, bool]=0.0, rotateY:\n Union[float, bool]=0.0, rotateZ: Union[float, bool]=0.0, scale:\n Union[List[float, float, float], bool]=None, scaleX: Union[float,\n bool]=1.0, scaleY: Union[float, bool]=1.0, scaleZ: Union[float,\n bool]=1.0, translate: Union[List[float, float, float], bool]=None,\n translateX: Union[float, bool]=0.0, translateY: Union[float, bool]=0.0,\n translateZ: Union[float, bool]=0.0, caching: bool=True,\n constructionHistory: bool=True, createNewMap: bool=True,\n insertBeforeDeformers: bool=True, layout: Union[int, bool]=0, name:\n AnyStr=\"\", nodeState: Union[int, bool]=0, optimize: Union[int, bool]=0,\n percentageSpace: Union[float, bool]=0.0, planes: Union[int, bool]=6,\n projectBothDirections: bool=True, scaleMode: Union[int, bool]=0,\n skipIntersect: bool=True, uvSetName: AnyStr=\"\", worldSpace: bool=True,\n q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def assign_projection_to_srs(srs, projdef, verbose=False, warn=True):\n try:\n if srs.ExportToWkt() == \"\" and \\\n isinstance(projdef, int):\n srs.ImportFromEPSG(projdef)\n except TypeError:\n if verbose:\n print('Ignoring TypeError when importing from epsg')\n\n try:\n if srs.ExportToWkt() == \"\" and \\\n isinstance(projdef, str) and \\\n ('PROJ' in projdef or 'GEOGCS' in projdef):\n srs.ImportFromWkt(projdef)\n except TypeError:\n if verbose:\n print('Ignoring TypeError when importing from WKT')\n\n try:\n if srs.ExportToWkt() == \"\" and \\\n isinstance(projdef, str) and \\\n '+proj' in projdef:\n srs.ImportFromProj4(projdef)\n except TypeError:\n if verbose:\n print('Ignoring TypeError when importing from Proj4')\n\n if warn:\n if srs.ExportToWkt() == \"\":\n if verbose:\n print('projdef was: {}'.format(projdef))\n raise Warning('SRS not determined in GeoGrid initialization')", "def run_projections(self):\n self.projection_collection100 = DonorCollection()\n self.projection_collection50 = DonorCollection()\n double_over_100 = dict(list(\n (name, list(map(lambda x: x * 2, 
donations.donations))) for name, donations in self.donors.items()))\n triple_under_50 = dict(list(\n (name, list(map(lambda x: x * 3, donations.donations))) for name, donations in self.donors.items()))\n for donor, donations in double_over_100.items():\n self.projection_collection100.add(donor, donations)\n for donor, donations in triple_under_50.items():\n self.projection_collection50.add(donor, donations)\n return self.projection_collection50, self.projection_collection100", "def get_rois4plotting(params, sub_id = None, pysub = 'hcp_999999', use_atlas = True, atlas_pth = '', space = 'fsLR_den-170k'): \n\n if use_atlas:\n # Get Glasser atlas\n atlas_df, atlas_array = create_glasser_df(atlas_pth)\n \n if sub_id:\n \n # if single id provided, put in list\n if isinstance(sub_id, str) or isinstance(sub_id, int):\n sub_id = [sub_id]\n\n sub_id_list = ['sub-{sj}'.format(sj = str(pp).zfill(3)) if 'sub-' not in str(pp) else str(pp) for pp in sub_id]\n\n ## start empty dictionaries \n ROIs = {}\n color_codes = {}\n roi_verts = {}\n\n # loop over participant list\n for pp in sub_id_list:\n\n print('Getting ROIs for participants %s'%pp)\n\n if use_atlas:\n print('Using Glasser ROIs')\n # ROI names\n ROIs[pp] = list(params['plotting']['ROIs']['glasser_atlas'].keys())\n\n # colors\n color_codes[pp] = {key: params['plotting']['ROIs']['glasser_atlas'][key]['color'] for key in ROIs[pp]}\n\n # get vertices for ROI\n roi_verts[pp] = {}\n for _,key in enumerate(ROIs[pp]):\n print(key)\n roi_verts[pp][key] = np.hstack((np.where(atlas_array == ind)[0] for ind in atlas_df[atlas_df['ROI'].isin(params['plotting']['ROIs']['glasser_atlas'][key]['ROI'])]['index'].values))\n\n else:\n ## check if dict or str\n if isinstance(pysub, dict):\n pysub_pp = pysub[pp]\n else:\n pysub_pp = pysub\n\n # set ROI names\n ROIs[pp] = params['plotting']['ROIs'][space]\n\n # dictionary with one specific color per group - similar to fig3 colors\n color_codes[pp] = {key: params['plotting']['ROI_pal'][key] for key in ROIs[pp]}\n\n # get vertices for ROI\n roi_verts[pp] = {}\n for _,val in enumerate(ROIs[pp]):\n print(val)\n roi_verts[pp][val] = cortex.get_roi_verts(pysub_pp,val)[val]\n \n else:\n raise NameError('No subject ID provided')\n \n return ROIs, roi_verts, color_codes", "def _compute_projection_specs(self, model):\n sheetspec_product = itertools.product(model.sheets.data.values(),\n model.sheets.data.values())\n for src_sheet, dest_sheet in sheetspec_product:\n\n conditions = self.definition.compute_conditions(dest_sheet.level, self,\n dest_sheet.properties)\n for matchname, matchconditions in conditions.items():\n\n if self._matchcondition_holds(matchconditions, src_sheet):\n\n paramfn = self.definition.lookup(self.__class__, matchname, 'method')\n projtype = self.definition.lookup(self.__class__, matchname, 'type')\n\n proj = ProjectionSpec(projtype, src_sheet, dest_sheet)\n\n paramsets = paramfn(self, src_sheet.properties, dest_sheet.properties)\n paramsets = [paramsets] if isinstance(paramsets, dict) else paramsets\n\n for paramset in paramsets:\n proj = ProjectionSpec(projtype, src_sheet, dest_sheet)\n proj.update(**paramset)\n # Only used when time_dependent=False\n # (which is to be deprecated)\n proj.matchname = matchname\n\n path = (str(dest_sheet), paramset['name'])\n model.projections.set_path(path, proj)\n return model", "def get_visitor(self, obj, use_default=True):\n py_type = type(obj)\n result = self.get(py_type) or self._get_parent_type_visitor(obj, py_type)\n if result:\n return result\n if self.parent_map 
is not None:\n result = self.parent_map.get_visitor(obj, False)\n if not result and use_default:\n result = self.get(DEFAULT)\n if not result and self.parent_map is not None:\n result = self.parent_map.get(DEFAULT)\n return result", "def projection(self) -> Projection:\n return self._projection", "def project(self,\n projection: typing.Dict[str, int],\n repl_missing=None) -> typing.Iterator:\n cursor = self.cursor()\n\n while True:\n try:\n yield cursor.next().project(projection, repl_missing=repl_missing)\n except StopIteration:\n break", "def subdPlanarProjection(*args, caching: bool=True, nodeState: Union[int, bool]=0,\n constructionHistory: bool=True, createNewMap: bool=True, imageCenter:\n Union[List[float, float], bool]=None, imageCenterX: Union[float,\n bool]=0.5, imageCenterY: Union[float, bool]=0.5, imageScale:\n Union[List[float, float], bool]=None, imageScaleU: Union[float,\n bool]=1.0, imageScaleV: Union[float, bool]=1.0, insertBeforeDeformers:\n bool=True, keepImageRatio: bool=True, mapDirection: AnyStr=\"\", name:\n AnyStr=\"\", projectionCenter: Union[List[float, float, float],\n bool]=None, projectionCenterX: Union[float, bool]=0.0,\n projectionCenterY: Union[float, bool]=0.0, projectionCenterZ:\n Union[float, bool]=0.0, projectionHeight: Union[float, bool]=1.0,\n projectionScale: Union[List[float, float], bool]=None,\n projectionWidth: Union[float, bool]=1.0, rotate: Union[List[float,\n float, float], bool]=None, rotateX: Union[float, bool]=0.0, rotateY:\n Union[float, bool]=0.0, rotateZ: Union[float, bool]=0.0,\n rotationAngle: Union[float, bool]=10.0, smartFit: bool=True,\n worldSpace: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def reproject(dataset, epsg):\n dataclass = dataset.__class__.__name__\n # Run appropriate reprojection method\n if dataclass == 'GeoDataFrame':\n repro = geopandas.GeoDataFrame.copy(dataclass)\n repro[repro.geometry.name] = repro.geometry.to_crs(epsg=epsg)\n repro.crs = fiona.crs.from_epsg(epsg)\n elif dataclass == 'Dataset':\n repro = gdal_reproject(dataset, '', epsg=epsg)\n return repro", "def projection(self, point):\n return gs.copy(point)", "def projection(self):\n return self._map_projection", "def projection(self):\n return self._map_projection", "def test_access(geometry):\n geometry.print_list_of_geos()\n geometry.print_list_of_geos_children()\n\n logger.info('TOP GEO:')\n top_geo = geometry.get_top_geo()\n top_geo.print_geo_children()\n\n logger.info('INTERMEDIATE GEO (QUAD):')\n geo = geometry.get_geo('QUAD:V1', 0)\n #geo = geometry.get_top_geo()\n geo.print_geo_children()\n\n t0_sec = time()\n X,Y,Z = geo.get_pixel_coords(do_tilt=True)\n #X,Y = geo.get_2d_pixel_coords()\n s = 'X: %s' % str(X)\n s+= '\\n Consumed time to get 3d pixel coordinates = %7.3f sec' % (time()-t0_sec)\n s+= '\\n Geometry object: %s:%d X.shape:%s' % (geo.oname, geo.oindex, str(X.shape))\n logger.info(s)\n\n logger.info('Test of print_pixel_coords() for quad:')\n geometry.print_pixel_coords('QUAD:V1', 1)\n logger.info('Test of print_pixel_coords() for CSPAD:')\n geometry.print_pixel_coords()\n\n s = 'Test of get_pixel_areas() for QUAD:'\n A = geo.get_pixel_areas()\n s+= '\\n Geometry object: %s:%d A.shape:%s' % (geo.oname, geo.oindex, str(A.shape))\n s+= '\\n A[0,0:5,190:198]:\\n' + str(A[0,0:5,190:198])\n logger.info(s)\n\n s = 'Test of get_pixel_areas() for CSPAD:'\n A = top_geo.get_pixel_areas()\n s+= '\\n Geometry object: %s:%d A.shape:%s' % (geo.oname, geo.oindex, str(A.shape))\n s+= '\\n 
A[0,0,0:5,190:198]:\\n' + str(A[0,0,0:5,190:198])\n logger.info(s)\n\n s = 'Test of get_size_geo_array()'\n s+= '\\n for QUAD: %d' % geo.get_size_geo_array()\n s+= '\\n for CSPAD: %d' % top_geo.get_size_geo_array()\n logger.info(s)\n\n s = 'Test of get_pixel_scale_size()'\n s+= '\\n for QUAD : %8.2f' % geo.get_pixel_scale_size()\n s+= '\\n for CSPAD : %8.2f' % top_geo.get_pixel_scale_size()\n s+= '\\n for geometry: %8.2f' % geometry.get_pixel_scale_size()\n logger.info(s)\n\n s = 'Test of get_dict_of_comments():'\n d = geometry.get_dict_of_comments()\n s+= '\\n d[0] = %s' % str(d[0])\n logger.info(s)", "def project_tree(T0: nx.DiGraph, T1: nx.DiGraph, trim=True, projection: str = 'both', merge=True) -> nx.DiGraph:\n if projection not in ('inner', 'outer', 'both'):\n raise ValueError(\"projection must be one of inner, outer, or both\")\n\n keep_inner = projection in ('inner', 'both')\n keep_outer = projection in ('outer', 'both')\n\n def merge_helper(merge_onto_graph: nx.DiGraph, merge_node: object, to_merge_subgraph: nx.DiGraph) -> nx.DiGraph:\n # insert a node onto the subgraph which has the same line as the merge graph\n tree_ahead, tree_behind, colinear_segments = split_tree(to_merge_subgraph,\n merge_onto_graph.nodes[merge_node][\"line\"])\n\n # if we're not merging the graphs, clear data from the project onto graph\n if not merge:\n merge_onto_graph.nodes[merge_node]['colinear_segments'] = np.empty((0, 2, 2))\n\n # append the colinear points\n add_segments_to_node(merge_onto_graph, merge_node, colinear_segments)\n\n ahead_child = get_child_ahead(merge_onto_graph, merge_node)\n behind_child = get_child_behind(merge_onto_graph, merge_node)\n\n # descend down the positive branch of the tree\n # recursively call if the merge onto graph has a positive child\n if ahead_child is not None:\n merge_onto_graph = merge_helper(merge_onto_graph, ahead_child, tree_ahead)\n\n # otherwise add the remaining ahead nodes to the merge_onto_graph and connect with an edge\n elif tree_ahead.number_of_nodes() > 0 and keep_outer:\n # rename the nodes\n node_labels = {x: f\"f{merge_node}-{x}\" for x in tree_ahead.nodes}\n nx.relabel.relabel_nodes(tree_ahead, node_labels, copy=False)\n merge_onto_graph = nx.union(merge_onto_graph, tree_ahead)\n\n # add an edge\n tree_ahead_head = get_root(tree_ahead)\n merge_onto_graph.add_edge(merge_node, tree_ahead_head, position=+1)\n\n # descend down the negative branch of the tree\n # recursively call if the merge onto graph has a positive child\n if behind_child is not None:\n merge_onto_graph = merge_helper(merge_onto_graph, behind_child, tree_behind)\n\n # otherwise add the remaining ahead nodes to the merge_onto_graph and connect with an edge\n elif tree_behind.number_of_nodes() > 0 and keep_inner:\n # rename the nodes\n node_labels = {x: f\"b{merge_node}-{x}\" for x in tree_behind.nodes}\n nx.relabel.relabel_nodes(tree_behind, node_labels, copy=False)\n merge_onto_graph = nx.union(merge_onto_graph, tree_behind)\n\n # add an edge\n tree_behind_head = get_root(tree_behind)\n merge_onto_graph.add_edge(merge_node, tree_behind_head, position=-1)\n\n return merge_onto_graph\n\n # rename both graphs so there's not an error when adding nodes\n T1 = T1.copy()\n t1_root = get_root(T1)\n merged_graph = merge_helper(T1, t1_root, T0)\n if trim:\n trim_leaves(merged_graph)\n\n new_labels = {node: n for n, node in enumerate(nx.topological_sort(merged_graph))}\n return nx.relabel_nodes(merged_graph, new_labels)", "def find_project(xp, **kwargs):\n path = '/search/project'\n if 
'schema' not in kwargs:\n kwargs['schema'] = ProjectCollection.SCHEMA\n tag_class = {'collection': ProjectCollection, 'project': ROProject}\n return _find(path, xp, tag_class, **kwargs)", "def _projections(self, nvar):\n min_var = self.proje_var.argsort()[:nvar]\n add_coeffs = 1 / self.proje_var[min_var]\n indp_est_proje = np.dot(add_coeffs, self.sep_proje_eval[min_var]) /\\\n np.sum(add_coeffs)\n\n # consider covariance\n coverr = []\n try:\n proje_cov_inv = np.linalg.inv(self.proje_cov[min_var][:, min_var])\n cov_weight = np.sum(proje_cov_inv, axis=0) / np.sum(proje_cov_inv)\n cov_est_proje = np.dot(cov_weight, self.sep_proje_eval[min_var])\n coverr.append(1/np.sum(proje_cov_inv))\n except:\n cov_est_proje = np.ones(self.sep_proje_eval.shape[1])\n cov_est_proje[:] = np.nan\n coverr.append(np.nan)\n return np.array([indp_est_proje, cov_est_proje])", "def projection(self):\n return self.dataset.GetProjection() if self.dataset else None", "def getProjections(self): \n x, y, z = self.XYZCoordinate\n origin = self.SkeletonPoints[0]\n self.coorOrigin = origin\n self.XYProjections = [GeometryToolBox.projected_point(p, origin, x, y) for p in self.SkeletonPoints]\n self.XZProjections = [GeometryToolBox.projected_point(p, origin, x, z) for p in self.SkeletonPoints]", "def _explore(self) -> collections.abc.Iterable:\n # get the workspace folder\n ws = self.pyre_fileserver[\"/workspace\"]\n # starting with it\n root = ws\n # navigate down to my {root} carefully one step at a time because the filesystem\n # may not have been fully explored by the time we get here\n for folder in self.root.parts:\n # if {root} is empty\n if not root.contents:\n # it probably just requires exploring; gently...\n root.discover(levels=1)\n # if the {folder} is already among the contents of the current directory,\n # someone else has visited and explored this level; mark this folder as\n # the place to explore and move on\n root = root[folder]\n\n # if we get his far, my {root} exists; all my sources live here\n # so let's explore this subtree\n root.discover()\n\n # for each asset, there are two interesting projections of its uri:\n # the first one is relative to the project root; we use this to form the asset name\n # because it is guaranteed to be globally unique within a given project; in addition,\n # it is an easily predictable name to use in configuration files\n # the second projection is relative to the root of the library, which gets folded\n # with {/prefix/include} and the library name to locate the installation location of\n # headers, and joined with some special character to form the unique name of the object\n # modules that form an archive\n\n # for the library {root}, these are trivial: the projection relative to the workspace\n # is its own {root}, by definition\n relWS = self.root\n # and the projection relative to the root is empty\n relLib = merlin.primitives.path()\n # use these to convert the library {root} into an asset and decorate it\n # the name must be a string, so coerce the root projection; these operations are trivial\n # for the library root, but they set the pattern for building all of its assets\n top = self.folder(name=str(relWS / relLib), node=root, path=relLib)\n # build the asset recognizer\n classifier = self.languages.classifier\n # now, starting with my root\n todo = [top]\n # dive into the tree\n for folder in todo:\n # grab its contents\n for entry, node in folder.node.contents.items():\n # the projection of the asset relative to the library root is given by folding\n # its name 
onto the projection of its folder\n path = folder.path / entry\n # and the name of this asset is obtained by folding this onto the library root\n name = str(relWS / path)\n # folders\n if node.isFolder:\n # become directories\n asset = self.folder(name=name, node=node, path=path)\n # and get added to the pile of places to visit\n todo.append(asset)\n # everything else is assumed to be a regular file\n else:\n # so they become file based assets\n asset = self.file(\n name=name, node=node, path=path, classifier=classifier\n )\n # either way, assets are attached to their container\n folder.add(asset=asset)\n # publish the top level folder\n yield top\n # all done\n return", "def resolve_tree(self, path):\n\n try:\n node = self.cache.get_tree(self.userProjects)[path]\n\n if type(node) is gitlab.v4.objects.Group:\n # Groups API does not return a creation time\n return Entity(\n EntityType.GROUP,\n path,\n create_directory_attributes(self.initTime),\n {'group': node}\n )\n elif type(node) is gitlab.v4.objects.User:\n # Users API does not return a creation time\n return Entity(\n EntityType.USER,\n path,\n create_directory_attributes(self.initTime),\n {'user': node}\n )\n elif type(node) is gitlab.v4.objects.Project:\n projectTime = iso8601.parse_date(node.last_activity_at).timestamp()\n\n return Entity(\n EntityType.PROJECT,\n path,\n create_directory_attributes(projectTime),\n {'project': node}\n )\n else:\n return None\n except KeyError:\n return None", "def __init__(self, georecord, domain, GEOS_point=None, place=None):\n\n self.georecord = georecord\n self.domain = domain\n\tself.markers = []\n\n if GeoLevel.objects.filter(parent=georecord.level).count() == 0:\n # display siblings\n mapped_records = GeoRecord.objects.filter(parent=georecord.parent, geom__isnull=False)\n center = georecord.geom.centroid\n has_children = False\n else:\n # display children\n mapped_records = GeoRecord.objects.filter(parent=georecord, geom__isnull=False)\n center = mapped_records.collect().centroid\n has_children = True\n\n\n if mapped_records.count() == 0:\n return blank_map()\n\n polylines = None\n if has_children:\n # add an outline of the parent geo\n polylines = [google.Polyline(\n poly,\n stroke_color=\"#444\",\n stroke_weight=1.5,\n z_index=2\n ) for poly in georecord.geom]\n\n\tif GEOS_point:\n marker=google.Marker(geom=GEOS_point, title=place, clickable='true')\n self.markers.append(marker)\n\n super(GeoSelectMap, self).__init__(\n center=center,\n zoom=self._zoom(),\n markers=self.markers,\n polygons=self._polygons(mapped_records),\n polylines=polylines,\n )", "def extract_organization(self, root):\n organization = {}\n info = root.xpath('.//li/h4/a')\n if info:\n link = info[0].get('href', None)\n name = info[0].get('title', None)\n if link and name:\n stmt = select([\n func.count(self.organization_table.c.path)\n ]).where(\n self.organization_table.c.path == link\n )\n results = self.connection.execute(stmt).fetchall()\n if results[0][0] > 0:\n self.logger.debug('{} already exists'.format(name))\n return None\n self.logger.debug('Querying {1}: {0}'.format(link, name))\n response = self.session.get(self.PODEROPEDIA_BASE_URL + link)\n content = response.content\n html_tree = etree.HTML(content, parser=self.parser)\n connections = html_tree.xpath('//div[@id=\"conexiones\"]')\n if connections:\n organization_data = self.extract_element_data(connections[0])\n organization['organization_data'] = organization_data if organization_data else {}\n organization['organization_data']['path'] = link\n\n person = 
self.extract_persons(connections[0])\n organization['member'] = person if person else []\n for item in organization['member']:\n item.update({'source_path': link})\n\n related_organization = self.extract_participation(connections[0])\n organization['organization'] = related_organization if related_organization else []\n for item in organization['organization']:\n item.update({'source_path': link})\n return organization", "def __init__(self, projection_point):\n self.projection_point = vector(projection_point)\n self.dim = self.projection_point.degree()\n \n pproj = vector(RDF,self.projection_point)\n self.psize = norm(pproj)\n if (self.psize).is_zero():\n raise ValueError, \"projection direction must be a non-zero vector.\"\n v = vector(RDF, [0.0]*(self.dim-1) + [self.psize]) - pproj\n polediff = matrix(RDF,v).transpose()\n denom = RDF((polediff.transpose()*polediff)[0][0])\n if denom.is_zero():\n self.house = identity_matrix(RDF,self.dim)\n else:\n self.house = identity_matrix(RDF,self.dim) \\\n - 2*polediff*polediff.transpose()/denom # Householder reflector", "def project_drawn(cb, msg):\n stream = cb.streams[0]\n old_data = stream.data\n stream.update(data=msg['data'])\n element = stream.element\n stream.update(data=old_data)\n proj = cb.plot.projection\n if not isinstance(element, _Element) or element.crs == proj:\n return None\n crs = element.crs\n element.crs = proj\n return project(element, projection=crs)", "def _initWithProjection(self, unitsPerPixel=None):\n inProj = self._proj4Proj(NeededInitPrefix + 'epsg:4326')\n # Since we already converted to bytes decoding is safe here\n outProj = self._proj4Proj(self.projection)\n if outProj.crs.is_geographic:\n msg = ('Projection must not be geographic (it needs to use linear '\n 'units, not longitude/latitude).')\n raise TileSourceError(msg)\n if unitsPerPixel:\n self.unitsAcrossLevel0 = float(unitsPerPixel) * self.tileSize\n else:\n self.unitsAcrossLevel0 = ProjUnitsAcrossLevel0.get(self.projection)\n if self.unitsAcrossLevel0 is None:\n # If unitsPerPixel is not specified, the horizontal distance\n # between -180,0 and +180,0 is used. Some projections (such as\n # stereographic) will fail in this case; they must have a\n # unitsPerPixel specified.\n equator = pyproj.Transformer.from_proj(inProj, outProj, always_xy=True).transform(\n [-180, 180], [0, 0])\n self.unitsAcrossLevel0 = abs(equator[0][1] - equator[0][0])\n if not self.unitsAcrossLevel0:\n msg = 'unitsPerPixel must be specified for this projection'\n raise TileSourceError(msg)\n if len(ProjUnitsAcrossLevel0) >= ProjUnitsAcrossLevel0_MaxSize:\n ProjUnitsAcrossLevel0.clear()\n ProjUnitsAcrossLevel0[self.projection] = self.unitsAcrossLevel0\n # This was\n # self.projectionOrigin = pyproj.transform(inProj, outProj, 0, 0)\n # but for consistency, it should probably always be (0, 0). 
Whatever\n # renders the map would need the same offset as used here.\n self.projectionOrigin = (0, 0)\n # Calculate values for this projection\n self.levels = int(max(int(math.ceil(\n math.log(self.unitsAcrossLevel0 / self.getPixelSizeInMeters() / self.tileWidth) /\n math.log(2))) + 1, 1))\n # Report sizeX and sizeY as the whole world\n self.sizeX = 2 ** (self.levels - 1) * self.tileWidth\n self.sizeY = 2 ** (self.levels - 1) * self.tileHeight", "def _convert_product_projection(\n self, product: models.Product, staged: bool = False\n ) -> typing.Optional[models.ProductProjection]:\n if product.master_data is None:\n return None\n\n if staged:\n data: models.ProductData = product.master_data.staged\n else:\n data: models.ProductData = product.master_data.current\n\n if data is None:\n return None\n\n return models.ProductProjection(\n id=product.id,\n version=product.version,\n key=product.key,\n created_at=product.created_at,\n last_modified_at=product.last_modified_at,\n product_type=product.product_type,\n name=data.name,\n description=data.description,\n slug=data.slug,\n categories=data.categories,\n category_order_hints=data.category_order_hints,\n meta_title=data.meta_title,\n meta_description=data.meta_description,\n meta_keywords=data.meta_keywords,\n search_keywords=data.search_keywords,\n has_staged_changes=product.master_data.has_staged_changes,\n published=product.master_data.published,\n master_variant=data.master_variant,\n variants=data.variants,\n tax_category=product.tax_category,\n state=product.state,\n review_rating_statistics=product.review_rating_statistics,\n )", "def draw(self, projection, view, model, **param):\n # merge named parameters given at initialization with those given here\n param = dict(param, **self.param)\n model = model@self.transform\n for child in self.children:\n child.draw(projection, view, model, **param)", "def findSensibleProjection(geom):\n coords = getCoords(geom)\n y = coords[:, 1]\n x = coords[:, 0]\n yMin = y.min()\n yMax = y.max()\n if (yMax - yMin) > 90:\n # We are crossing a lot of latitude, which suggests that we have a \n # long strip> In this case, we don't even bother to suggest an EPSG. \n epsg = None\n elif yMin < -80:\n # We are nearing the south pole, so go with UPS south\n epsg = 32761\n elif yMax > 80:\n # Nearing north pole, so UPS North\n epsg = 32661\n else:\n # Work out a UTM zone. 
Note that we use the median value to get a rough \n # idea of the centre, rather than the mean, because the mean is subject to all \n # sorts of problems when crossing the date line\n xMedian = numpy.median(x)\n yMedian = numpy.median(y)\n zone = int((xMedian + 180)/6) % 60 + 1\n if yMedian < 0:\n epsgBase = 32700\n else:\n epsgBase = 32600\n epsg = epsgBase + zone\n return epsg", "def parse_layer(layer, elements, parents):\n for e in layer:\n if isinstance(e, rp.Shape):\n elements['Shapes'].append([e, parents])\n elif isinstance(e, rp.Stroke):\n elements['Strokes'].append([e, parents])\n elif isinstance(e, rp.Layer):\n parents_copy = parents[:]\n parents_copy.insert(0, e)\n elements = parse_layer(e, elements, parents_copy)\n return elements", "def projection_mode(self, mode):\n self.ptr.projection_mode(mode)", "def getProjections(self, schemaName: str = None, name: str = None) -> dict:\n path = \"/config/projections\"\n params = {}\n if name is not None and schemaName is None:\n raise AttributeError(\n \"You must specify a schema name when setting a projection name\"\n )\n if self.loggingEnabled:\n self.logger.debug(f\"Starting getProjections\")\n if schemaName is not None:\n params[\"schemaName\"] = schemaName\n if name is not None:\n params[\"name\"] = name\n res = self.connector.getData(\n self.endpoint + path, params=params, headers=self.headers\n )\n return res", "def get_grid_patch_collection(self, zpts, plotarray, **kwargs):\n if self.mg.grid_type == \"structured\":\n return self.__cls.get_grid_patch_collection(zpts=zpts, plotarray=plotarray,\n **kwargs)\n elif self.mg.grid_type == \"unstructured\":\n raise NotImplementedError()\n\n else:\n return self.__cls.get_grid_patch_collection(projpts=zpts, plotarray=plotarray,\n **kwargs)", "def classify(self, projections):\n res = []\n for proj in projections:\n n = self.root\n while len(n.children):\n found = False\n for i, b in enumerate(n.branch):\n if proj[n.axis] <= b:\n n = n.children[i]\n found = True\n if not found:\n if len(n.branch)+1==len(n.children):\n n = n.children[-1]\n else:\n break\n res.append(n.probabilities)\n return np.array(res)", "def get_projection(attrs):\n df = load_df()\n\n X = get_all_vectors(df, attrs)\n logger.info('- Data shape original: {}'.format(X.shape))\n\n X = X if isinstance(X, np.ndarray) else X.toarray()\n X = dimension_reduction(X, attrs['decomposition'], attrs['distanceMetric'])\n return X, df", "def setCrsIsProjection(self):\n self.isgeographic = False", "def build_layers(node: md.Document, *, disambiguate_names: bool = True) -> list[dict]:\n layers = []\n names = []\n for i, folder in enumerate(get(node, \"Folder\")):\n name = val(get1(folder, \"name\"))\n geojson = build_feature_collection(folder, name)\n if geojson[\"features\"]:\n layers.append(geojson)\n names.append(name)\n\n if not layers:\n # No folders, so use the root node\n name = val(get1(node, \"name\"))\n geojson = build_feature_collection(node, name)\n if geojson[\"features\"]:\n layers.append(geojson)\n names.append(name)\n\n if disambiguate_names:\n new_names = disambiguate(names)\n new_layers = []\n for i, layer in enumerate(layers):\n layer[\"name\"] = new_names[i]\n new_layers.append(layer)\n layers = new_layers\n\n return layers", "def extractProlateEllipse(kperp,kpar,aniso):\n\n if aniso > 1.:\n #print(\"Swapping axis for prolate ellipse\")\n aniso = 1. / aniso\n\n # Define the eccentricity of the ellipse\n e = np.sqrt( 1. - aniso**2 )\n\n\n # the prolate surface area\n surface = 2. * np.pi * kpar**2. * ( 1. + (1. 
/ ( e * np.sqrt( 1. - e**2. ) ) ) * np.arcsin(e) )\n\n return surface", "def _get_collection(surface_type, surfaces, opacity, facecolor, edgecolors=\"black\"):\n if surface_type == \"shading\":\n coords = [getcoords(s) for s in surfaces if not hasattr(s, \"Surface_Type\")]\n else:\n coords = [\n getcoords(s)\n for s in surfaces\n if hasattr(s, \"Surface_Type\")\n and s.Surface_Type.lower() == surface_type.lower()\n ]\n trimmed_coords = [c for c in coords if c] # dump any empty surfaces\n collection = Poly3DCollection(\n trimmed_coords, alpha=opacity, facecolor=facecolor, edgecolors=edgecolors\n )\n return collection", "def get_organization(self):\n pos_or_org = self.position.to_object\n if pos_or_org is None:\n return None\n elif pos_or_org.portal_type == 'position':\n return pos_or_org.get_organization()\n elif pos_or_org.portal_type == 'organization':\n return pos_or_org", "def _extract_grid_graph_obj(obj_dict, reference_obj, grid, path):\n\n from chart_studio.grid_objs import Column\n\n for prop in list(obj_dict.keys()):\n propsrc = \"{}src\".format(prop)\n if propsrc in reference_obj:\n val = obj_dict[prop]\n if is_array(val):\n column = Column(val, path + prop)\n grid.append(column)\n obj_dict[propsrc] = \"TBD\"\n del obj_dict[prop]\n\n elif prop in reference_obj:\n prop_validator = reference_obj._validators[prop]\n if isinstance(prop_validator, CompoundValidator):\n # Recurse on compound child\n _extract_grid_graph_obj(\n obj_dict[prop],\n reference_obj[prop],\n grid,\n \"{path}{prop}.\".format(path=path, prop=prop),\n )\n\n # Chart studio doesn't handle links to columns inside object\n # arrays, so we don't extract them for now. Logic below works\n # and should be reinstated if chart studio gets this capability\n #\n # elif isinstance(prop_validator, CompoundArrayValidator):\n # # Recurse on elements of object arary\n # reference_element = prop_validator.validate_coerce([{}])[0]\n # for i, element_dict in enumerate(obj_dict[prop]):\n # _extract_grid_graph_obj(\n # element_dict,\n # reference_element,\n # grid,\n # '{path}{prop}.{i}.'.format(path=path, prop=prop, i=i)\n # )", "def __convert(args):\n a, b, zone, ellipsoid, datum, inverse = args\n projection = Proj(\"+proj=utm +zone={}, +ellps={} +datum={} +units=m +no_defs\".format(zone, ellipsoid, datum))\n c, d = projection(a, b, inverse=inverse)\n\n return c, d", "def proj(self, X, G):\n raise NotImplementedError", "def render_2d(projection, **kwds):\n if isinstance(projection, Polyhedron): projection = Projection(projection)\n return \\\n projection.render_points_2d(zorder=2, pointsize=10, **kwds) + \\\n projection.render_outline_2d(zorder=1, **kwds) + \\\n projection.render_fill_2d(zorder=0, rgbcolor=(0,1,0), **kwds)", "def get_polygons(annotation):\n print(f\"Loadding: {annotation}\")\n tree = ET.parse(annotation)\n root = tree.getroot()\n polygons = {}\n for obj in root.findall('object'):\n name = obj.find('name').text\n id_ = obj.find('id').text\n polygon = []\n for pt in obj.find('polygon').findall('pt'):\n polygon.append([pt.find('x').text, pt.find('y').text])\n if name in polygons:\n x_ref= int(polygons[name]['left'][0][0])\n x = int(polygon[0][0])\n if x > x_ref:\n polygons[name]['right'] = polygons[name]['left']\n id_ = 'left'\n else:\n id_ = 'right'\n else:\n polygons[name] = {}\n id_ = 'left'\n polygons[name][id_] = polygon\n for i in list(polygons.keys()):\n if not('right' in polygons[i]):\n print(i,' only has one polygon: ',polygons[i]['left'])\n y = input('Do you wish to label it as \\'right\\'? 
(leave empy if No): ')\n if (y):\n polygons[i]['right'] = polygons[i]['left']\n polygons[i].pop('left')\n return polygons", "def _build_reprojection_graph(self):\n EPS = 1e-8\n depths = self.depths_placeholder[tf.newaxis]\n poses = self.poses_placeholder[tf.newaxis]\n intrinsics = self.intrinsics_placeholder[tf.newaxis]\n\n batch, num, ht, wd = tf.unstack(tf.shape(depths), num=4)\n Ts = VideoSE3Transformation(matrix=poses)\n intrinsics = intrinsics_vec_to_matrix(intrinsics)\n\n ii, jj = tf.meshgrid(tf.range(0, num), tf.range(num, num+1))\n ii = tf.reshape(ii, [-1])\n jj = tf.reshape(jj, [-1])\n\n Tij = Ts.gather(jj) * Ts.gather(ii).inv()\n X0 = projective_ops.backproject(depths, intrinsics)\n X1 = Tij(X0)\n\n coords = projective_ops.project(X1, intrinsics)\n depths = X1[..., 2]\n\n indicies = tf.cast(coords[..., ::-1] + .5, tf.int32)\n indicies = tf.reshape(indicies, [-1, 2])\n depths = tf.reshape(depths, [-1])\n\n depth = tf.scatter_nd(indicies, depths, [ht, wd])\n count = tf.scatter_nd(indicies, tf.ones_like(depths), [ht, wd])\n\n depth = depth / (count + EPS)\n self.outputs['depth_reprojection'] = depth", "def test_grdimage_central_meridians_and_standard_parallels(grid, proj_type, lon0, lat0):\n fig_ref, fig_test = Figure(), Figure()\n fig_ref.grdimage(\n \"@earth_relief_01d_g\", projection=f\"{proj_type}{lon0}/{lat0}/15c\", cmap=\"geo\"\n )\n fig_test.grdimage(grid, projection=f\"{proj_type}{lon0}/{lat0}/15c\", cmap=\"geo\")\n return fig_ref, fig_test", "def get_polygons(self, by_spec=False, depth=None):\n if depth is not None and depth < 0:\n if not (by_spec is False or by_spec is True):\n return []\n bb = self.get_bounding_box()\n if bb is None:\n return {} if by_spec else []\n pts = [\n numpy.array(\n [\n (bb[0, 0], bb[0, 1]),\n (bb[0, 0], bb[1, 1]),\n (bb[1, 0], bb[1, 1]),\n (bb[1, 0], bb[0, 1]),\n ]\n )\n ]\n polygons = {self.name: pts} if by_spec else pts\n else:\n if by_spec is True:\n polygons = {}\n for polyset in self.polygons:\n for ii in range(len(polyset.polygons)):\n key = (polyset.layers[ii], polyset.datatypes[ii])\n if key in polygons:\n polygons[key].append(numpy.array(polyset.polygons[ii]))\n else:\n polygons[key] = [numpy.array(polyset.polygons[ii])]\n for path in self.paths:\n path_polygons = path.get_polygons(True)\n for kk in path_polygons.keys():\n if kk in polygons:\n polygons[kk].extend(path_polygons[kk])\n else:\n polygons[kk] = path_polygons[kk]\n for reference in self.references:\n if depth is None:\n next_depth = None\n else:\n next_depth = depth - 1\n cell_polygons = reference.get_polygons(True, next_depth)\n for kk in cell_polygons.keys():\n if kk in polygons:\n polygons[kk].extend(cell_polygons[kk])\n else:\n polygons[kk] = cell_polygons[kk]\n elif by_spec is False:\n polygons = []\n for polyset in self.polygons:\n for points in polyset.polygons:\n polygons.append(numpy.array(points))\n for path in self.paths:\n polygons.extend(path.get_polygons())\n for reference in self.references:\n if depth is None:\n next_depth = None\n else:\n next_depth = depth - 1\n polygons.extend(reference.get_polygons(depth=next_depth))\n else:\n polygons = []\n layer, datatype = by_spec\n polygons.extend(\n numpy.array(polyset.polygons[ii])\n for polyset in self.polygons\n for ii in range(len(polyset.polygons))\n if polyset.layers[ii] == layer and polyset.datatypes[ii] == datatype\n )\n\n for path in self.paths:\n if any(ld == by_spec for ld in zip(path.layers, path.datatypes)):\n path_polygons = path.get_polygons(True)\n if by_spec in path_polygons:\n 
polygons.extend(path_polygons[by_spec])\n for reference in self.references:\n if depth is None:\n next_depth = None\n else:\n next_depth = depth - 1\n polygons.extend(reference.get_polygons(by_spec, next_depth))\n return polygons", "def plot_geometry ( ogr_geom_in, exterior_color, interior_color ) :\n if ogr_geom_in.GetGeometryName() == 'MULTIPOINT' or ogr_geom_in.GetGeometryName() == 'MULTILINESTRING' or ogr_geom_in.GetGeometryName() == 'MULTIPOLYGON' :\n for i in range(ogr_geom_in.GetGeometryCount()):\n plot_geometry ( ogr_geom_in.GetGeometryRef( i ), exterior_color, interior_color )\n if ogr_geom_in.GetGeometryName() == 'POINT' :\n x = []\n y = []\n x.append(ogr_geom_in.GetX())\n y.append(ogr_geom_in.GetY())\n pylab.plot(x,y,'o',color='y')\n if ogr_geom_in.GetGeometryName() == 'LINESTRING' :\n x = []\n y = []\n for i in range(ogr_geom_in.GetPointCount()) :\n x.append(ogr_geom_in.GetX(i))\n y.append(ogr_geom_in.GetY(i))\n pylab.plot(x,y,'-',color='g')\n if ogr_geom_in.GetGeometryName() == 'POLYGON' :\n polygon = ogr_geom_in\n ring_index = 0\n for nr_ring in range ( polygon.GetGeometryCount() ):\n ring = polygon.GetGeometryRef( nr_ring )\n x =[ring.GetX(i) for i in range(ring.GetPointCount()) ]\n y =[ring.GetY(i) for i in range(ring.GetPointCount()) ]\n if ring_index == 0 :\n pylab.plot(x,y,'-',color=str(exterior_color), linewidth=2.0, hold=True)\n else :\n pylab.plot(x,y,'-',color=str(interior_color), linewidth=2.0, hold=True)\n ring_index = ring_index + 1", "def _get_geoms(self, root, _parent=None):\n # Initialize return array\n geom_pairs = []\n # If the parent exists and this is a geom element, we add this current (parent, element) combo to the output\n if _parent is not None and root.tag == \"geom\":\n geom_pairs.append((_parent, root))\n # Loop through all children elements recursively and add to pairs\n for child in root:\n geom_pairs += self._get_geoms(child, _parent=root)\n # Return all found pairs\n return geom_pairs", "def _91_render_plant_data_to_projects(records, **params):\n for plant_record in [record for record in records if record[\"Type\"] == \"Plant\"]:\n key = (plant_record[\"Power Plant Name\"], plant_record[\"Project Name\"])\n project_records = [\n record\n for record in records\n if record[\"Type\"] == \"Project\"\n and record[\"Power Plant Name\"] == plant_record[\"Power Plant Name\"]\n ]\n if len(project_records) == 0:\n log.error(\"NO PROJECT RECORDS FOR %s\" % (key,))\n for project_record in project_records:\n for field in plant_record.keys():\n if project_record.get(field) in [None, '']:\n project_record[field] = plant_record[field]\n return records", "def wireframe(self, projection_type, canvas_dimensions):\n # Configure viewportself.screen_dimensions = {\n self.screen_dimensions = {\n \"width\": canvas_dimensions['width'],\n \"height\": canvas_dimensions['height']\n }\n\n self.projection.viewport = self.screen_dimensions\n self.projection.projection_type = projection_type\n self.projection.camera = self.cameras[0]\n self.projection.region_width = self.screen_dimensions.get('width')\n self.projection.region_height = self.screen_dimensions.get('height')\n\n # Draw polygons for each object\n projected_objects = []\n for obj in self.objects:\n print('Rendering: ', obj)\n\n world_transformation = obj.translate(\n obj.rotate(obj.scale(obj.vertices))\n )\n camera_transformation = obj.rotate(\n obj.translate(world_transformation, np.array(\n [\n -self.projection.camera.translation[0],\n -self.projection.camera.translation[1],\n 
-self.projection.camera.translation[2]\n ]\n )), np.array(\n [\n -self.projection.camera.rotation[0],\n -self.projection.camera.rotation[1],\n -self.projection.camera.rotation[2]\n ]\n \n )\n )\n projected_view = self.projection.project_all(camera_transformation)\n normalized_view = obj.normalize(\n projected_view, self.projection.viewport\n )\n projected_faces = []\n for face in obj.faces:\n poly = []\n for vertex_index in face:\n poly.append(\n [\n int(normalized_view[vertex_index][0]),\n int(normalized_view[vertex_index][1]),\n int(camera_transformation[vertex_index][2])\n ]\n )\n projected_faces.append(poly)\n center = list(obj.calculate_center(normalized_view))\n vertices = [ [int(p[0]), int(p[1]), int(p[2])] for p in normalized_view]\n # print('calculated_center: ', center)\n # print(''vertices)\n projected_objects.append({\n 'vertices': vertices,\n 'faces': obj.clip(self.projection.camera.translation, projected_faces),\n 'center': [ int(coord) for coord in obj.calculate_center(normalized_view) ],\n })\n print(projected_objects[0]['faces'][:20])\n return projected_objects", "def get_projector(self, key_string):\n if key_string in self._run_analysis_model.projectors:\n return self._run_analysis_model.projectors[key_string][0]\n return None", "def get_context_data(self,**kwargs):\n context = super().get_context_data(**kwargs)\n # reproject map markers\n for issue in context['object_list']:\n positionWidget = issue.position\n positionWidget.transform(settings.EPSG_WIDGET)\n issue.position_webmap = positionWidget.geojson\n return context", "def ComputeDPPrincipalProjection(data, projection_dims, orders, sigma):\n\n # Normalize each row.\n normalized_data = normalize(data, norm='l2', axis=1)\n covar = np.matmul(np.transpose(normalized_data), normalized_data)\n\n # Since the data is already normalized, there is no need to clip\n # the covariance matrix.\n\n gaussian_noise, rdp_budget = gaussian_rdp(covar.reshape([1,-1]), 1.0, orders, sigma)\n\n saned_covar = covar + gaussian_noise.reshape(covar.shape)\n\n # Symmetrize saned_covar. 
This also reduces the noise variance.\n saned_covar = 0.5 * (saned_covar + np.transpose(saned_covar))\n\n # Compute the eigen decomposition of the covariance matrix, and\n # return the top projection_dims eigen vectors, represented as columns of\n # the projection matrix.\n eigvals, eigvecs = np.linalg.eig(saned_covar)\n\n topk_indices = eigvals.argsort()[::-1][:projection_dims] \n topk_indices = np.reshape(topk_indices, [projection_dims])\n\n # Gather and return the corresponding eigenvectors.\n return np.transpose(np.take(np.transpose(eigvecs), topk_indices, axis=0)), rdp_budget", "def gluProject( baseFunction, objX, objY, objZ, model=None, proj=None, view=None ):\n if model is None:\n model = GL.glGetDoublev( GL.GL_MODELVIEW_MATRIX )\n if proj is None:\n proj = GL.glGetDoublev( GL.GL_PROJECTION_MATRIX )\n if view is None:\n view = GL.glGetIntegerv( GL.GL_VIEWPORT )\n winX = _simple.GLdouble( 0.0 )\n winY = _simple.GLdouble( 0.0 )\n winZ = _simple.GLdouble( 0.0 )\n result = baseFunction( \n objX,objY,objZ,\n model,proj,view,\n winX,winY,winZ,\n )\n # On Ubuntu 9.10 we see a None come out of baseFunction,\n # despite it having a return-type specified of GLint!\n if result is not None and result != _simple.GLU_TRUE:\n raise ValueError( \"\"\"Projection failed!\"\"\" )\n return winX.value, winY.value, winZ.value", "def any_geom2ogr_geom(geom, osr_sref):\n\n if isinstance(geom, (tuple, list)) and (not isinstance(geom[0], (tuple, list))) and \\\n (len(geom) == 4) and osr_sref:\n geom_ogr = geometry.bbox2polygon(geom, osr_sref)\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n elif isinstance(geom, (tuple, list)) and (isinstance(geom[0], (tuple, list))) and \\\n (len(geom) == 2) and osr_sref:\n edge = ogr.Geometry(ogr.wkbLinearRing)\n geom = [geom[0], (geom[0][0], geom[1][1]), geom[1], (geom[1][0], geom[0][1])]\n for point in geom:\n if len(point) == 2:\n edge.AddPoint(float(point[0]), float(point[1]))\n edge.CloseRings()\n geom_ogr = ogr.Geometry(ogr.wkbPolygon)\n geom_ogr.AddGeometry(edge)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = force_axis_mapping(geom_ogr)\n elif isinstance(geom, (tuple, list)) and isinstance(geom[0], (tuple, list)) and osr_sref:\n edge = ogr.Geometry(ogr.wkbLinearRing)\n for point in geom:\n if len(point) == 2:\n edge.AddPoint(float(point[0]), float(point[1]))\n edge.CloseRings()\n geom_ogr = ogr.Geometry(ogr.wkbPolygon)\n geom_ogr.AddGeometry(edge)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = force_axis_mapping(geom_ogr)\n elif isinstance(geom, shapely.geometry.Polygon):\n geom_ogr = ogr.CreateGeometryFromWkt(geom.wkt)\n geom_ogr.AssignSpatialReference(osr_sref)\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n elif isinstance(geom, ogr.Geometry):\n geom_sref = geom.GetSpatialReference()\n if geom_sref is None:\n geom.AssignSpatialReference(osr_sref)\n geom_ogr = geom\n geom_ogr = swap_axis(geom_ogr) # ensure lon lat order\n else:\n raise GeometryUnkown(geom)\n\n return geom_ogr", "def getCloseProjection(angleRot, angleTilt, projectionList):\n for projection in projectionList:\n if (abs(projection[0] - angleRot) <= 0.01 and\n abs(projection[1] - angleTilt) <= 0.01):\n return projection\n return None", "def evaluate_projections(self):\n # TODO turn this to batch computation\n\n if self.evaluated_projections:\n return\n\n print('Projecting entities in relations spaces.')\n\n for i in tqdm(range(self.number_entities)):\n ent_proj_vect = self.ent_proj_vects.data[i].view(1, -1)\n rel_proj_vects = 
self.rel_proj_vects.data.view(self.number_relations,\n self.rel_emb_dim, 1)\n\n projection_matrices = matmul(rel_proj_vects, ent_proj_vect)\n\n if projection_matrices.is_cuda:\n id_mat = eye(n=self.rel_emb_dim, m=self.ent_emb_dim, device='cuda')\n else:\n id_mat = eye(n=self.rel_emb_dim, m=self.ent_emb_dim)\n\n id_mat = id_mat.view(1, self.rel_emb_dim, self.ent_emb_dim)\n\n projection_matrices += id_mat.expand(self.number_relations, self.rel_emb_dim, self.ent_emb_dim)\n\n empty_cache()\n\n mask = tensor([i]).long()\n\n if self.entity_embeddings.weight.is_cuda:\n assert self.projected_entities.is_cuda\n empty_cache()\n mask = mask.cuda()\n\n entity = self.entity_embeddings(mask.cuda())\n projected_entity = matmul(projection_matrices, entity.view(-1)).detach()\n projected_entity = projected_entity.view(self.number_relations, self.rel_emb_dim, 1)\n self.projected_entities[:, :, i] = projected_entity.view(self.number_relations,\n self.rel_emb_dim)\n\n del projected_entity\n\n self.evaluated_projections = True", "async def test_entity_attribute_projection(self):\n test_name = 'test_entity_attribute_projection'\n entity_name = 'TestEntityAttributeProjection'\n\n corpus = TestHelper.get_local_corpus(self.tests_subpath, test_name)\n expected_output_path = TestHelper.get_expected_output_folder_path(self.tests_subpath, test_name)\n manifest = await corpus.fetch_object_async('local:/default.manifest.cdm.json')\n\n ent_test_entity_attribute_projection = await corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_name, entity_name), manifest)\n self.assertIsNotNone(ent_test_entity_attribute_projection)\n resolved_test_entity_attribute_projection = await ProjectionTestUtils.get_resolved_entity(corpus, ent_test_entity_attribute_projection, [])\n self.assertIsNotNone(resolved_test_entity_attribute_projection)\n await AttributeContextUtil.validate_attribute_context(self, corpus, expected_output_path, entity_name, resolved_test_entity_attribute_projection)", "def global_and_stereo_map(lat, lon, fld,\n plot_type='pcolormesh',\n cmap='YlOrRd',\n title=None,\n cmin=None,\n cmax=None,\n dpi=100,\n show_colorbar=True):\n\n # to do\n # -figsize option?\n # -cmin/cmax defaults handling with plot_proj ... 
\n # -colorbar defaults with diverging/sequential\n # -number of colors in plot\n # -suppress dask warnings\n # -get the subplot size \"just right\" no matter the figsize\n # -arrows for when colorbar is exceeded\n\n # handle colorbar limits\n cmin, cmax, extend_cbar = set_colorbar_limits(fld,cmin,cmax)\n\n # default figsize which seems to work for a laptop screen\n plt.figure(figsize=(12,6),dpi=dpi)\n\n # the big top global plot\n fig, ax1, p1, cb1 = ecco.plot_proj_to_latlon_grid(\n lat,lon,fld,\n cmap=cmap,\n plot_type=plot_type,\n subplot_grid=[2,1,1],\n projection_type='robin',\n show_colorbar=False,\n cmin=cmin,\n cmax=cmax,\n user_lon_0=0\n )\n\n # Arctic: bottom left\n fig, ax2, p2, cb2 = ecco.plot_proj_to_latlon_grid(\n lat,lon,fld,\n cmap=cmap,\n plot_type=plot_type,\n subplot_grid=[2,2,3],\n projection_type='stereo',\n show_colorbar=False,\n cmin=cmin,\n cmax=cmax,\n lat_lim=50,\n user_lon_0=0\n )\n\n\n # ACC: bottom right\n fig, ax3, p3, cb3 = ecco.plot_proj_to_latlon_grid(\n lat,lon,fld,\n cmap=cmap,\n plot_type=plot_type,\n subplot_grid=[2,2,4],\n projection_type='stereo',\n show_colorbar=False,\n cmin=cmin,\n cmax=cmax,\n lat_lim=-40,\n user_lon_0=180\n )\n\n # Set land color to gray\n ax1.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)\n ax2.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)\n ax3.add_feature(cart.feature.LAND,facecolor='0.7',zorder=2)\n\n # Make a single title\n if title is not None:\n fig.suptitle(title,verticalalignment='top',fontsize=24)\n\n # Make an overyling colorbar\n if show_colorbar:\n fig.subplots_adjust(right=0.9)\n cbar_ax = fig.add_axes([0.87, 0.1, 0.025, 0.8])\n fig.colorbar(p3, cax=cbar_ax, extend=extend_cbar)\n\n\n\n return fig, (ax1,ax2,ax3)", "def graph_obj(self):\r\n vizEngine = get_vizEngine().lower().strip()\r\n obj = None\r\n if self.levels > 0 and vizEngine=='bokeh':\r\n warnings.warn('Please switch the vizEngine to \"plotly\" to create contour plots.', UserWarning)\r\n if self.surface3D and vizEngine!='plotly':\r\n warnings.warn('Please switch the vizEngine to \"plotly\" to create 3D surface plots.', UserWarning)\r\n\r\n if vizEngine == 'bokeh':\r\n obj = MapBokeh(self.data, self.variable, self.levels, self.surface3D)\r\n elif vizEngine == 'plotly':\r\n obj = MapPlotly(self.data, self.variable, self.levels, self.surface3D)\r\n return obj", "def build_geometry(node: md.Document) -> dict:\n geoms = []\n times = []\n if get1(node, \"MultiGeometry\"):\n return build_geometry(get1(node, \"MultiGeometry\"))\n if get1(node, \"MultiTrack\"):\n return build_geometry(get1(node, \"MultiTrack\"))\n if get1(node, \"gx:MultiTrack\"):\n return build_geometry(get1(node, \"gx:MultiTrack\"))\n for geotype in GEOTYPES:\n geonodes = get(node, geotype)\n if not geonodes:\n continue\n for geonode in geonodes:\n if geotype == \"Point\":\n geoms.append(\n {\n \"type\": \"Point\",\n \"coordinates\": coords1(val(get1(geonode, \"coordinates\"))),\n }\n )\n elif geotype == \"LineString\":\n geoms.append(\n {\n \"type\": \"LineString\",\n \"coordinates\": coords(val(get1(geonode, \"coordinates\"))),\n }\n )\n elif geotype == \"Polygon\":\n rings = get(geonode, \"LinearRing\")\n coordinates = [coords(val(get1(ring, \"coordinates\"))) for ring in rings]\n geoms.append(\n {\n \"type\": \"Polygon\",\n \"coordinates\": coordinates,\n }\n )\n elif geotype in [\"Track\", \"gx:Track\"]:\n track = gx_coords(geonode)\n geoms.append(\n {\n \"type\": \"LineString\",\n \"coordinates\": track[\"coordinates\"],\n }\n )\n if track[\"times\"]:\n 
times.append(track[\"times\"])\n\n return {\"geoms\": geoms, \"times\": times}", "def test_query_projection(self):\n class Q(ndb.Model):\n a = ndb.StringProperty()\n b = ndb.IntegerProperty(repeated=True)\n\n q1 = Q(a='bar1', b=[1])\n k = q1.put()\n try:\n l = Q.query().get(projection=['b'], use_cache=False, use_memcache=False)\n self.assertEqual(l._to_dict(), {'b':[1]})\n finally:\n del Q\n k.delete()\n class Inner(ndb.Model):\n i = ndb.IntegerProperty()\n j = ndb.StringProperty(repeated=True)\n l = ndb.IntegerProperty(repeated=True)\n class Q(ndb.Model):\n s = ndb.StructuredProperty(Inner)\n a = ndb.StringProperty()\n q1 = Q(s=Inner(i=1, j=['a', 'b', 'foo'], l=[7,8]), a='blew')\n k1 = q1.put()\n try:\n # test simple get on structured property\n l = Q.query().get(projection=['s.i'], use_cache=False, use_memcache=False)\n self.assertEqual(l._to_dict(), {'s': {'i':1}})\n # test fetch on unrepeated property\n l = Q.query().fetch(projection=['s.i'], use_cache=False, use_memcache=False)\n self.assertTrue(len(l) == 1)\n self.assertEqual(l[0]._to_dict(), {'s': {'i':1}})\n # test fetch: one repeated property in structured property\n l = Q.query().fetch(projection=['s.j'], use_cache=False, use_memcache=False)\n self.assertTrue(len(l) == 3)\n self.assertEqual(l[0]._to_dict(), {'s': {'j':['a']}})\n self.assertEqual(l[1]._to_dict(), {'s': {'j':['b']}})\n self.assertEqual(l[2]._to_dict(), {'s': {'j':['foo']}})\n # test fetch: more projected properties, one repeated\n l = Q.query().fetch(projection=['s.j', 'a'], use_cache=False, use_memcache=False)\n self.assertTrue(len(l) == 3)\n self.assertEqual(l[0]._to_dict(), {'s': {'j':['a']}, 'a':'blew'})\n self.assertEqual(l[1]._to_dict(), {'s': {'j':['b']}, 'a':'blew'})\n self.assertEqual(l[2]._to_dict(), {'s': {'j':['foo']}, 'a': 'blew'})\n finally:\n k1.delete()", "def check_crs(crs, raise_on_error=False):\n\n try:\n crs = crs.salem.grid # try xarray\n except:\n pass\n\n if isinstance(crs, string_types):\n # necessary for python 2\n crs = str(crs)\n\n err1, err2 = None, None\n\n if isinstance(crs, pyproj.Proj) or isinstance(crs, Grid):\n out = crs\n elif isinstance(crs, crs_type):\n out = pyproj.Proj(crs.to_wkt(), preserve_units=True)\n elif isinstance(crs, dict) or isinstance(crs, string_types):\n if isinstance(crs, string_types):\n # quick fix for https://github.com/pyproj4/pyproj/issues/345\n crs = crs.replace(' ', '').replace('+', ' +')\n\n # A series of try-catch to handle the (too) many changes in pyproj\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', category=DeprecationWarning)\n warnings.filterwarnings('ignore', category=FutureWarning)\n try:\n out = pyproj.Proj(crs, preserve_units=True)\n except RuntimeError as e:\n err1 = str(e)\n try:\n out = pyproj.Proj(init=crs, preserve_units=True)\n except RuntimeError as e:\n err2 = str(e)\n out = None\n else:\n out = None\n\n if raise_on_error and out is None:\n msg = ('salem could not properly parse the provided coordinate '\n 'reference system (crs). This could be due to errors in your '\n 'data, in PyProj, or with salem itself. If this occurs '\n 'unexpectedly, report an issue to https://github.com/fmaussion/'\n 'salem/issues. 
Full log: \\n'\n 'crs: {} ; \\n'.format(crs))\n if err1 is not None:\n msg += 'Output of `pyproj.Proj(crs, preserve_units=True)`: {} ; \\n'\n msg = msg.format(err1)\n if err2 is not None:\n msg += 'Output of `pyproj.Proj(init=crs, preserve_units=True)`: {}'\n msg = msg.format(err2)\n raise ValueError(msg)\n\n return out", "def order_projections(model, connection_order):\n connection_list = [el if isinstance(el, tuple) else (el, None)\n for el in connection_order]\n\n for spec in model.projections:\n matches = [(i, el) for i, el in enumerate(connection_list)\n if el[0] == spec.matchname]\n if len(matches) == 0:\n raise Exception(\"Could not order projection %r\" % spec)\n elif len(matches) == 1:\n (i, (k, v)) = matches[0]\n spec.sort_precedence = i\n continue\n\n property_keys = [pdict.keys() for (_, (_, pdict)) in matches]\n if not all(len(pkeys)==1 for pkeys in property_keys):\n raise Exception(\"Please specify only a single property to sort on\")\n if not all(pkey[0]==property_keys[0][0] for pkey in property_keys):\n raise Exception(\"Please specify only a single property to sort on\")\n\n key = property_keys[0][0]\n spec_property_value = spec.src.properties[key]\n match = [ind for (ind, (_, pdict)) in matches if pdict[key] == spec_property_value]\n if len(match) != 1:\n raise Exception(\"Could not order projection %r by property %r\" % (spec, key))\n spec.sort_precedence = match[0]", "def loadViewerModel(self, rootNodeName=\"pinocchio\", color = None):\n\n # Set viewer to use to gepetto-gui.\n self.viewerRootNodeName = rootNodeName\n\n # Load robot meshes in MeshCat\n\n # Collisions\n # self.viewerCollisionGroupName = self.viewerRootNodeName + \"/\" + \"collisions\"\n self.viewerCollisionGroupName = None # TODO: collision meshes\n\n # Visuals\n self.viewerVisualGroupName = self.viewerRootNodeName + \"/\" + \"visuals\"\n\n for visual in self.visual_model.geometryObjects:\n self.loadViewerGeometryObject(visual,pin.GeometryType.VISUAL,color)", "def shape_element(element):\n node = {}\n if element.tag == \"node\" or element.tag == \"way\" :\n lat = str(element.get(\"lat\"))\n lon = str(element.get(\"lon\"))\n try:\n node[\"pos\"] = [float(lat),float(lon)]\n except:\n pass\n created = {}\n created[\"changeset\"] = element.get(\"changeset\")\n created[\"user\"] = element.get(\"user\")\n created[\"version\"] = element.get(\"version\")\n created[\"uid\"] = element.get(\"uid\")\n created[\"timestamp\"] = element.get(\"timestamp\")\n node[\"created\"] = created\n node[\"visible\"] = element.get(\"visible\")\n node[\"type\"] = element.tag\n node[\"id\"] = element.get(\"id\")\n \n ##Parse address elements\n address = {}\n for subelement in element.iter(\"tag\"):\n k_element = subelement.get(\"k\")\n v_element = subelement.get(\"v\")\n if not problemchars.match(k_element):\n if k_element.startswith(\"addr:\"):\n if is_street_name(subelement):\n v_element = update_name(v_element,mapping)\n k_elements = k_element.split(\":\")\n if(len(k_elements) < 3):\n address[k_elements[1]] = v_element\n else:\n node[k_element] = v_element\n if(bool(address)):\n node[\"address\"] = address\n \n if element.tag == \"way\":\n node_refs = []\n for subelement in element.iter(\"nd\"):\n node_refs.append(subelement.get(\"ref\"))\n node[\"node_refs\"] = node_refs\n \n return node\n else:\n return None", "def esp(CC, DD, bb, centered=False, abs_tol=1e-10, verbose=0):\n if 'glpk' not in solvers.installed_solvers:\n raise Exception(\n \"projection_esp error:\"\n \" Equality set projection requires `cvxopt.glpk` to run.\")\n # 
Remove zero columns and rows\n nonzerorows = np.nonzero(\n np.sum(np.abs(np.hstack([CC, DD])), axis=1) > abs_tol)[0]\n nonzeroxcols = np.nonzero(np.sum(np.abs(CC), axis=0) > abs_tol)[0]\n nonzeroycols = np.nonzero(np.sum(np.abs(DD), axis=0) > abs_tol)[0]\n C = CC[nonzerorows, :].copy()\n D = DD[nonzerorows, :].copy()\n C = C[:, nonzeroxcols]\n D = D[:, nonzeroycols]\n b = bb[nonzerorows].copy()\n # Make sure origo is inside polytope\n if not centered:\n xc0, yc0, trans = cheby_center(C, D, b)\n if trans:\n b = b - np.dot(C, xc0).flatten() - np.dot(D, yc0).flatten()\n else:\n b = b\n else:\n trans = False\n d = C.shape[1]\n k = D.shape[1]\n if verbose > 0:\n print(\"Projecting from dim \" + str(d + k) + \" to \" + str(d))\n if k == 0:\n # Not projecting\n return C, bb, []\n if d == 1:\n # Projection to 1D\n c = np.zeros(d + k)\n c[0] = 1\n G = np.hstack([C, D])\n sol = solvers.lpsolve(c, G, b, solver='glpk')\n if sol['status'] != \"optimal\":\n raise Exception(\n \"esp: projection to 1D is not full-dimensional, \"\n \"LP returned status \" + str(sol['status']))\n min_sol = np.array(sol['x']).flatten()\n min_dual_sol = np.array(sol['z']).flatten()\n sol = solvers.lpsolve(-c, G, b, solver='glpk')\n if sol['status'] != \"optimal\":\n raise Exception(\n \"esp: projection to 1D is not full-dimensional, \" +\n \"LP returned status \" + str(sol['status']))\n max_sol = np.array(sol['x']).flatten()\n max_dual_sol = np.array(sol['z']).flatten()\n # min, max\n x_min = min_sol[0]\n x_max = max_sol[0]\n y_min = min_sol[range(1, k + 1)]\n y_max = max_sol[range(1, k + 1)]\n if is_dual_degenerate(c, G, b, None, None, min_sol, min_dual_sol):\n # Min case, relax constraint a little to avoid infeasibility\n E_min = unique_equalityset(\n C, D, b, np.array([1.]), x_min + abs_tol / 3, abs_tol=abs_tol)\n else:\n E_min = np.nonzero(np.abs(np.dot(G, min_sol) - b) < abs_tol)[0]\n if is_dual_degenerate(c, G, b, None, None, max_sol, max_dual_sol):\n # Max case, relax constraint a little to avoid infeasibility\n E_max = unique_equalityset(\n C, D, b, np.array([1.]), x_max - abs_tol / 3, abs_tol=abs_tol)\n else:\n E_max = np.nonzero(np.abs(np.dot(G, max_sol) - b) < abs_tol)[0]\n G = np.array([[1.], [-1.]])\n g = np.array([x_max, -x_min])\n # Relocate\n if trans:\n g = g + np.dot(G, xc0)\n # Return zero cols/rows\n E_max = nonzerorows[E_max]\n E_min = nonzerorows[E_min]\n if verbose > 0:\n print(\n \"Returning projection from dim \" +\n str(d + k) + \" to dim 1 \\n\")\n return G, g, [E_max, E_min]\n E = []\n L = []\n E_0, af, bf = shoot(C, D, b, abs_tol=abs_tol)\n ridge_list = ridge(C, D, b, E_0, af, bf, abs_tol=abs_tol, verbose=verbose)\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n L.append(Ridge_Facet(r.E_r, r.ar, r.br, E_0, af, bf))\n G = af.T\n g = bf\n if verbose > 0:\n print(\"\\nStarting eq set \" + str(E_0) + \"\\nStarting ridges \")\n for rr in L:\n print(str(rr.E_r))\n E.append(E_0)\n while len(L) > 0:\n rid_fac1 = L[0]\n if verbose > 0:\n print(\"\\nLooking for neighbors to \" + str(rid_fac1.E_0) +\n \" and \" + str(rid_fac1.E_r) + \" ..\")\n E_adj, a_adj, b_adj = adjacent(C, D, b, rid_fac1, abs_tol=abs_tol)\n if verbose > 0:\n print(\"found neighbor \" + str(E_adj) +\n \". 
\\n\\nLooking for ridges of neighbor..\")\n ridge_list = ridge(\n C, D, b, E_adj, a_adj, b_adj,\n abs_tol=abs_tol, verbose=verbose)\n if verbose > 0:\n print(\"found \" + str(len(ridge_list)) + \" ridges\\n\")\n found_org = False\n for i in range(len(ridge_list)):\n r = ridge_list[i]\n E_r = r.E_r\n ar = r.ar\n br = r.br\n found = False\n for j in range(len(L)):\n rid_fac2 = L[j]\n A_r = rid_fac2.E_r\n if len(A_r) != len(E_r):\n continue\n t1 = np.sort(np.array(A_r))\n t2 = np.sort(np.array(E_r))\n if np.sum(np.abs(t1 - t2)) < abs_tol:\n found = True\n break\n if found:\n if verbose > 0:\n print(\"Ridge \" + str(E_r) +\n \" already visited, removing from L..\")\n if rid_fac2 == rid_fac1:\n found_org = True\n L.remove(rid_fac2)\n else:\n if verbose > 0:\n print(\"Adding ridge-facet \" + str(E_adj) +\n \" \" + str(E_r) + \"\")\n L.append(Ridge_Facet(E_r, ar, br, E_adj, a_adj, b_adj))\n if not found_org:\n print(\"Expected ridge \" + str(rid_fac1.E_r))\n print(\"but got ridges \")\n for rid in ridge_list:\n print(rid.E_r)\n raise Exception(\n \"esp: ridge did not return neighboring ridge as expected\")\n G = np.vstack([G, a_adj])\n g = np.hstack([g, b_adj])\n E.append(E_adj)\n # Restore center\n if trans:\n g = g + np.dot(G, xc0)\n # Return zero rows\n for Ef in E:\n Ef = nonzerorows[Ef]\n return G, g, E" ]
[ "0.61008054", "0.57861453", "0.541124", "0.5349921", "0.5343465", "0.5273637", "0.5211582", "0.51546365", "0.5075276", "0.504999", "0.49605206", "0.49293154", "0.48868015", "0.48221928", "0.48099476", "0.4738513", "0.47346017", "0.47128424", "0.47019666", "0.46675426", "0.4598494", "0.45937777", "0.45818222", "0.45808133", "0.45644972", "0.4564285", "0.45509356", "0.45503727", "0.4542558", "0.45421666", "0.45310926", "0.4505303", "0.44913134", "0.44620633", "0.44596225", "0.4456293", "0.43879464", "0.43850616", "0.43827447", "0.43818986", "0.43817094", "0.43732888", "0.43629655", "0.43629655", "0.43625396", "0.4348193", "0.43293825", "0.43270296", "0.43243787", "0.43103534", "0.43063012", "0.4303112", "0.42924842", "0.4280991", "0.42722106", "0.4270327", "0.42687118", "0.42492318", "0.4248313", "0.42375597", "0.42374825", "0.42369834", "0.42286628", "0.4223593", "0.42231405", "0.4211546", "0.41983095", "0.4196234", "0.4193385", "0.4193339", "0.4192947", "0.41819534", "0.4174804", "0.41746914", "0.41730595", "0.415323", "0.41526723", "0.41524225", "0.4152343", "0.41454482", "0.4142558", "0.41395408", "0.4137861", "0.4133", "0.4131064", "0.4130905", "0.4127538", "0.41247186", "0.41242564", "0.41237208", "0.41198096", "0.41114774", "0.4110582", "0.41087782", "0.41057548", "0.40964857", "0.40913925", "0.40724152", "0.40679273", "0.40620553" ]
0.6758612
0
Refreshes the plot by rerendering it and then pushing the updated data if the plot has an associated Comm.
def refresh(self, **kwargs):
    traverse_setter(self, '_force', True)
    key = self.current_key if self.current_key else self.keys[0]
    stream_params = stream_parameters(self.streams)
    key = tuple(None if d in stream_params else k
                for d, k in zip(self.dimensions, key))
    stream_key = util.wrap_tuple_streams(key, self.dimensions, self.streams)
    self.update(stream_key)
    if self.comm is not None:
        self.push()
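The positive document above is HoloViews-style plotting code; the sketch below is a hedged stand-in for the behaviour the query describes, not the actual implementation. CommAwarePlot, FakeComm, and their methods are assumed names invented for illustration. The point it shows: re-render unconditionally, then push the updated state only when a Comm is attached.

# Illustrative only: CommAwarePlot and FakeComm are invented for this sketch,
# not dataset or library APIs.
class CommAwarePlot:
    def __init__(self, comm=None):
        self.comm = comm   # e.g. a Jupyter Comm; None when running headless
        self.state = None

    def _render(self):
        # Stand-in for the real re-rendering step.
        self.state = {"data": [1, 2, 3]}

    def push(self):
        # Serialize the current state and send it over the Comm.
        self.comm.send({"state": self.state})

    def refresh(self):
        self._render()               # always rerender
        if self.comm is not None:    # push only if a Comm is associated
            self.push()

class FakeComm:
    def send(self, msg):
        print("pushed:", msg)

CommAwarePlot(FakeComm()).refresh()  # renders, then pushes
CommAwarePlot().refresh()            # headless: renders only, no push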
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_plot():\n pass", "def plot_refresh():\n figure.canvas.draw()", "def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )", "def update_plot(self,ax):\n self.replot(ax)", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def refresh_plot(self):\n self.ax.relim() # recompute the data limits\n self.ax.autoscale_view() # automatic axis scaling\n self.fig.canvas.flush_events()", "def update_plot(self, msg):\n if not self.plots_created:\n self.create_plots(msg.keys())\n self.plots_created = True\n\n for k, v in msg.iteritems():\n current = self.plotdata.get_data(k)\n self.plotdata.set_data(k, np.r_[current, v])", "def update_visualization(self) -> None:\n pass", "def force_plot_update(settings_dict):\n settings_dict[\"new_data\"] = True # Initiates the update of the plots\n settings_dict[\"last_plot_update\"] = settings_dict[\"update_counter\"]", "def UpdatePlot(self):\n\n if self.first_time:\n for ID, plt in self.plotIDs.iteritems():\n if plt:\n tmp = FellesBaseClass.FindInstance(ID)\n self.plot_panel.oplot(\n np.array(tmp.data['time']),\n np.array(tmp.data['data']),\n draw = True,\n side ='left',\n label = tmp['label'],\n color = tmp['color'],\n xlabel = None, ylabel = None, y2label = None,\n title = None,\n dy = None,\n ylog_scale = False,\n xmin = None, xmax = None, ymin = None, ymax = None,\n refresh = True,\n show_legend= True, legend_loc='ur', legend_on= True,\n delay_draw = False,\n marker = 'None', markersize = None,\n autoscale=True,\n linewidth = 3, # default 2\n drawstyle = 'line', style = 'solid',\n grid = True,\n bgcolor= None, framecolor= None, gridcolor= None,\n labelfontsize= 
10, # default 9\n legendfontsize= 12, # default 7\n fullbox=None, # 'box', 'open', 'bottom'\n axes_style=None,\n zorder=None,\n )\n self.first_time = False\n\n else:\n i = 0\n for ID,plt in self.plotIDs.iteritems():\n if plt:\n tmp = FellesBaseClass.FindInstance(ID)\n self.plot_panel.update_line(\n i,\n np.array(tmp.data['time']),\n np.array(tmp.data['data']),\n draw=True,\n )\n i += 1\n\n self.plot_panel.set_xylims(\\\n [\\\n floor( min( [ min( FellesBaseClass.FindInstance(ID).data['time'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n ceil( max( [ max( FellesBaseClass.FindInstance(ID).data['time'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n floor( min( [ min( FellesBaseClass.FindInstance(ID).data['data'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n ceil( max( [ max( FellesBaseClass.FindInstance(ID).data['data'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) )\\\n ]\\\n )\n\n self.panel_sizer.Fit(self)", "def on_new_data(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update plot data\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.plotted_data[i].setData(self.data_indices, self.measurements_list[i])", "def _update_plot(self, *args):\n # type: (dict, dict, dict, dict, dict) -> None\n if len(args) != 5 and not any([isinstance(arg, dict) for arg in args]):\n raise ValueError('Illegal arguments for _update_plot of %s' % self.__name__)\n desvars, responses, objectives, constraints, metadata = args\n\n data = self._compute_new_data(desvars, responses, objectives, constraints, metadata)\n self.cs[:, self.iter] = data[:]\n self.quad.set_array(self.cs.ravel())\n self.ax.set_xlim([-.5, self.iter+.5])\n self.iter += 1", "def updatePlot(self):\n self.axes.clear()\n nc = len(self.curvelist)\n xpos = self.curvelist[0].xvinfo.vidx\n for i in range(nc):\n ypos = self.curvelist[i].yvinfo.vidx\n self.axes.plot(self.data[xpos],\n self.data[ypos], self.col[i])\n if self.idata is not None:\n self.axes.plot(self.idata[xpos],\n self.idata[ypos], self.col[i]+'.')\n self.canvas.draw()", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))", "def refresh(self):\n\t\tif len(self.curves) == 0: return\n\t\t\n\t\tmax_x = max((c[1][-1] for c in self.curves.itervalues()))\n\t\n\t\tstart_x = max_x - self.x_visible\n\t\t\n\t\tfor c, x, y in self.curves.itervalues():\n\t\t\twhile len(x) > 0 and x[0] < start_x:\n\t\t\t\tdel x[0]\n\t\t\t\tdel y[0]\n\n\t\tfor curve, x, y in self.curves.itervalues():\n\t\t\t# I guess this probably copies stuff, more\n\t\t\t# efficient way would of course to use a\n\t\t\t# ringbuffer, but I couldn't find a ready made\n\t\t\t# implementation and am too busy for doing things\n\t\t\t# right.\n\t\t\tcurve.setData(x, y)\n\t\t\n\t\t# The autoscale likes to hang on to integers without\n\t\t# this\n\t\tself.setAxisScale(self.xBottom, start_x, max_x)\n\t\tself.replot()", "def update_data(self):\n\n # Update all plots in the figure\n self.data = self.model.measurements.get_bokeh_vis_data()\n self.source.stream(self.data, len(self.data))\n 
self.line_source.stream(self.data[self.data.agent_type == 'system'])\n self.school_dropdown_func()\n\n # Update the utility histograms\n self.update_histograms()\n\n # Update the composition histograms\n to_update = [self.neighbourhood_composition_quads, \n self.school_composition_quads, self.distance_quads]\n\n for quads in to_update:\n\n # Grab the new data\n if quads == self.neighbourhood_composition_quads:\n hist_data = self.composition_data(agent_type='neighbourhood')\n elif quads == self.school_composition_quads:\n hist_data = self.composition_data(agent_type='school')\n else:\n hist_data = self.composition_data(agent_type='household')\n\n # Update the bars and edges\n for group in hist_data.keys():\n\n hist, edges = np.histogram(hist_data[group],\n density=True,\n bins=20)\n\n # Update histogram\n quads[group].data_source.data['top'] = hist\n quads[group].data_source.data['left'] = edges[:-1]\n quads[group].data_source.data['right'] = edges[1:]", "def redraw(self, **kwargs):\n #src_dict = self.data_sources\n #self.remove_sources(src_dict.keys())\n self.renderers = {}\n #self.renderers = {}\n self.figure = self.draw_figure(**kwargs)\n #self.add_sources(src_dict)\n # todo does the old figure linger on?\n self.render_sources(self.data_sources)\n self.bk_pane.object = self.figure", "def update_graph(self, data_list):\n #log.debug(\"render graph\")\n x_axis = range(len(data_list))\n\n mcd = self.main_curve_dialog\n mcd.curve.set_data(x_axis, data_list)\n\n if self.auto_scale:\n mcd.get_plot().do_autoscale()\n else:\n mcd.get_plot().replot()", "def update_figure(self):\n\n self.draw()", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))", "def redraw(self):\r\n self.c.update()", "def force_update_graph(self):\n self.updated_data = 1\n self.update_graph()", "def _refresh(self):\n self._need_display_update = True\n self._update()", "def update(self):\n self.redraw()\n self._changed = False", "def update(self):\n self.redraw()\n self._changed = False", "def on_plot(self, event=None):\n data_id, theory_id, state_id = self.set_data_helper()\n self.parent.plot_data(data_id=data_id,\n state_id=state_id,\n theory_id=theory_id,\n append=False)\n self.enable_remove_plot()", "def updateArrayPlotData(self):\n self.arrayPlotData.set_data(\"channel0\",self.array0)\n self.arrayPlotData.set_data(\"channel1\",self.array1)\n self.arrayPlotData.set_data(\"channel2\",self.array2)\n self.arrayPlotData.set_data(\"channel3\",self.array3)\n self.arrayPlotData.set_data(\"channel4\",self.array4)\n self.arrayPlotData.set_data(\"channel5\",self.array5)\n self.arrayPlotData.set_data(\"channel6\",self.array6)\n self.arrayPlotData.set_data(\"channel7\",self.array7)\n self.arrayPlotData.set_data(\"cursorXS\",self.cursorXS)\n #self.arrayPlotData.set_data(\"cursorVertical\",self.cursorVertical)", "def update_figure(self):\n # if number of kinetics in model did not change\n # update just last lines\n if self.N_lines - 1 == len(self.model.spectra.keys()) * 2:\n self.dataplot.lines[-1].remove()\n self.dataplot.lines[-1].remove()\n self.draw_figure_first()\n # delete all and redraw\n else:\n n = int((self.N_lines - 1) / 2)\n for _ in range(n):\n self.dataplot.lines[-1].remove()\n self.dataplot.lines[-1].remove()\n self.draw_figure_total()\n\n self.dataplot.relim()\n\n 
self.dataplot.autoscale_view(True, True, True)\n\n self.draw()", "def update_plot(frame):\n global plotdata\n while True:\n try:\n data = q.get_nowait()\n except queue.Empty:\n break\n shift = len(data)\n plotdata = np.roll(plotdata, -shift, axis=0)\n plotdata[-shift:, :] = data\n for column, line in enumerate(lines):\n line.set_ydata(plotdata[:, column])\n return lines", "def _update_plots(self):\n for dock in self.plotDocks:\n for widget in dock.widgets:\n if not self.dataList.findItems(dock.name(), QtCore.Qt.MatchExactly):\n # no data for this plot -> reset it\n widget.getPlotItem().clear()\n # TODO remove tab from dock and del instance\n else:\n widget.getPlotItem().clear()\n x_data = self.currentDataset[\"results\"][\"time\"]\n y_data = self._get_data_by_name(dock.name())\n widget.getPlotItem().plot(x=x_data, y=y_data)", "def refresh(self):\n self._list_of_points = []\n self._add_points()", "def updatePlot(self):\n if len(self.baslin):\n X = list(t[0] for t in self.baslin)\n Y = list(t[1] for t in self.baslin)\n self.BLplt.set_xdata(X)\n self.BLplt.set_ydata(Y)\n if self.BLtyp == 'S':\n if self.BL is None:\n self.BL, = self.axes.plot(self.data[0], self.data[2], linestyle='-', color='green')\n else:\n self.BL.set_ydata(self.data[2])\n self.canvas.draw()", "def on_append_plot(self, event=None):\n self._on_plot_selection()\n data_id, theory_id, state_id = self.set_data_helper()\n self.parent.plot_data(data_id=data_id,\n state_id=state_id,\n theory_id=theory_id,\n append=True)", "def myUpdate(self, stateDict=None):\n\n # store stateDict so we can replot on changing dark theme\n if stateDict is None and self.stateDict is not None:\n # re-use our stateDict\n stateDict = self.stateDict\n else:\n if stateDict is None:\n return\n self.stateDict = stateDict.copy()\n\n if stateDict is None:\n return\n \n dataType = stateDict['dataType']\n hue = stateDict['hue']\n groupByColumnName = stateDict['groupByColumnName']\n\n plotType = stateDict['plotType']\n #self.plotType = plotType\n\n xStatHuman = stateDict['xStatHuman']\n yStatHuman = stateDict['yStatHuman']\n\n xStat = stateDict['xStat']\n yStat = stateDict['yStat']\n\n '''\n print('=== myMplCanvas.myUpdate()')\n print(' ', plotType)\n print(' ', 'xStatHuman:', xStatHuman, 'yStatHuman:', yStatHuman)\n print(' ', 'xStat:', xStat, 'yStat:', yStat)\n '''\n\n xIsCategorical = stateDict['xIsCategorical']\n yIsCategorical = stateDict['yIsCategorical']\n\n masterDf = stateDict['masterDf']\n meanDf = stateDict['meanDf']\n\n self.plotDf = meanDf\n\n self.canvas.axes.clear()\n\n picker = 5\n if plotType in ['Scatter Plot', 'Scatter + Raw + Mean']:\n # scatter plot user selection\n self.scatterPlotSelection, = self.canvas.axes.plot([], [], 'oy',\n markersize=12, fillstyle='none')\n\n # main scatter\n try:\n self.whatWeArePlotting = sns.scatterplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes, picker=picker,\n zorder=0)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print(' EXCEPTION: in myUpdate() \"Scatter Plot\", exception is:')\n print(' ', e)\n print(' ', 'plotType:', plotType)\n print(' ', 'xStat:', xStat)\n print(' ', 'yStat:', yStat)\n print(' ', 'hue:', hue)\n\n # sem in both x and y, pulling from masterDf\n if dataType=='File Mean' or plotType=='Scatter + Raw + Mean':\n # we need to do this for each hue???\n # if x or y is in categorical (e.g. 
a string) then do not do this ...\n if xIsCategorical or yIsCategorical:\n pass\n else:\n print(' grabbing mean +- sem for self.groupByColumnName:', groupByColumnName)\n color = 'k'\n xd = masterDf.groupby(groupByColumnName).mean()[xStat]\n xerrd = masterDf.groupby(groupByColumnName).sem()[xStat]\n yd = masterDf.groupby(groupByColumnName).mean()[yStat]\n yerrd = masterDf.groupby(groupByColumnName).sem()[yStat]\n \n # logger.info('2023 declan')\n # print(' groupByColumnName:', groupByColumnName)\n # print(' xd:', xd)\n # print(' yd:', yd)\n # print(' xerrd:', xerrd)\n # print(' yerrd:', yerrd)\n \n self.canvas.axes.errorbar(xd, yd, xerr=xerrd, yerr=yerrd,\n fmt='none', capsize=0, zorder=10, color=color, alpha=0.5);\n\n elif plotType == 'Histogram':\n yStatHuman = 'Count'\n doKde = False #stateDict['doKDE']\n try:\n g = sns.histplot(x=xStat, hue=hue, kde=doKde,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTIONin Histogram:', e)\n\n elif plotType == 'Cumulative Histogram':\n yStatHuman = 'Probability'\n try:\n g = sns.histplot(x=xStat, hue=hue, cumulative=True, stat='density',\n element=\"step\", fill=False, common_norm=False,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTION in Cumulative Histogram:', e)\n\n elif plotType == 'Cumulative Histogram':\n yStatHuman = 'Probability'\n try:\n g = sns.histplot(x=xStat, hue=hue, cumulative=True, stat='density',\n element=\"step\", fill=False, common_norm=False,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTION in Cumulative Histogram:', e)\n\n elif plotType == 'Violin Plot':\n if not xIsCategorical:\n warningStr = 'Violin plot requires a categorical x statistic'\n else:\n g = sns.violinplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes)\n\n elif plotType == 'Box Plot':\n if not xIsCategorical:\n warningStr = 'Box plot requires a categorical x statistic'\n else:\n g = sns.boxplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes)\n\n elif plotType == 'Raw + Mean Plot':\n if not xIsCategorical:\n warningStr = 'Raw + Mean plot requires a categorical x statistic'\n else:\n try:\n # does not work here for categorical x\n #self.scatterPlotSelection, = self.canvas.axes[0].plot([], [], 'oy',\n # markersize=12, fillstyle='none')\n\n '''\n colorList = [('red'), ('green'), 'b', 'c', 'm', 'y']\n hueList = meanDf[hue].unique()\n palette = {}\n for idx, hue in enumerate(hueList):\n palette[hue] = colorList[idx]\n print(palette)\n '''\n\n palette = sns.color_palette(\"Paired\")\n #palette = ['r', 'g', 'b']\n\n # stripplot\n #g = sns.swarmplot(x=xStat, y=yStat,\n g = sns.stripplot(x=xStat, y=yStat,\n hue=hue,\n palette=palette,\n data=meanDf,\n ax=self.canvas.axes,\n #color = color,\n dodge=True,\n alpha=0.6,\n picker=picker,\n zorder=1)\n\n\n #logger.error('!!!!!!!!!!!! grabbing get_legend_handles_labels()')\n self.canvas.axes.legend().remove()\n\n #logger.error('!!!!!!!!!!!! 
grabbing get_legend_handles_labels()')\n logger.info(f' REMAKING LEGEND sns.pointplot() plotNumber:{self.plotNumber}')\n handles, labels = self.canvas.axes.get_legend_handles_labels()\n l = self.canvas.axes.legend(handles[0:2], labels[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n #self.myLegend = self.canvas.axes.Legend(handles[0:2], labels[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\n '''\n if self.darkTheme:\n color = 'w'\n else:\n color = 'k'\n color = [color] * len(hueList)\n print('color:', color)\n '''\n\n self.whatWeArePlotting = sns.pointplot(x=xStat, y=yStat,\n hue=hue,\n #palette=palette,\n data=meanDf,\n estimator=np.nanmean,\n errorbar=('ci', 68),\n capsize=0.1,\n ax=self.canvas.axes,\n color='r',\n #legend='full',\n #zorder=10)\n )\n except (ValueError) as e:\n print('EXCEPTION in \"Raw + Mean Plot\":', e)\n traceback.print_exc()\n\n elif plotType == 'Regression Plot':\n # regplot does not have hue\n if xIsCategorical or yIsCategorical:\n warningStr = 'Regression plot requires continuous x and y statistics'\n else:\n # todo: loop and make a regplot\n # for each unique() name in\n # hue (like Region, Sex, Condition)\n hueList = masterDf[hue].unique()\n for oneHue in hueList:\n if oneHue == 'None':\n continue\n tmpDf = meanDf [ meanDf[hue]==oneHue ]\n #print('regplot oneHue:', oneHue, 'len(tmpDf)', len(tmpDf))\n sns.regplot(x=xStat, y=yStat, data=tmpDf,\n ax=self.canvas.axes);\n else:\n print(' did not understand plot type:', plotType)\n\n\n #\n # update\n self.canvas.axes.figure.canvas.mpl_connect(\"pick_event\", self.onPick)\n\n self.mplCursorHover = None\n if stateDict['doHover'] and self.whatWeArePlotting is not None:\n self.mplCursorHover = mplcursors.cursor(self.whatWeArePlotting, hover=True)\n @self.mplCursorHover.connect(\"add\")\n def _(sel):\n #sel.annotation.get_bbox_patch().set(fc=\"white\")\n sel.annotation.arrow_patch.set(arrowstyle=\"simple\", fc=\"white\", alpha=.5)\n # row in df is from sel.target.index\n #print('sel.target.index:', sel.target.index)\n ind = sel.target.index\n annotationDict = self.getAnnotation(ind)\n myText = ''\n for k,v in annotationDict.items():\n myText += f'{k}: {v}\\n'\n sel.annotation.set_text(myText)\n\n #\n #self.mySetStatusBar(warningStr)\n\n self.canvas.axes.spines['right'].set_visible(False)\n self.canvas.axes.spines['top'].set_visible(False)\n\n if not stateDict['showLegend']:\n #print('self.canvas.axes.legend():', self.canvas.axes.legend())\n #print('self.canvas.axes.legend:', self.canvas.axes.legend)\n #if self.canvas.axes.legend() is not None:\n if 1:\n #logger.error('!!!!!!!!!!!! 
grabbing get_legend_handles_labels()')\n self.canvas.axes.legend().remove()\n\n #print('myUpdate() self.plotSize:', self.plotSize)\n self.canvas.axes.set_xlabel(xStatHuman)\n self.canvas.axes.set_ylabel(yStatHuman)\n '''\n if self.plotSize == 'paper':\n fontsize = 10\n self.canvas.axes[0].set_xlabel(xStatHuman, fontsize=fontsize)\n self.canvas.axes[0].set_ylabel(yStatHuman, fontsize=fontsize)\n else:\n self.canvas.axes[0].set_xlabel(xStatHuman)\n self.canvas.axes[0].set_ylabel(yStatHuman)\n '''\n\n # subplots_adjust\n #self.fig.canvas.draw_idle()\n self.fig.canvas.draw()", "def _DoUpdatePlot( self, wd, ht ):\n self.ax.grid(\n True, 'both', 'both',\n\tcolor = '#c8c8c8', linestyle = ':', linewidth = 1\n\t)", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def update_graph(self, data):\n if (self.type == 'matplotlib'):\n pass\n else:\n pass", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + \" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))", "def reload_processgraph_view(self):\n #widget = self.processgraphWidget\n #self.load_dict_into_widget(widget, self.processgraph.graph)\n self.processgraphEdit.setText(json.dumps(self.processgraph.graph, indent=2, sort_keys=True))\n #widget.show()", "def on_axes_update(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update axes\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.chart_list[i].setXRange(self.worker.start_range,\n self.worker.samples_count + NUM_GUI_SAMPLES, padding=0.075)\n\n # for i, series in enumerate(self.measurements_list):\n #\n # # An optimization to prevent unnecessary rendering\n # if i == tab_open:\n #\n # # Remove old x-axis\n # series.detachAxis(self.xaxis_list[i])\n # self.chart_list[i].chart().removeAxis(self.xaxis_list[i])\n # self.xaxis_list[i] = QValueAxis()\n #\n # # Add new x-axis\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.xaxis_list[i].setRange(self.worker.samples_count, self.worker.samples_count +\n # NUM_GUI_SAMPLES)\n # series.attachAxis(self.xaxis_list[i])", "def updateGraph(self, symbol=None):\n if symbol is None:\n return\n\n # Get all stock data back for the given symbol\n self.stock_data = self.db.queryAllData(table_name=symbol)\n\n # Create a list of prices and a list of dates\n self.prices = [x[1].strip('$') for x in self.stock_data]\n self.dates = [x[0] for x in self.stock_data]\n date_string = [x.strftime(\"%m/%d/%Y\") for x in self.dates]\n self.x = [datetime.datetime.strptime(d, '%m/%d/%Y').date()\n for d in date_string]\n\n # Create an instance of QtMpl\n self.mpl = self.central.mpl\n self.mpl.addLine(x=self.x, y=self.prices, title=symbol)", "def rDataChanged(self):\n\n self._queues.uResolutionTab.refreshData()\n self._layerManager.updateReviewLayer()", "def refresh(self):\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.canvas.draw()", "def reinitialiseData(self):\n if self.arrayPlotData is not None:\n self.currentPosition = 0\n self.xs = scipy.linspace(0.0, 
self.numberOfPoints*self.resolution, self.numberOfPoints)\n self.cursorXS = self.getCurrentPositionArray()\n self.cursorVertical = scipy.array([self.verticalLimit,0.0])\n self.arrayPlotData.set_data(\"xs\",self.xs)\n self.array0 = scipy.zeros(self.numberOfPoints)\n self.array1 = scipy.zeros(self.numberOfPoints)\n self.array2 = scipy.zeros(self.numberOfPoints)\n self.array3 = scipy.zeros(self.numberOfPoints)\n self.array4 = scipy.zeros(self.numberOfPoints)\n self.array5 = scipy.zeros(self.numberOfPoints)\n self.array6 = scipy.zeros(self.numberOfPoints)\n self.array7 = scipy.zeros(self.numberOfPoints)\n self.channels = [self.array0,self.array1,self.array2,self.array3,\n self.array4,self.array5,self.array6,self.array7]\n self.updateArrayPlotData()", "def _redraw_graph(self) -> None:\n self._clear_drawing()\n self.draw_graph()", "def _plot_update(self):\n omit_log = ['sens_log']\n for log_group, log_arrays in self.qbpm.log_names.items():\n for log_array in log_arrays:\n if log_array not in omit_log:\n self.curves[log_array].setData(self.qbpm.log_time, self.qbpm.log_arrays[log_array],clear=True)\n # self.fill.setCurves(self.curves['posz_sens_low_log'], self.curves['posz_sens_high_log'])", "def redraw(self):\n raise NotImplementedError()", "def _update_plot(self) -> None:\n\n # Check if plotting is active\n if self._fig is None:\n return None\n LOG.debug(\"Updating plot.\")\n\n # Extract glaciated area\n hs_back = np.ma.masked_where(\n self.h <= 1,\n hillshade(\n self.ele, self.PLOT_HILLSHADE_AZIMUTH, self.PLOT_HILLSHADE_ALTITUDE\n ),\n )\n\n # Clear plot and draw axes\n self._fig.clear()\n ax = plt.subplot(121, facecolor=\"black\")\n ax.tick_params(axis=\"x\", colors=\"w\")\n ax.tick_params(axis=\"y\", colors=\"w\")\n ax.set(xlabel=\"X-coordinate [m]\", ylabel=\"Y-coordinate [m]\")\n ax.xaxis.label.set_color(\"w\")\n ax.yaxis.label.set_color(\"w\")\n title_text = f\"Year: {str(self.i)} ELA: {str(int(self.ela))} m.a.s.l.\"\n ax.set_title(title_text, color=\"white\", size=18)\n\n # Draw new image layers\n plt.imshow(self.hs, vmin=90, vmax=345, cmap=\"copper\", extent=self.extent)\n plt.imshow(255 - hs_back, vmin=1, vmax=150, cmap=\"Greys\", extent=self.extent)\n\n # Mass balance\n ax1 = plt.subplot(222, facecolor=\"black\")\n ax1.plot(self.mass_balance, color=\"w\")\n ax1.plot(self.mass_balance_trend, color=\"r\")\n ax1.set(ylabel=\"Mass balance [m]\")\n ax1.yaxis.label.set_color(\"w\")\n plt.setp(ax1.get_xticklabels(), visible=False)\n ax1.tick_params(axis=\"y\", colors=\"w\")\n ax1.set_title(f\"Gradient: {str(self.m)} m/m\", color=\"white\", size=18)\n\n # Plot mean thickness\n ax2 = plt.subplot(224, sharex=ax1, facecolor=\"black\")\n ax2.plot(self.mass, color=\"w\")\n ax2.set(xlabel=\"Year [a]\", ylabel=\"Mean thickness [m]\")\n ax2.xaxis.label.set_color(\"w\")\n ax2.yaxis.label.set_color(\"w\")\n ax2.tick_params(axis=\"x\", colors=\"w\")\n ax2.tick_params(axis=\"y\", colors=\"w\")\n\n # Draw new plot\n self._fig.canvas.draw()\n plt.pause(0.05)", "def update_graph(self):\n parameters = []\n dtype = {'Timestamp': 'str'}\n for header in self.headers:\n if self.top_plot.current_param in header or self.bottom_plot.current_param in header:\n parameters.append(header)\n dtype[header] = 'float'\n data = pd.read_csv(self.reactor.file,\n dtype=dtype,\n parse_dates=['Timestamp'], usecols=['Timestamp'] + parameters, low_memory=False,\n na_filter=False)\n start_time = data['Timestamp'][0]\n data.insert(loc=2, column='EFT', value=(data['Timestamp'] - start_time) / np.timedelta64(1, 'h'))\n\n for label, 
content in data.iteritems():\n if label == 'Timestamp' or label == 'EFT':\n continue\n elif self.top_plot.current_param in label:\n self.top_plot.clear()\n self.top_plot.plot(data['EFT'], content)\n else:\n self.bottom_plot.clear()\n self.bottom_plot.plot(data['EFT'], content)", "def redraw_figures(self):\n pass", "def redraw_figures(self):\n pass", "def _update_current_graph(self, **kwargs):\n\n self.current_graph.redraw()", "def refresh_all(self):\n\t\t\n\t\tself.symbolsList.set_datasource(self.source)\n\t\tself.symbolsList.refresh()\n\t\t\n\t\tself.plotFrame.set_datasource(self.source)\n\t\tself.plotFrame.refresh()", "def _update_plots(self):\n #Adding in new data to plots\n currSignal = self._ai_client.get_ai_voltage(self._ai_channel, max_range=self.max_input_voltage)\n self.measured_powers = np.append(self.measured_powers[1:], np.mean(currSignal))\n self.out_voltages = np.append(self.out_voltages[1:], self._curr_output_voltage)\n self.errors = np.append(self.errors[1:], (currSignal[-1] - self.voltageSetpoint))\n self.sp_data = np.append(self.sp_data[1:], self.voltageSetpoint)\n #Update power plots\n self.widgets['curve'][0].setData(self.measured_powers*self.gain)\n #Update setpoint plots\n self.widgets['curve'][1].setData(self.sp_data*self.gain)\n\n # Now update voltage polots\n self.widgets['curve'][2].setData(self.out_voltages)\n self.widgets['curve'][3].setData(self.errors*self.gain)", "def _UpdatePlotImpl( self ):\n if self.ax is not None:\n self.axline = None\n self.cursorLine = \\\n self.cursorLine2 = None\n\n# self.ax.clear()\n# if hasattr( self, 'ax2' ) and self.ax2 is not None:\n# self.ax2.clear()\n self.fig.clear()\n self._InitAxes()\n\n#\t\t-- Scale fonts\n#\t\t--\n wd, ht = self.GetClientSize()\n label_font_size = 14\n tick_font_size = 12\n self.titleFontSize = 16\n if 'wxMac' not in wx.PlatformInfo and wd < 800:\n\tdecr = (800 - wd) / 50.0\n\tlabel_font_size -= decr\n\ttick_font_size -= decr\n\tself.titleFontSize -= decr\n\n# self.ax.grid(\n# True, 'both', 'both',\n#\t color = '#c8c8c8', linestyle = ':', linewidth = 1\n#\t )\n self._DoUpdatePlot( wd, ht )\n self._DoUpdateRedraw()\n self.canvas.draw()\n #end if", "def _redraw_graph(self) -> None:\n self._clear_drawing()\n self.draw_graph(graph=self.graph, axes=self.subplot)\n self.draw_graph(graph=self.graph2, axes=self.subplot2)\n self.draw_mappings(self.mapping)", "def redraw(self):\n dummy_figure = plt.figure()\n new_manager = dummy_figure.canvas.manager\n new_manager.canvas.figure = self.figure\n self.figure.set_canvas(new_manager.canvas)\n plt.show(block=False)", "def refresh():\n curve_editor.refresh()", "def updatePlot(self,cost):\r\n timer = time.clock() - self.start_timer\r\n # Add new values to plot data set \r\n self.lines.set_xdata(np.append(self.lines.get_xdata(), timer))\r\n self.lines.set_ydata(np.append(self.lines.get_ydata(), cost))\r\n # Re-scale\r\n self.axCost.relim()\r\n self.axCost.autoscale_view()\r\n # Update plot\r\n self.canvasPlot.draw()\r\n self.canvasPlot.flush_events()", "def redraw(self):\n self._create()", "def updateGlobal(self):\n state = self.getState()\n n = len(self.myPlotCanvasList)\n for i in range(n):\n if self.myPlotCanvasList[i] is not None:\n self.myPlotCanvasList[i].myUpdateGlobal(state)", "def __refresh_grid(self):\n self.__log.debug(f\"Refreshing grid. 
Timer running: {self.timer.IsRunning()}\")\n\n # Update data\n self.table.data = self.__cor.filtered_coefficient_data.copy()\n\n # Format\n self.table.data.loc[:, 'Base Coefficient'] = self.table.data['Base Coefficient'].map('{:.5f}'.format)\n self.table.data.loc[:, 'Last Calculation'] = pd.to_datetime(self.table.data['Last Calculation'], utc=True)\n self.table.data.loc[:, 'Last Calculation'] = \\\n self.table.data['Last Calculation'].dt.strftime('%d-%m-%y %H:%M:%S')\n\n # Start refresh\n self.grid_correlations.BeginBatch()\n\n # Check if num rows in dataframe has changed, and send appropriate APPEND or DELETE messages\n cur_rows = len(self.__cor.filtered_coefficient_data.index)\n if cur_rows < self.__rows:\n # Data has been deleted. Send message\n msg = wx.grid.GridTableMessage(self.table, wx.grid.GRIDTABLE_NOTIFY_ROWS_DELETED,\n self.__rows - cur_rows, self.__rows - cur_rows)\n self.grid_correlations.ProcessTableMessage(msg)\n elif cur_rows > self.__rows:\n # Data has been added. Send message\n msg = wx.grid.GridTableMessage(self.table, wx.grid.GRIDTABLE_NOTIFY_ROWS_APPENDED,\n cur_rows - self.__rows) # how many\n self.grid_correlations.ProcessTableMessage(msg)\n\n self.grid_correlations.EndBatch()\n\n # Send updated message\n msg = wx.grid.GridTableMessage(self.table, wx.grid.GRIDTABLE_REQUEST_VIEW_GET_VALUES)\n self.grid_correlations.ProcessTableMessage(msg)\n\n # Update row count\n self.__rows = cur_rows", "def UpdateFrame(self, sender=None, args=None):\n # Update label for sensor: s['label']\n # with the most recent measurement: s().data['data'][-1]\n for s in self.sensors:\n self.gValue[s.GetID()].SetLabel( '{num} {unit}'.format(\n num = s().data['data'][-1],\n unit = str(s['unit'])) )\n try:\n pub.sendMessage( 'Plot.%s' %self.GetLabel() )\n except:\n self.plot_deleted = True\n\n \n self.top_sizer.Layout()", "def redraw(self):\n self.vispy_widget.canvas.update()", "def redraw(self):\n self.vispy_viewer.canvas.update()", "def on_update(self):\n if self.main.data is not None:\n font = {\n 'family': str(self.le_font.text()),\n 'size': int(str(self.le_font_size.text()))\n }\n\n mpl.rc('font', **font)\n\n # Clear the plot\n self.ax.clear()\n\n # Get the data and colormap\n x, y, z = self.main.data.get_pcolor()\n cmap = self.main.canvas.colormap.get_mpl_colormap()\n\n tri_checkboxes = [self.cb_tripcolor.checkState(),\n self.cb_triangulation.checkState()]\n\n # If we are going to need to plot triangulation data, prepare\n # the data so it can be plotted\n if QtCore.Qt.Checked in tri_checkboxes:\n if self.main.data.tri is None:\n self.main.data.generate_triangulation()\n\n xc, yc = self.main.data.get_triangulation_coordinates()\n\n tri = mpl.tri.Triangulation(xc, yc,\n self.main.data.tri.simplices)\n\n # Plot the data using either pcolormesh or tripcolor\n if self.cb_tripcolor.checkState() != QtCore.Qt.Checked:\n quadmesh = self.ax.pcolormesh(x, y, z,\n cmap=cmap,\n rasterized=True)\n\n quadmesh.set_clim(self.main.canvas.colormap.get_limits())\n else:\n quadmesh = self.ax.tripcolor(tri,\n self.main.data.z.ravel(),\n cmap=cmap, rasterized=True)\n\n quadmesh.set_clim(self.main.canvas.colormap.get_limits())\n\n # Plot the triangulation\n if self.cb_triangulation.checkState() == QtCore.Qt.Checked:\n self.ax.triplot(tri, 'o-', color='black',\n linewidth=0.5, markersize=3)\n\n self.ax.axis('tight')\n\n title = self.format_label(str(self.le_title.text()))\n title = '\\n'.join(textwrap.wrap(title, 40,\n replace_whitespace=False))\n\n # Set all the plot labels\n self.ax.set_title(title)\n 
self.ax.set_xlabel(self.format_label(self.le_x_label.text()))\n self.ax.set_ylabel(self.format_label(self.le_y_label.text()))\n\n # Set the axis tick formatters\n self.ax.xaxis.set_major_formatter(FixedOrderFormatter(\n str(self.le_x_format.text()), float(self.le_x_div.text())))\n self.ax.yaxis.set_major_formatter(FixedOrderFormatter(\n str(self.le_y_format.text()), float(self.le_y_div.text())))\n\n if self.cb is not None:\n self.cb.remove()\n\n # Colorbar layout\n orientation = str(self.cb_cb_orient.currentText())\n self.cb = self.fig.colorbar(quadmesh, orientation=orientation)\n\n self.cb.formatter = FixedOrderFormatter(\n str(self.le_z_format.text()), float(self.le_z_div.text()))\n\n self.cb.update_ticks()\n\n self.cb.set_label(self.format_label(self.le_z_label.text()))\n self.cb.draw_all()\n\n # Plot the current linecut if neccesary\n if self.cb_linecut.checkState() == QtCore.Qt.Checked:\n for linetrace in self.main.linecut.linetraces:\n if linetrace.type == 'horizontal':\n plt.axhline(linetrace.position, color='red')\n elif linetrace.type == 'vertical':\n plt.axvline(linetrace.position, color='red')\n\n self.fig.tight_layout()\n\n self.canvas.draw()", "def redraw(self):\n x2, y2 = [[] for i in range(len(self.x))], \\\n [[] for i in range(len(self.x))]\n game_point = [random.randint(1, 100),\n random.randint(1, 100)]\n for i in range(self.generations):\n x2, y2, game_point = self.move(x2, y2, game_point)\n for i in range(10): # Czyszczenie starych wykresow\n self.plots[i].set_xdata([])\n self.plots[i].set_ydata([])\n self.plots2[i].set_xdata([])\n self.plots2[i].set_ydata([])\n for i in range(len(self.x)): # Nowe dane wykresow\n self.plots[i].set_xdata(self.x[i])\n self.plots[i].set_ydata(self.y[i])\n self.plots2[i].set_xdata(x2[i])\n self.plots2[i].set_ydata(y2[i])\n self.fig.canvas.draw_idle()", "def plot_refresh_handler(args):\n stream_data, runlimits, runflags = args\n if runflags.exit:\n sys.exit(1)\n\n for line_name in stream_data:\n data = stream_data[line_name]\n curr_data_len = len(data['y'])\n if curr_data_len == 0:\n # no data yet\n continue\n\n if data['last_len'] >= curr_data_len:\n # no new data since last update\n continue\n\n # save length of last line draw\n data['last_len'] = curr_data_len\n\n if FLAGS.timestamp:\n x_data = numpy.array(data['x'])\n else:\n x_data = numpy.array(range(curr_data_len))\n y_data = numpy.array(data['y'])\n\n runlimits.x_max = max(max(x_data), runlimits.x_max)\n runlimits.x_min = runlimits.x_max-FLAGS.width\n\n if FLAGS.ymin is not None:\n runlimits.y_min = FLAGS.ymin\n else:\n runlimits.y_min = min(min(y_data), runlimits.y_min)\n\n if FLAGS.ymax is not None:\n runlimits.y_max = FLAGS.ymax\n else:\n runlimits.y_max = max(max(y_data), runlimits.y_max)\n\n data['line'].set_data(x_data, y_data)\n if runflags.update_axis:\n axes = data['line'].get_axes()\n axes.relim()\n axes.set_xlim(runlimits.x_min-1, runlimits.x_max+1)\n axes.autoscale_view(scaley=True, scalex=False)\n\n manager = pylab.get_current_fig_manager()\n manager.canvas.draw()", "def refresh_observation(self, measurement: EngineObsType) -> None:\n observation = self.observation\n observation[\"t\"][()] = measurement[\"t\"]\n _array_copyto(observation['states']['agent']['q'],\n measurement['states']['agent']['q'])\n _array_copyto(observation['states']['agent']['v'],\n measurement['states']['agent']['v'])\n sensors_data = observation['measurements']\n for key, value in dict.items(measurement['measurements']):\n _array_copyto(sensors_data[key], value)", "def refresh_chat(self):\n 
self.chat_container.noutrefresh()\n self.chat_win.noutrefresh()\n curses.doupdate()", "def redraw(self, state: EngineeringState) -> None:\n pass", "def refresh_self(self) -> None:\n self._logger.debug(\"running\")\n try:\n self.figure.canvas.draw()\n except Exception as e:\n self._logger.exception(\"issue with drawing canvas.\")\n self._logger.debug(\"done\")", "def updateChat(self, ):\n self.__redrawChat()", "def update_plot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n for line in self.lines: \n ax.draw_artist(line)", "def plot_changed(self):\n self.plotType = self.ui.selectPlotType.currentText()\n self.value_changed()", "def refresh(self):\n self.__refresh()", "def refresh(self):\n pass", "def refresh(self):\n pass", "def drawChanges(self):\n self.draw(wait=False)\n draw(self.values,color='yellow',bbox=None,clear=False,shrink=self.shrink)", "def update_graph(self):\n if self.update_callback:\n self.update_callback()", "def push(self):\n if self.comm is None:\n raise Exception('Renderer does not have a comm.')\n diff = self.renderer.diff(self)\n self.comm.send(diff)", "def visual_attr_changed(self):\n if self.component:\n self.component.invalidate_draw()\n self.component.request_redraw()\n else:\n self.invalidate_draw()\n self.request_redraw()", "def update_plots(self, set_data, rep_stats):\n # Update plots\n self.t2.clear()\n y_pen = pg.mkPen(color='#FFC914', width=1.5)\n v_pen = pg.mkPen(color='#17BEEB', width=1.5)\n self.t1.plot(set_data['Time'].values, set_data['Y_m'].values, pen=y_pen, clear=True)\n self.t2.addItem(\n pg.PlotCurveItem(set_data['Time'].values, set_data['Velocity'].values, pen=v_pen, clear=True))\n\n m_pen = pg.mkPen(color='#FFFFFF', width=1)\n self.xy.plot(set_data['X_m'].values[20:], set_data['Y_m'].values[20:], pen=m_pen, clear=True)\n\n # Update rep highlighting in timeline and max velocity points in bar path\n reps_labeled, n_reps = label(set_data['Reps'].values)\n if n_reps != 0:\n for r in range(1, n_reps + 1):\n rep = f\"rep{r}\"\n if rep_stats[rep]['movement'] not in ['false', 'partial', 'fail']:\n idx = tuple([reps_labeled == r])\n t_l = set_data['Time'].values[idx][0]\n t_r = set_data['Time'].values[idx][-1]\n comparator = rep_stats[rep][self.lifts[rep_stats[rep]['movement']]['pf_metric']]\n condition = self.lifts[rep_stats[rep]['movement']]['pf_criterion']\n pass_rep = eval(f\"{comparator}{condition}\")\n if pass_rep is True:\n rep_color = COLOR_SCHEME['green']\n else:\n rep_color = COLOR_SCHEME['orange']\n lri_brush = pg.mkBrush(color=rep_color)\n lri_pen = pg.mkPen(color=rep_color)\n lri = pg.LinearRegionItem((t_l, t_r), brush=lri_brush, pen=lri_pen, movable=False)\n lri.setOpacity(0.3)\n rep_lri_label = self.lifts[rep_stats[rep]['movement']]['name']\n ti = pg.TextItem(text=rep_lri_label, color='#FFFFFF', anchor=(0.5, 0.5))\n rep_lri_pos = self.t1.getAxis('left').range[1] * 0.9\n ti.setPos((t_r + t_l) / 2, rep_lri_pos)\n self.t1.addItem(lri)\n self.t1.addItem(ti)\n max_y = rep_stats[rep]['peak_height']\n max_x = set_data['X_m'].values[idx][np.where(set_data['Y_m'].values[idx] == max_y)]\n self.xy.addItem(\n pg.ScatterPlotItem(x=[max_x], y=[max_y], symbol='o', pen=lri_pen, brush=lri_brush, size=12))", "def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def update_processgraph(self):\n graph = self.processgraphEdit.toPlainText()\n 
self.processgraph.graph = json.loads(graph)\n self.processgraph.builder.processes = json.loads(graph)\n #widget = self.processgraphWidget\n #self.load_dict_into_widget(widget, self.processgraph.graph)\n #widget.show()", "def _updated(self, event=None):\n if event == ItemChangedType.COMPLEX_MODE:\n self._syncDataWithParent()\n super(ComplexCutPlane, self)._updated(event)", "def refresh(self):\n self.Refresh()", "def __plot_rho__(self, refresh=False, *args):\n # If plot is not requested, return:\n if not self.plotRhoVar.get():\n return\n\n # Check for a closed window:\n if 'rho' in self.plots.keys() and not matplotlib.pyplot.fignum_exists(self.plots['rho'].number):\n del self.plots['rho']\n refresh = False\n # Update the existing plot, if it exists\n refresh = refresh or 'rho' in self.plots.keys()\n if refresh:\n if 'rho' in self.plots.keys():\n fig = self.plots['rho']\n fig = matplotlib.pyplot.figure(fig.number)\n fig.clear()\n else:\n return\n # Make a new window:\n else:\n fig = matplotlib.pyplot.figure(figsize=(4,3))\n fig.canvas.set_window_title('rho, time = ' + '{:.3f}'.format(1e9*self.imp.t(self.it)))\n ax = fig.add_subplot(111)\n\n # Plot:\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.rho((self.it), self.ir)[0], 'k-')\n\n ax.set_xlabel('r (um)', fontsize=12)\n ax.set_ylabel('Rho (g/cc)', fontsize=12)\n\n if self.logxVar.get():\n ax.set_xscale('log')\n if self.logyVar.get():\n ax.set_yscale('log')\n\n matplotlib.pyplot.tight_layout()\n\n if not refresh:\n fig.show()\n fig.canvas.draw()\n if self.wm is not None:\n self.wm.addWindow(matplotlib.pyplot.get_current_fig_manager().window)\n self.plots['rho'] = fig", "def redraw(event):\n if np.size(plt.get_figlabels()):\n #Need to check if figure is closed or not and only then do the following\n #operations. Else, the following operations will create a new figure\n ax.clear()\n drawRectangle(ax)\n fig.canvas.draw()\n else:\n pass", "def refresh(self, _loop, data):\n try:\n if(self.model.mode == 'live'):\n self.updateGraphs()\n self.model.memory = int(self.dataClient.recv())\n self.model.cpu = float(self.dataClient.recv())\n except EOFError:\n pass\n except Exception as e:\n self.logger.error(e)\n\n self.view.refresh()\n _loop.set_alarm_in(guiRefreshTimer, self.refresh)", "def update(self):\n try:\n self.positions_list.set_odd_color(self._parent.settings.get_key('interface.odd_color'))\n self.positions_list.set_even_color(self._parent.settings.get_key('interface.even_color'))\n except od_exception_config_key_error:\n pass\n if not self._parent.connected():\n self.positions_list.update_rows([])\n return\n self._parent.positions_filter.update_filter()\n self.positions_list.update_rows(map(lambda a: (a['position_id'],\n a['open_date_formated'],\n a['open_time_formated'],\n a['close_date_formated'],\n a['close_time_formated'],\n a['duration_formated'],\n a['paper_name'],\n a['count'],\n a['direction_formated'],\n a['open_price_formated'],\n a['close_price_formated'],\n a['steps_range_abs_formated'],\n format_number(a['gross_before']),\n format_number(a['gross_after']),\n a['pl_gross_abs_formated'],\n format_number(a['net_before']),\n format_number(a['net_after']),\n a['pl_net_abs_formated'],\n a['percent_range_abs_formated']), self._parent.positions_filter.get_data(self.order_by)))", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def refresh(self) -> None:\n pass" ]
[ "0.7288777", "0.6962232", "0.6900487", "0.6737718", "0.6697221", "0.6682248", "0.6682248", "0.6682248", "0.6682248", "0.6682248", "0.66806316", "0.66681284", "0.6639615", "0.6597625", "0.6559798", "0.6359195", "0.63513744", "0.63109744", "0.6272057", "0.6253011", "0.62247986", "0.61837536", "0.61710083", "0.61612433", "0.61026186", "0.60806406", "0.60525817", "0.60322255", "0.6016168", "0.59593695", "0.59593695", "0.5941694", "0.5938772", "0.5929384", "0.5917051", "0.58962226", "0.587982", "0.5865281", "0.58619046", "0.58588755", "0.58531207", "0.5849845", "0.58465624", "0.58438915", "0.58258134", "0.5815153", "0.5809253", "0.580419", "0.5797645", "0.5794296", "0.5791501", "0.5768497", "0.57679445", "0.57519245", "0.57393485", "0.57268345", "0.57268345", "0.5721691", "0.57188314", "0.5702508", "0.5702273", "0.5700277", "0.56918573", "0.56809896", "0.56786084", "0.5664458", "0.5661812", "0.561909", "0.5613101", "0.560763", "0.5607315", "0.5595215", "0.5570656", "0.5556143", "0.5555316", "0.55539435", "0.55534726", "0.55484354", "0.5545512", "0.55440426", "0.55430734", "0.55412585", "0.55364794", "0.55364794", "0.5535755", "0.5523921", "0.55081654", "0.549152", "0.5490501", "0.5479815", "0.5466691", "0.5466691", "0.54620653", "0.54545677", "0.54444474", "0.5430413", "0.5427253", "0.5424772", "0.5418733", "0.541653", "0.541342" ]
0.0
-1
Pushes updated plot data via the Comm.
def push(self):
    if self.comm is None:
        raise Exception('Renderer does not have a comm.')
    diff = self.renderer.diff(self)
    self.comm.send(diff)
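For context, a minimal runnable sketch of the push pattern this document snippet describes. The `Comm`, `Renderer`, and `Plot` classes below are hypothetical stand-ins mirroring the attribute names in the snippet, not the API of any specific plotting library:

```python
# Minimal sketch of a Comm-based push, assuming stand-in classes.
# Comm, Renderer, and Plot are illustrative only; a real renderer
# would compute an incremental diff and a real comm would serialize
# it to a front end.

class Comm:
    def send(self, payload):
        # Stand-in for transmitting the diff to the front end.
        print("sending diff:", payload)

class Renderer:
    def diff(self, plot):
        # Stand-in for computing what changed since the last push.
        return {"data": plot.data}

class Plot:
    def __init__(self, renderer, comm=None):
        self.renderer = renderer
        self.comm = comm
        self.data = []

    def push(self):
        # Same logic as the document snippet above.
        if self.comm is None:
            raise Exception('Renderer does not have a comm.')
        diff = self.renderer.diff(self)
        self.comm.send(diff)

plot = Plot(Renderer(), Comm())
plot.data.append((0, 1.0))
plot.push()  # prints: sending diff: {'data': [(0, 1.0)]}
```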
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_plot():\n pass", "def on_new_data(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update plot data\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.plotted_data[i].setData(self.data_indices, self.measurements_list[i])", "def update_plot(self, msg):\n if not self.plots_created:\n self.create_plots(msg.keys())\n self.plots_created = True\n\n for k, v in msg.iteritems():\n current = self.plotdata.get_data(k)\n self.plotdata.set_data(k, np.r_[current, v])", "def _UpdatePlot( self ):\n self._BusyDoOp( self._UpdatePlotImpl )", "def updateArrayPlotData(self):\n self.arrayPlotData.set_data(\"channel0\",self.array0)\n self.arrayPlotData.set_data(\"channel1\",self.array1)\n self.arrayPlotData.set_data(\"channel2\",self.array2)\n self.arrayPlotData.set_data(\"channel3\",self.array3)\n self.arrayPlotData.set_data(\"channel4\",self.array4)\n self.arrayPlotData.set_data(\"channel5\",self.array5)\n self.arrayPlotData.set_data(\"channel6\",self.array6)\n self.arrayPlotData.set_data(\"channel7\",self.array7)\n self.arrayPlotData.set_data(\"cursorXS\",self.cursorXS)\n #self.arrayPlotData.set_data(\"cursorVertical\",self.cursorVertical)", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + 
\" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))", "def update_plot(frame):\n global plotdata\n while True:\n try:\n data = q.get_nowait()\n except queue.Empty:\n break\n shift = len(data)\n plotdata = np.roll(plotdata, -shift, axis=0)\n plotdata[-shift:, :] = data\n for column, line in enumerate(lines):\n line.set_ydata(plotdata[:, column])\n return lines", "def on_append_plot(self, event=None):\n self._on_plot_selection()\n data_id, theory_id, state_id = self.set_data_helper()\n self.parent.plot_data(data_id=data_id,\n state_id=state_id,\n theory_id=theory_id,\n append=True)", "def updatePlot(self):\n self.axes.clear()\n nc = len(self.curvelist)\n xpos = self.curvelist[0].xvinfo.vidx\n for i in range(nc):\n ypos = self.curvelist[i].yvinfo.vidx\n self.axes.plot(self.data[xpos],\n self.data[ypos], self.col[i])\n if self.idata is not None:\n self.axes.plot(self.idata[xpos],\n self.idata[ypos], self.col[i]+'.')\n self.canvas.draw()", "def _update_plot(self, *args):\n # type: (dict, dict, dict, dict, dict) -> None\n if len(args) != 5 and not any([isinstance(arg, dict) for arg in args]):\n raise ValueError('Illegal arguments for _update_plot of %s' % self.__name__)\n desvars, responses, objectives, constraints, metadata = args\n\n data = self._compute_new_data(desvars, responses, objectives, constraints, metadata)\n self.cs[:, self.iter] = data[:]\n self.quad.set_array(self.cs.ravel())\n self.ax.set_xlim([-.5, self.iter+.5])\n self.iter += 1", "def update_plot(self,ax):\n self.replot(ax)", "def test_append():\n import matplotlib.pyplot as plt\n endpoints = [\"krstc_baseline\"]\n start = \"2019-09-30\"\n end = \"now\" \n end = '2019-10-03T16:01:56.699883'\n qmon = QueueMonitor(endpoints, start, end)\n \n # a record we would have gotten from 'qmon.listen'\n record = {\n 'msgtype': 4,\n 'payload': {\n 'value_cal': -5.32175, 'value_raw': 1.056375},\n 'sender_info': {\n 'commit': 'g7190b92',\n 'exe': '/home/pi/controls/latest/bin/dragonfly',\n 'hostname': 'scannerpi',\n 'package': 'dripline',\n 'service_name': 'scannerpi_service',\n 'username': 'pi',\n 'version': 'v3.7.3'},\n 'timestamp': '2019-10-03T16:02:20.543470Z'\n }\n\n # ts = datetime.fromisoformat(record[\"timestamp\"]) # can't handle the \"Z\"\n ts = parser.parse(record[\"timestamp\"]) # this works\n val = record[\"payload\"][\"value_cal\"]\n \n # append new values\n qmon.data_lists[\"krstc_baseline_ts\"].append(ts)\n qmon.data_lists[\"krstc_baseline\"].append(val)\n \n # make the plot\n xv = qmon.data_lists[\"krstc_baseline_ts\"]\n yv = qmon.data_lists[\"krstc_baseline\"]\n plt.plot(xv, yv, \"-r\")\n\n # superimpose the new point again\n plt.plot(ts, val, \".b\", ms='10')\n\n plt.gcf().autofmt_xdate() # rotates labels\n plt.show()", "def reinitialiseData(self):\n if self.arrayPlotData is not None:\n self.currentPosition = 0\n self.xs = scipy.linspace(0.0, self.numberOfPoints*self.resolution, self.numberOfPoints)\n self.cursorXS = self.getCurrentPositionArray()\n self.cursorVertical = scipy.array([self.verticalLimit,0.0])\n self.arrayPlotData.set_data(\"xs\",self.xs)\n self.array0 = 
scipy.zeros(self.numberOfPoints)\n self.array1 = scipy.zeros(self.numberOfPoints)\n self.array2 = scipy.zeros(self.numberOfPoints)\n self.array3 = scipy.zeros(self.numberOfPoints)\n self.array4 = scipy.zeros(self.numberOfPoints)\n self.array5 = scipy.zeros(self.numberOfPoints)\n self.array6 = scipy.zeros(self.numberOfPoints)\n self.array7 = scipy.zeros(self.numberOfPoints)\n self.channels = [self.array0,self.array1,self.array2,self.array3,\n self.array4,self.array5,self.array6,self.array7]\n self.updateArrayPlotData()", "def UpdateFrame(self, sender=None, args=None):\n # Update label for sensor: s['label']\n # with the most recent measurement: s().data['data'][-1]\n for s in self.sensors:\n self.gValue[s.GetID()].SetLabel( '{num} {unit}'.format(\n num = s().data['data'][-1],\n unit = str(s['unit'])) )\n try:\n pub.sendMessage( 'Plot.%s' %self.GetLabel() )\n except:\n self.plot_deleted = True\n\n \n self.top_sizer.Layout()", "def plot_refresh():\n figure.canvas.draw()", "def timer_plot_data_out(self, w):\n w.update_plot(self.getLaps())", "def on_plot(self, event=None):\n data_id, theory_id, state_id = self.set_data_helper()\n self.parent.plot_data(data_id=data_id,\n state_id=state_id,\n theory_id=theory_id,\n append=False)\n self.enable_remove_plot()", "def update_graph(self, data_list):\n #log.debug(\"render graph\")\n x_axis = range(len(data_list))\n\n mcd = self.main_curve_dialog\n mcd.curve.set_data(x_axis, data_list)\n\n if self.auto_scale:\n mcd.get_plot().do_autoscale()\n else:\n mcd.get_plot().replot()", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))", "def updatePlot(self):\n if len(self.baslin):\n X = list(t[0] for t in self.baslin)\n Y = list(t[1] for t in self.baslin)\n self.BLplt.set_xdata(X)\n self.BLplt.set_ydata(Y)\n if self.BLtyp == 'S':\n if self.BL is None:\n self.BL, = self.axes.plot(self.data[0], self.data[2], linestyle='-', color='green')\n else:\n self.BL.set_ydata(self.data[2])\n self.canvas.draw()", "def force_plot_update(settings_dict):\n settings_dict[\"new_data\"] = True # Initiates the update of the plots\n settings_dict[\"last_plot_update\"] = settings_dict[\"update_counter\"]", "def update_data():\n values = temp_serial_placeholder()\n time = current_time_milli() - __start\n points = [ [time, values[0]], [time, values[1]] ]\n __data.append(points)\n return points", "def UpdatePlot(self):\n\n if self.first_time:\n for ID, plt in self.plotIDs.iteritems():\n if plt:\n tmp = FellesBaseClass.FindInstance(ID)\n self.plot_panel.oplot(\n np.array(tmp.data['time']),\n np.array(tmp.data['data']),\n draw = True,\n side ='left',\n label = tmp['label'],\n color = tmp['color'],\n xlabel = None, ylabel = None, y2label = None,\n title = None,\n dy = None,\n ylog_scale = False,\n xmin = None, xmax = None, ymin = None, ymax = None,\n refresh = True,\n show_legend= True, legend_loc='ur', legend_on= True,\n delay_draw = False,\n marker = 'None', markersize = None,\n autoscale=True,\n linewidth = 3, # default 2\n drawstyle = 'line', style = 'solid',\n grid = True,\n bgcolor= None, framecolor= None, gridcolor= None,\n labelfontsize= 10, 
# default 9\n legendfontsize= 12, # default 7\n fullbox=None, # 'box', 'open', 'bottom'\n axes_style=None,\n zorder=None,\n )\n self.first_time = False\n\n else:\n i = 0\n for ID,plt in self.plotIDs.iteritems():\n if plt:\n tmp = FellesBaseClass.FindInstance(ID)\n self.plot_panel.update_line(\n i,\n np.array(tmp.data['time']),\n np.array(tmp.data['data']),\n draw=True,\n )\n i += 1\n\n self.plot_panel.set_xylims(\\\n [\\\n floor( min( [ min( FellesBaseClass.FindInstance(ID).data['time'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n ceil( max( [ max( FellesBaseClass.FindInstance(ID).data['time'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n floor( min( [ min( FellesBaseClass.FindInstance(ID).data['data'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n ceil( max( [ max( FellesBaseClass.FindInstance(ID).data['data'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) )\\\n ]\\\n )\n\n self.panel_sizer.Fit(self)", "def sendPreviousDataPoints(self):\n if self.ioLoopInst is not None:\n cmd = {'cmd': 'setDataPoints', 'value': self.dataPoints}\n self._sendMessageToWeb(cmd)\n else:\n print(\"sendPreviousDataPoints: \" + self.dataPoints)", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def on_axes_update(self):\n\n if self.connected:\n tab_open = self.tab_open()\n\n # Update axes\n for i, series in enumerate(self.measurements_list):\n if i == tab_open:\n self.chart_list[i].setXRange(self.worker.start_range,\n self.worker.samples_count + NUM_GUI_SAMPLES, padding=0.075)\n\n # for i, series in enumerate(self.measurements_list):\n #\n # # An optimization to prevent unnecessary rendering\n # if i == tab_open:\n #\n # # Remove old x-axis\n # series.detachAxis(self.xaxis_list[i])\n # self.chart_list[i].chart().removeAxis(self.xaxis_list[i])\n # self.xaxis_list[i] = QValueAxis()\n #\n # # Add new x-axis\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.xaxis_list[i].setRange(self.worker.samples_count, self.worker.samples_count +\n # NUM_GUI_SAMPLES)\n # series.attachAxis(self.xaxis_list[i])", "def update(self):\n # pull all available chunks\n c, t = self.inlet.pull_chunk(timeout=0.0)\n new_c = []\n new_t = []\n while c:\n new_c += c\n new_t += t\n c, t = self.inlet.pull_chunk(timeout=0.0)\n\n # add samples to buffer\n if any(new_c):\n # add samples\n data_v = [item for sublist in new_c for item in sublist]\n self.gbuffer = np.roll(self.gbuffer, -len(data_v))\n self.gbuffer[-len(data_v):] = data_v\n # add timestamps\n if new_t:\n self.gtimes = np.roll(self.gtimes, -len(new_t))\n self.gtimes[-len(new_t):] = new_t\n\n # update graph handles\n if self.gbuffer.any():\n for k in range(0, self.channel_count):\n self.handles[k].setData(self.gtimes,\n self.gbuffer[k::self.channel_count])", "def update(self, datain):\r\n self.arraydata = datain\r\n self.layoutChanged.emit()", "def _plot_update(self):\n omit_log = ['sens_log']\n for log_group, log_arrays in self.qbpm.log_names.items():\n for log_array in log_arrays:\n if log_array not in omit_log:\n self.curves[log_array].setData(self.qbpm.log_time, self.qbpm.log_arrays[log_array],clear=True)\n # 
self.fill.setCurves(self.curves['posz_sens_low_log'], self.curves['posz_sens_high_log'])", "def _update_plots(self):\n #Adding in new data to plots\n currSignal = self._ai_client.get_ai_voltage(self._ai_channel, max_range=self.max_input_voltage)\n self.measured_powers = np.append(self.measured_powers[1:], np.mean(currSignal))\n self.out_voltages = np.append(self.out_voltages[1:], self._curr_output_voltage)\n self.errors = np.append(self.errors[1:], (currSignal[-1] - self.voltageSetpoint))\n self.sp_data = np.append(self.sp_data[1:], self.voltageSetpoint)\n #Update power plots\n self.widgets['curve'][0].setData(self.measured_powers*self.gain)\n #Update setpoint plots\n self.widgets['curve'][1].setData(self.sp_data*self.gain)\n\n # Now update voltage polots\n self.widgets['curve'][2].setData(self.out_voltages)\n self.widgets['curve'][3].setData(self.errors*self.gain)", "def _process_data(self, pattern, data, timestamps):\n self._plotbuffer[pattern] = (self._plotbuffer[pattern] + data)[-self._plotlength:]\n if timestamps:\n self._timestampbuffer[pattern] = (self._timestampbuffer[pattern] + timestamps)[-self._plotlength:]\n \n data = self._plotbuffer[pattern]\n# if timestamps:\n timestamps = self._timestampbuffer[pattern]\n _length = len( data ) \n\n self._plotbuffer[pattern] = data[-self._plotlength:] \n# if timestamps:\n self._timestampbuffer[pattern] = timestamps[-self._plotlength:]\n\n# if _length > self._plotlength:\n# # plot the last <plotlength> samples\n# self._plotbuffer[pattern] = data[-self._plotlength:] \n# if timestamps:\n# self._timestampbuffer[pattern] = timestamps[-self._plotlength:] \n# else:\n# # plot 0 line before signal starts\n# self._plotbuffer[pattern] = [ 0 for _ in range(self._plotlength - _length)] + data\n# if timestamps:\n# self._timestampbuffer[pattern] = [ 0 for _ in range(self._plotlength - _length)] + timestamps\n\n if not self.changed:\n self.changed = True\n\n self._process_plot_data( pattern, data, timestamps )", "def refresh_plot(self):\n self.ax.relim() # recompute the data limits\n self.ax.autoscale_view() # automatic axis scaling\n self.fig.canvas.flush_events()", "def updateGraph(self, symbol=None):\n if symbol is None:\n return\n\n # Get all stock data back for the given symbol\n self.stock_data = self.db.queryAllData(table_name=symbol)\n\n # Create a list of prices and a list of dates\n self.prices = [x[1].strip('$') for x in self.stock_data]\n self.dates = [x[0] for x in self.stock_data]\n date_string = [x.strftime(\"%m/%d/%Y\") for x in self.dates]\n self.x = [datetime.datetime.strptime(d, '%m/%d/%Y').date()\n for d in date_string]\n\n # Create an instance of QtMpl\n self.mpl = self.central.mpl\n self.mpl.addLine(x=self.x, y=self.prices, title=symbol)", "def plot_data(self):", "def update_visualization(self) -> None:\n pass", "def recive_data(self, data_waveformreceived):\r\n self.adcollector.save_as_binary(self.savedirectory)\r\n self.channel_number = len(data_waveformreceived)\r\n if self.channel_number == 1: \r\n if 'Vp' in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]\r\n \r\n self.PlotDataItem_patch_voltage = PlotDataItem(self.xlabelhere_all, self.data_collected_0)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_voltage.setPen('w')\r\n self.pw_data.addItem(self.PlotDataItem_patch_voltage)\r\n \r\n self.textitem_patch_voltage = pg.TextItem(('Vp'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_voltage.setPos(0, 1)\r\n 
self.pw_data.addItem(self.textitem_patch_voltage)\r\n elif 'Ip' in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]\r\n \r\n self.PlotDataItem_patch_current = PlotDataItem(self.xlabelhere_all, self.data_collected_0)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_current.setPen('c')\r\n self.pw_data.addItem(self.PlotDataItem_patch_current)\r\n \r\n self.textitem_patch_current = pg.TextItem(('Ip'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_current.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_current) \r\n elif 'PMT' in self.readinchan: # repeatnum, PMT_data_index_array, averagenum, ScanArrayXnum\r\n self.data_collected_0 = data_waveformreceived[0]*-1\r\n self.data_collected_0 = self.data_collected_0[0:len(self.data_collected_0)-1]\r\n \r\n # pmt data could come from raster scanning mode or from contour scanning mode.\r\n try:\r\n for i in range(self.repeatnum):\r\n self.PMT_image_reconstructed_array = self.data_collected_0[np.where(self.PMT_data_index_array_repeated == i+1)]\r\n Dataholder_average = np.mean(self.PMT_image_reconstructed_array.reshape(self.averagenum, -1), axis=0)\r\n Value_yPixels = int(len(self.samples_1)/self.ScanArrayXnum)\r\n self.PMT_image_reconstructed = np.reshape(Dataholder_average, (Value_yPixels, self.ScanArrayXnum))\r\n \r\n # Stack the arrays into a 3d array\r\n if i == 0:\r\n self.PMT_image_reconstructed_stack = self.PMT_image_reconstructed\r\n else:\r\n self.PMT_image_reconstructed_stack = np.concatenate((self.PMT_image_reconstructed_stack, self.PMT_image_reconstructed), axis=0)\r\n \r\n Localimg = Image.fromarray(self.PMT_image_reconstructed) #generate an image object\r\n Localimg.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+str(i)+'.tif')) #save as tif\r\n \r\n plt.figure()\r\n plt.imshow(self.PMT_image_reconstructed, cmap = plt.cm.gray)\r\n plt.show()\r\n except:\r\n np.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+'flatten'), self.data_collected_0)\r\n \r\n elif self.channel_number == 2: \r\n if 'PMT' not in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]\r\n \r\n self.PlotDataItem_patch_voltage = PlotDataItem(self.xlabelhere_all, self.data_collected_0)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_voltage.setPen('w')\r\n self.pw_data.addItem(self.PlotDataItem_patch_voltage)\r\n \r\n self.textitem_patch_voltage = pg.TextItem(('Vp'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_voltage.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_voltage) \r\n \r\n self.data_collected_1 = data_waveformreceived[1]\r\n \r\n self.PlotDataItem_patch_current = PlotDataItem(self.xlabelhere_all, self.data_collected_1)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_current.setPen('c')\r\n self.pw_data.addItem(self.PlotDataItem_patch_current)\r\n \r\n self.textitem_patch_current = pg.TextItem(('Ip'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_current.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_current) \r\n elif 'PMT' in self.readinchan:\r\n self.data_collected_0 = data_waveformreceived[0]*-1\r\n self.data_collected_0 = self.data_collected_0[0:len(self.data_collected_0)-1]\r\n \r\n try:\r\n for i in range(self.repeatnum):\r\n 
self.PMT_image_reconstructed_array = self.data_collected_0[np.where(self.PMT_data_index_array_repeated == i+1)]\r\n Dataholder_average = np.mean(self.PMT_image_reconstructed_array.reshape(self.averagenum, -1), axis=0)\r\n Value_yPixels = int(len(self.samples_1)/self.ScanArrayXnum)\r\n self.PMT_image_reconstructed = np.reshape(Dataholder_average, (Value_yPixels, self.ScanArrayXnum))\r\n \r\n # Stack the arrays into a 3d array\r\n if i == 0:\r\n self.PMT_image_reconstructed_stack = self.PMT_image_reconstructed\r\n else:\r\n self.PMT_image_reconstructed_stack = np.concatenate((self.PMT_image_reconstructed_stack, self.PMT_image_reconstructed), axis=0)\r\n \r\n Localimg = Image.fromarray(self.PMT_image_reconstructed) #generate an image object\r\n Localimg.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+str(i)+'.tif')) #save as tif\r\n \r\n plt.figure()\r\n plt.imshow(self.PMT_image_reconstructed, cmap = plt.cm.gray)\r\n plt.show()\r\n except:\r\n np.save(os.path.join(self.savedirectory, datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'_PMT_'+self.saving_prefix+'_'+'contourscanning'), self.data_collected_0)\r\n \r\n if 'Vp' in self.readinchan:\r\n self.data_collected_1 = data_waveformreceived[1]\r\n \r\n self.PlotDataItem_patch_voltage = PlotDataItem(self.xlabelhere_all, self.data_collected_1)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_voltage.setPen('w')\r\n self.pw_data.addItem(self.PlotDataItem_patch_voltage)\r\n \r\n self.textitem_patch_voltage = pg.TextItem(('Vp'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_voltage.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_voltage)\r\n elif 'Ip' in self.readinchan:\r\n self.data_collected_1 = data_waveformreceived[1]\r\n \r\n self.PlotDataItem_patch_current = PlotDataItem(self.xlabelhere_all, self.data_collected_1)\r\n #use the same color as before, taking advantages of employing same keys in dictionary\r\n self.PlotDataItem_patch_current.setPen('c')\r\n self.pw_data.addItem(self.PlotDataItem_patch_current)\r\n \r\n self.textitem_patch_current = pg.TextItem(('Ip'), color=('w'), anchor=(1, 1))\r\n self.textitem_patch_current.setPos(0, 1)\r\n self.pw_data.addItem(self.textitem_patch_current)", "def updatePlot(self,cost):\r\n timer = time.clock() - self.start_timer\r\n # Add new values to plot data set \r\n self.lines.set_xdata(np.append(self.lines.get_xdata(), timer))\r\n self.lines.set_ydata(np.append(self.lines.get_ydata(), cost))\r\n # Re-scale\r\n self.axCost.relim()\r\n self.axCost.autoscale_view()\r\n # Update plot\r\n self.canvasPlot.draw()\r\n self.canvasPlot.flush_events()", "def update_data(self):\n\n # Update all plots in the figure\n self.data = self.model.measurements.get_bokeh_vis_data()\n self.source.stream(self.data, len(self.data))\n self.line_source.stream(self.data[self.data.agent_type == 'system'])\n self.school_dropdown_func()\n\n # Update the utility histograms\n self.update_histograms()\n\n # Update the composition histograms\n to_update = [self.neighbourhood_composition_quads, \n self.school_composition_quads, self.distance_quads]\n\n for quads in to_update:\n\n # Grab the new data\n if quads == self.neighbourhood_composition_quads:\n hist_data = self.composition_data(agent_type='neighbourhood')\n elif quads == self.school_composition_quads:\n hist_data = self.composition_data(agent_type='school')\n else:\n hist_data = self.composition_data(agent_type='household')\n\n # 
Update the bars and edges\n for group in hist_data.keys():\n\n hist, edges = np.histogram(hist_data[group],\n density=True,\n bins=20)\n\n # Update histogram\n quads[group].data_source.data['top'] = hist\n quads[group].data_source.data['left'] = edges[:-1]\n quads[group].data_source.data['right'] = edges[1:]", "def do(self, market_data):\r\n self.data.history = self.data.history + market_data", "def update(self, pos = 0, msg = \"\"):\n if self.print_indicator and self.indicator and not self.video_model == None:\n C=pyqtgraph.hsvColor(1)\n pen=pyqtgraph.mkPen(color=C,width=1)\n data = np.zeros(10)\n\n pos = int(self.video_model.get_pos(datatype = self.model.get_datatype()))\n self.indicator.setData([pos,pos],[self.indicator_min,self.indicator_max]) #= self.plot_item.plot([pos,pos],[self.indicator_min,self.indicator_max],pen=pyqtgraph.mkPen(color=pyqtgraph.hsvColor(2),width=1))", "def start(self, data):\n # show the plotting and leave a handle\n handle = show(self.p, notebook_handle=True)\n \n # keep update the column data source with new data and push the \n # updating onto Jupyter notebook.\n while True:\n try: self.ds.stream(data, rollover=300)\n except ValueError: return\n push_notebook(handle=handle)\n time.sleep(self.itv)", "def update_data(self):\n # Just set data_changed, the component should do the rest.\n self.data_changed = True", "def update():\n global dragon, x, y, position, angle_left, angle_right, size, new\n x, y, position, angle_left, angle_right, new = modify_pos(x, y, position,\n angle_left,\n angle_right,\n size, new)\n dragon.setData(x, y) # update plot", "async def plot_device_data(self, axes, name) -> []:\n pass", "def _push(self):\n if len(self._stat_now):\n self._stat_now['epoch_num'] = self.epoch_num\n self._stat_now['global_step'] = self.global_step\n\n self._stats.append(self._stat_now)\n self._stat_now = {}\n self._write_stat()", "def updateGraphs(self):\n # first update all three buffers\n tuiBufferName = self.dataClient.recv() # receive 'error'\n while tuiBufferName != 'end buffers':\n tuiData = self.dataClient.recv()\n self.logger.debug(f'Appending {tuiData} to buffer {tuiBufferName}')\n\n if(tuiBufferName == 'error'):\n self.model.errorBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'output'):\n self.model.outputBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'reference'):\n self.model.referenceBuffer.append([float(tuiData.flat[0])])\n if(tuiBufferName == 'output-error'):\n self.model.errorPercentage = tuiData.flat[0]\n\n tuiBufferName = self.dataClient.recv()", "def _set_data(self):\n\n # Remove old elements from plot\n if self.scatter is not None:\n self.scatter.remove()\n if self.oneoneline is not None:\n self.oneoneline.remove()\n\n # Get new data and plot\n self.slice = self.results.set_index('Location').loc[self.locnaam, [self.input_parameter, self.result_parameter]].values.T\n self.scatter = self.ax.scatter(*self.slice, s=5, alpha=0.7, color='C0')\n\n # Determine axes limits\n lowerlim, upperlim = self.slice.min(), self.slice.max()\n span = (upperlim - lowerlim)\n lowerlim = max(0, lowerlim - 0.05 * span)\n upperlim = upperlim + 0.05 * span\n\n # Plot a diagonal 1:1 line\n self.oneoneline, = self.ax.plot([lowerlim, upperlim], [lowerlim, upperlim], color='grey', dashes=(4, 3), lw=1.0)\n\n # Set the axes limits\n self.ax.set_xlim(lowerlim, upperlim)\n self.ax.set_ylim(lowerlim, upperlim)\n self.canvas.draw()", "def recive_data(self):\n # read all available data\n while self.ser.inWaiting() > self.INPUT_DATA_SIZE+1:\n data = 
array.array('c')\n # search the header\n data.append(self.ser.read(1))\n while data[0] != chr(1):\n data[0] = self.ser.read(1)\n \n # wait for all available data\n while self.ser.inWaiting() < (self.INPUT_DATA_SIZE-1):\n time.sleep(0.03);\n \n # recives data\n data = self.ser.read(self.INPUT_DATA_SIZE-1)\n \n # prove if you want graphical data\n if self.pushButton_monitor.isChecked():\n # decodes the data\n t = struct.unpack('I', data[3]+data[2]+data[1]+data[0])\n r = struct.unpack('f', data[4]+data[5]+data[6]+data[7])\n x0 = struct.unpack('f', data[8]+data[9]+data[10]+data[11])\n x1 = struct.unpack('f', data[12]+data[13]+data[14]+data[15])\n u = struct.unpack('f', data[16]+data[17]+data[18]+data[19])\n \n self.time = t[0]*25e-9\n \n # prepare the string output\n aux_str = \" t = \"+str(self.time)+\"\\t\"\n aux_str += \" r = \"+str(r[0])+\"\\t\"\n aux_str += \" u = \"+str(u[0])+\"\\t\"\n aux_str += \" x1 = \"+str(x1[0])+\"\\t\"\n aux_str += \" x0 = \"+str(x0[0])+\"\\n\"\n # print string output\n self.textBrowser.insertPlainText(aux_str)\n \n # append data to the arrays\n self.graf_t.append(self.time)\n self.graf_r.append(r[0])\n self.graf_x0.append(x0[0])\n self.graf_x1.append(x1[0])\n self.graf_u.append(u[0])\n \n # remove one value if the arrays have maximum length\n if self.graf_t.buffer_info()[1] >= NUM_SAMPLES:\n self.graf_t.pop(0)\n self.graf_r.pop(0)\n self.graf_x0.pop(0)\n self.graf_x1.pop(0)\n self.graf_u.pop(0)\n \n # reload number of samples lavel\n self.label_samples_value.setText(str(self.graf_t.buffer_info()[1]))\n # reload number of waiting chars in serial rx buffer\n self.label_rx_buff_value.setText(str(self.ser.inWaiting()))\n\n # reload mutex area\n self.updated_data = 1\n \n # prove if there are available id's\n if (self.actionPC_Monitor.isChecked() and data[20] == chr(2)):\n # if it is true, looks how much id's\n i = struct.unpack('B', data[21])\n\n if i[0] < STACK_SIZE:\n for z in range(i[0]):\n new_device = struct.unpack('B', data[z+22])\n new_string = str(new_device[0])\n \n llista = self.listWidget_link.findItems(new_string, QtCore.Qt.MatchExactly)\n if len(llista) == 0:\n self.listWidget_link.addItem(new_string)", "def updateGlobal(self):\n state = self.getState()\n n = len(self.myPlotCanvasList)\n for i in range(n):\n if self.myPlotCanvasList[i] is not None:\n self.myPlotCanvasList[i].myUpdateGlobal(state)", "def store(self, state):\n if self.interactive:\n self._fig.clear()\n fig = self._fig\n else:\n fig = plt.figure()\n\n self._plot_function(fig, copy_state(state))\n\n fig.canvas.draw()\n if not self.interactive:\n plt.show()", "def myUpdate(self, stateDict=None):\n\n # store stateDict so we can replot on changing dark theme\n if stateDict is None and self.stateDict is not None:\n # re-use our stateDict\n stateDict = self.stateDict\n else:\n if stateDict is None:\n return\n self.stateDict = stateDict.copy()\n\n if stateDict is None:\n return\n \n dataType = stateDict['dataType']\n hue = stateDict['hue']\n groupByColumnName = stateDict['groupByColumnName']\n\n plotType = stateDict['plotType']\n #self.plotType = plotType\n\n xStatHuman = stateDict['xStatHuman']\n yStatHuman = stateDict['yStatHuman']\n\n xStat = stateDict['xStat']\n yStat = stateDict['yStat']\n\n '''\n print('=== myMplCanvas.myUpdate()')\n print(' ', plotType)\n print(' ', 'xStatHuman:', xStatHuman, 'yStatHuman:', yStatHuman)\n print(' ', 'xStat:', xStat, 'yStat:', yStat)\n '''\n\n xIsCategorical = stateDict['xIsCategorical']\n yIsCategorical = stateDict['yIsCategorical']\n\n masterDf = 
stateDict['masterDf']\n meanDf = stateDict['meanDf']\n\n self.plotDf = meanDf\n\n self.canvas.axes.clear()\n\n picker = 5\n if plotType in ['Scatter Plot', 'Scatter + Raw + Mean']:\n # scatter plot user selection\n self.scatterPlotSelection, = self.canvas.axes.plot([], [], 'oy',\n markersize=12, fillstyle='none')\n\n # main scatter\n try:\n self.whatWeArePlotting = sns.scatterplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes, picker=picker,\n zorder=0)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print(' EXCEPTION: in myUpdate() \"Scatter Plot\", exception is:')\n print(' ', e)\n print(' ', 'plotType:', plotType)\n print(' ', 'xStat:', xStat)\n print(' ', 'yStat:', yStat)\n print(' ', 'hue:', hue)\n\n # sem in both x and y, pulling from masterDf\n if dataType=='File Mean' or plotType=='Scatter + Raw + Mean':\n # we need to do this for each hue???\n # if x or y is in categorical (e.g. a string) then do not do this ...\n if xIsCategorical or yIsCategorical:\n pass\n else:\n print(' grabbing mean +- sem for self.groupByColumnName:', groupByColumnName)\n color = 'k'\n xd = masterDf.groupby(groupByColumnName).mean()[xStat]\n xerrd = masterDf.groupby(groupByColumnName).sem()[xStat]\n yd = masterDf.groupby(groupByColumnName).mean()[yStat]\n yerrd = masterDf.groupby(groupByColumnName).sem()[yStat]\n \n # logger.info('2023 declan')\n # print(' groupByColumnName:', groupByColumnName)\n # print(' xd:', xd)\n # print(' yd:', yd)\n # print(' xerrd:', xerrd)\n # print(' yerrd:', yerrd)\n \n self.canvas.axes.errorbar(xd, yd, xerr=xerrd, yerr=yerrd,\n fmt='none', capsize=0, zorder=10, color=color, alpha=0.5);\n\n elif plotType == 'Histogram':\n yStatHuman = 'Count'\n doKde = False #stateDict['doKDE']\n try:\n g = sns.histplot(x=xStat, hue=hue, kde=doKde,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTIONin Histogram:', e)\n\n elif plotType == 'Cumulative Histogram':\n yStatHuman = 'Probability'\n try:\n g = sns.histplot(x=xStat, hue=hue, cumulative=True, stat='density',\n element=\"step\", fill=False, common_norm=False,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTION in Cumulative Histogram:', e)\n\n elif plotType == 'Cumulative Histogram':\n yStatHuman = 'Probability'\n try:\n g = sns.histplot(x=xStat, hue=hue, cumulative=True, stat='density',\n element=\"step\", fill=False, common_norm=False,\n data=meanDf, ax=self.canvas.axes, picker=picker)\n except (ValueError) as e:\n self.fig.canvas.draw()\n print('EXCEPTION in Cumulative Histogram:', e)\n\n elif plotType == 'Violin Plot':\n if not xIsCategorical:\n warningStr = 'Violin plot requires a categorical x statistic'\n else:\n g = sns.violinplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes)\n\n elif plotType == 'Box Plot':\n if not xIsCategorical:\n warningStr = 'Box plot requires a categorical x statistic'\n else:\n g = sns.boxplot(x=xStat, y=yStat, hue=hue,\n data=meanDf, ax=self.canvas.axes)\n\n elif plotType == 'Raw + Mean Plot':\n if not xIsCategorical:\n warningStr = 'Raw + Mean plot requires a categorical x statistic'\n else:\n try:\n # does not work here for categorical x\n #self.scatterPlotSelection, = self.canvas.axes[0].plot([], [], 'oy',\n # markersize=12, fillstyle='none')\n\n '''\n colorList = [('red'), ('green'), 'b', 'c', 'm', 'y']\n hueList = meanDf[hue].unique()\n palette = {}\n for idx, hue in enumerate(hueList):\n palette[hue] = colorList[idx]\n 
print(palette)\n '''\n\n palette = sns.color_palette(\"Paired\")\n #palette = ['r', 'g', 'b']\n\n # stripplot\n #g = sns.swarmplot(x=xStat, y=yStat,\n g = sns.stripplot(x=xStat, y=yStat,\n hue=hue,\n palette=palette,\n data=meanDf,\n ax=self.canvas.axes,\n #color = color,\n dodge=True,\n alpha=0.6,\n picker=picker,\n zorder=1)\n\n\n #logger.error('!!!!!!!!!!!! grabbing get_legend_handles_labels()')\n self.canvas.axes.legend().remove()\n\n #logger.error('!!!!!!!!!!!! grabbing get_legend_handles_labels()')\n logger.info(f' REMAKING LEGEND sns.pointplot() plotNumber:{self.plotNumber}')\n handles, labels = self.canvas.axes.get_legend_handles_labels()\n l = self.canvas.axes.legend(handles[0:2], labels[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n #self.myLegend = self.canvas.axes.Legend(handles[0:2], labels[0:2], bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)\n\n '''\n if self.darkTheme:\n color = 'w'\n else:\n color = 'k'\n color = [color] * len(hueList)\n print('color:', color)\n '''\n\n self.whatWeArePlotting = sns.pointplot(x=xStat, y=yStat,\n hue=hue,\n #palette=palette,\n data=meanDf,\n estimator=np.nanmean,\n errorbar=('ci', 68),\n capsize=0.1,\n ax=self.canvas.axes,\n color='r',\n #legend='full',\n #zorder=10)\n )\n except (ValueError) as e:\n print('EXCEPTION in \"Raw + Mean Plot\":', e)\n traceback.print_exc()\n\n elif plotType == 'Regression Plot':\n # regplot does not have hue\n if xIsCategorical or yIsCategorical:\n warningStr = 'Regression plot requires continuous x and y statistics'\n else:\n # todo: loop and make a regplot\n # for each unique() name in\n # hue (like Region, Sex, Condition)\n hueList = masterDf[hue].unique()\n for oneHue in hueList:\n if oneHue == 'None':\n continue\n tmpDf = meanDf [ meanDf[hue]==oneHue ]\n #print('regplot oneHue:', oneHue, 'len(tmpDf)', len(tmpDf))\n sns.regplot(x=xStat, y=yStat, data=tmpDf,\n ax=self.canvas.axes);\n else:\n print(' did not understand plot type:', plotType)\n\n\n #\n # update\n self.canvas.axes.figure.canvas.mpl_connect(\"pick_event\", self.onPick)\n\n self.mplCursorHover = None\n if stateDict['doHover'] and self.whatWeArePlotting is not None:\n self.mplCursorHover = mplcursors.cursor(self.whatWeArePlotting, hover=True)\n @self.mplCursorHover.connect(\"add\")\n def _(sel):\n #sel.annotation.get_bbox_patch().set(fc=\"white\")\n sel.annotation.arrow_patch.set(arrowstyle=\"simple\", fc=\"white\", alpha=.5)\n # row in df is from sel.target.index\n #print('sel.target.index:', sel.target.index)\n ind = sel.target.index\n annotationDict = self.getAnnotation(ind)\n myText = ''\n for k,v in annotationDict.items():\n myText += f'{k}: {v}\\n'\n sel.annotation.set_text(myText)\n\n #\n #self.mySetStatusBar(warningStr)\n\n self.canvas.axes.spines['right'].set_visible(False)\n self.canvas.axes.spines['top'].set_visible(False)\n\n if not stateDict['showLegend']:\n #print('self.canvas.axes.legend():', self.canvas.axes.legend())\n #print('self.canvas.axes.legend:', self.canvas.axes.legend)\n #if self.canvas.axes.legend() is not None:\n if 1:\n #logger.error('!!!!!!!!!!!! 
grabbing get_legend_handles_labels()')\n self.canvas.axes.legend().remove()\n\n #print('myUpdate() self.plotSize:', self.plotSize)\n self.canvas.axes.set_xlabel(xStatHuman)\n self.canvas.axes.set_ylabel(yStatHuman)\n '''\n if self.plotSize == 'paper':\n fontsize = 10\n self.canvas.axes[0].set_xlabel(xStatHuman, fontsize=fontsize)\n self.canvas.axes[0].set_ylabel(yStatHuman, fontsize=fontsize)\n else:\n self.canvas.axes[0].set_xlabel(xStatHuman)\n self.canvas.axes[0].set_ylabel(yStatHuman)\n '''\n\n # subplots_adjust\n #self.fig.canvas.draw_idle()\n self.fig.canvas.draw()", "def plot(self):\n if self.tabWidget.count() == 0:\n return\n\n # Error if not enough slabs\n plotType = str(self.plotOptions.getPlotType()) \n if len(self.selectedVars) < 2 and self.requiresTwoSlabs(plotType):\n self.showError('Error Message to User', 'Vector, Scatter, Meshfill or XvsY plots \\nmust have two data variables. The data \\nvariables must be selected in the \\n\"Defined Variables\" window.')\n return\n\n # Create & Update the graphics method / CDATCell vistrails modules\n # *** IMPORTANT ***\n # Everytime plot is pressed, this will create a new Graphics Method and\n # CDATCell Module. Instead it should ONLY create a new graphics method\n # and CDATCell module if the variable isn't already connected to an\n # existing Graphics Method / CDATCell module. This results in plots \n # being plotted multiple times.\n axisList = self.tabWidget.currentWidget()\n self.emit(QtCore.SIGNAL('createModule'), gm_name)\n self.emit(QtCore.SIGNAL('createModule'), cdatcell_name) \n self.setVistrailsGraphicsMethod() \n self.setVistrailsCDATCell()\n\n # Get the names of the 2 slabs so we can connect their modules in vistrails\n if self.requiresTwoSlabs(plotType):\n var1 = self.selectedVars[-1].id\n var2 = self.selectedVars[-2].id\n else:\n var1 = self.currentTabName()\n var2 = None\n\n # Emit signal to GuiController to connect ports and plot\n self.emit(QtCore.SIGNAL('plot'), var1, var2)\n\n # If a quickplot is plotted, define current variable under 'quickplot'\n if (self.currentTabName() == 'quickplot'):\n var = self.getUpdatedVar()\n self.emit(QtCore.SIGNAL('plotPressed'), axisList.getFile(), var)\n\n # Record plot teaching commands\n self.recordPlotTeachingCommand()", "def fillData(self):\n self.graphColors = c.getGraphColors()\n self._tupleListToStrings()\n self.colorlist.SetSelection(0)\n self.delayvalue.SetValue(str(c.getGraphDelay()))\n self._updateButtons(None)", "def _DoUpdatePlot( self, wd, ht ):\n self.ax.grid(\n True, 'both', 'both',\n\tcolor = '#c8c8c8', linestyle = ':', linewidth = 1\n\t)", "def update_plot(self,ax):\n for i,line in enumerate(self.lines):\n line.set_ydata(self.data[i].f)\n for line in self.lines: \n ax.draw_artist(line)", "def update(self, open_tick, high_tick, low_tick, close_tick):\n\n self.open.push(open_tick)\n self.high.push(high_tick)\n self.low.push(low_tick)\n self.close.push(close_tick)", "def plot_refresh_handler(args):\n stream_data, runlimits, runflags = args\n if runflags.exit:\n sys.exit(1)\n\n for line_name in stream_data:\n data = stream_data[line_name]\n curr_data_len = len(data['y'])\n if curr_data_len == 0:\n # no data yet\n continue\n\n if data['last_len'] >= curr_data_len:\n # no new data since last update\n continue\n\n # save length of last line draw\n data['last_len'] = curr_data_len\n\n if FLAGS.timestamp:\n x_data = numpy.array(data['x'])\n else:\n x_data = numpy.array(range(curr_data_len))\n y_data = numpy.array(data['y'])\n\n runlimits.x_max = max(max(x_data), 
runlimits.x_max)\n runlimits.x_min = runlimits.x_max-FLAGS.width\n\n if FLAGS.ymin is not None:\n runlimits.y_min = FLAGS.ymin\n else:\n runlimits.y_min = min(min(y_data), runlimits.y_min)\n\n if FLAGS.ymax is not None:\n runlimits.y_max = FLAGS.ymax\n else:\n runlimits.y_max = max(max(y_data), runlimits.y_max)\n\n data['line'].set_data(x_data, y_data)\n if runflags.update_axis:\n axes = data['line'].get_axes()\n axes.relim()\n axes.set_xlim(runlimits.x_min-1, runlimits.x_max+1)\n axes.autoscale_view(scaley=True, scalex=False)\n\n manager = pylab.get_current_fig_manager()\n manager.canvas.draw()", "def _PlotGraph(self, event):\n self._rcvLock.acquire()\n for j in event.data[0].keys():\n data = event.data[0][j]\n #print data\n line = []\n for k in data.keys():\n if k in COLORS.keys():\n c = COLORS[k]\n else:\n c = 'black'\n line.append(plot.PolyLine(data[k], colour=c, width=1,\n legend=\"Node %d\"%(k,)))\n # To draw markers: default colour = black, size = 2\n # shapes = 'circle', 'cross', 'square', 'dot', 'plus'\n #marker = plot.PolyMarker(event.data[1], marker='triangle')\n\n # set up text, axis and draw\n if j == ERRORPLOT:\n t = \"Synchronization Error\"\n xa = \"Time [s]\"\n ya = \"Error [ms]\"\n elif j == TEMPPLOT:\n t = \"Temperature Index\"\n xa = \"Time [s]\"\n ya = \"Index\"\n elif j == SKEWPLOT:\n t = \"Frequency Error\"\n xa = \"Time [s]\"\n ya = \"Frequency Error [ppm]\"\n gc = plot.PlotGraphics(line, t, xa, ya)\n # Draw graphs for each plot\n self.plotter[j].Draw(gc, xAxis=(self._x_lower,\n self._x_upper), yAxis=(float(self._y_lower[j]),\n float(self._y_upper[j])))\n self._rcvLock.release()", "async def plot(self, new=False) -> None:\n self._logger.debug(\"running\")\n self.figure.clear()\n self.figure.set_tight_layout(True)\n num_plots = len(self._plots)\n axes = None\n for i in range(num_plots):\n plot = self._plots[i]\n name = plot[0]\n active = plot[2]\n if active:\n if i == 0:\n axes = self.figure.add_subplot(1, 1, 1)\n axes.tick_params(axis='x', labelrotation=30)\n axes.set_ylabel(name, color='#1f77b4')\n await sleep(.001)\n if not new:\n await create_task(self.plot_device_data(axes, name))\n else:\n alt_axes = axes.twinx()\n alt_axes.set_ylabel(name, color='#ff7f0e')\n alt_axes.tick_params(axis='y', labelcolor='#ff7f0e')\n alt_axes.set_yticks(np.arange(0, 6, step=1))\n await sleep(.001)\n if not new:\n await create_task(self.plot_device_data(alt_axes, name))\n\n if not new:\n self.add_vert_lines()\n await sleep(.001)\n self.figure.canvas.draw()\n self._logger.debug(\"done\")", "def update_plot_xy_values(session_id, plot_name, test_name, xy_values):\n return Plot.update_plot_xy_values(session_id, plot_name, test_name, xy_values)", "def update_graph(self):\n parameters = []\n dtype = {'Timestamp': 'str'}\n for header in self.headers:\n if self.top_plot.current_param in header or self.bottom_plot.current_param in header:\n parameters.append(header)\n dtype[header] = 'float'\n data = pd.read_csv(self.reactor.file,\n dtype=dtype,\n parse_dates=['Timestamp'], usecols=['Timestamp'] + parameters, low_memory=False,\n na_filter=False)\n start_time = data['Timestamp'][0]\n data.insert(loc=2, column='EFT', value=(data['Timestamp'] - start_time) / np.timedelta64(1, 'h'))\n\n for label, content in data.iteritems():\n if label == 'Timestamp' or label == 'EFT':\n continue\n elif self.top_plot.current_param in label:\n self.top_plot.clear()\n self.top_plot.plot(data['EFT'], content)\n else:\n self.bottom_plot.clear()\n self.bottom_plot.plot(data['EFT'], content)", "def 
_add_data_to_model(self, qinfos):\n if len(qinfos) == 0:\n return\n new_points = np.empty((0, self.domain_dim))\n new_vals = np.empty(0)\n for i in range(len(qinfos)):\n new_points = np.concatenate((new_points,\n qinfos[i].point.reshape(-1, self.domain_dim)), axis=0)\n new_vals = np.append(new_vals, [qinfos[i].val], axis=0)\n if self.gp is not None:\n self.gp.add_data(new_points, new_vals)", "def updateplot(self):\n plotfiles = []\n try:\n self.plotter.reset()\n self.plotter.set_xrange(self.xrangemin.value(), self.xrangemax.value())\n self.plotter.set_yrange(self.yrangemin.value(), self.yrangemax.value())\n self.plotter.set_bgirange(self.bgintmin.value(), self.bgintmax.value())\n self.plotter.set_pkrange(self.halphamin.value(), self.halphamax.value())\n for n,pf in enumerate(self.selecteddata):\n tf = os.path.join(self.tempdir, \"tf%d\" % n)\n self.dfparser.writefile(tf, pf)\n plotfiles.append(tf)\n self.plotter.set_plot(plotfiles)\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()\n except plotter.Plotter_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()", "def publishRandomData():\n\t# A multiprocessing Connection is a good way to pass the data\n\t# around. I haven't found a good way to pass a Queue into a\n\t# bokeh server. Doesn't mean there isn't a way though.\n\t# Note that this is hardcoded. If you change it, you'll need\n\t# to update it in bokeh_plotter.py as well.\n\tlistener = multiprocessing.connection.Listener(\n\t\t('localhost', 9999), authkey=b'supersecure')\n\t\t\n\t# Wait until the connection is open, then dump data on the\n\t# connection.\n\twith listener.accept() as conn:\n\t\twhile True:\n\t\t\t# Control the update rate of the plot.\n\t\t\ttime.sleep(0.1)\n\t\t\tconn.send((random.random(), random.random()))", "def updateArrays(self):\n for channelNumber in range(0, 8):\n self.channels[channelNumber][self.currentPosition]=self._voltage_get(channelNumber)#update next element in each array\n self.currentPosition+=1\n if self.currentPosition>=self.numberOfPoints:#reset position to beginning when we hit max number of points (like rolling oscilloscope)\n self.currentPosition=0\n self.cursorXS = self.getCurrentPositionArray()\n #could also set the next points to NaN's to make a gap!", "def send_chart_data(_, changes):\n for model, change in changes:\n if isinstance(model, Temperature) and change == 'insert':\n temp_data = model\n socketio.emit('chart_data', json.dumps(temp_data.serialize))", "def _UpdatePlotImpl( self ):\n if self.ax is not None:\n self.axline = None\n self.cursorLine = \\\n self.cursorLine2 = None\n\n# self.ax.clear()\n# if hasattr( self, 'ax2' ) and self.ax2 is not None:\n# self.ax2.clear()\n self.fig.clear()\n self._InitAxes()\n\n#\t\t-- Scale fonts\n#\t\t--\n wd, ht = self.GetClientSize()\n label_font_size = 14\n tick_font_size = 12\n self.titleFontSize = 16\n if 'wxMac' not in wx.PlatformInfo and wd < 800:\n\tdecr = (800 - wd) / 50.0\n\tlabel_font_size -= decr\n\ttick_font_size -= decr\n\tself.titleFontSize -= decr\n\n# self.ax.grid(\n# True, 'both', 'both',\n#\t color = '#c8c8c8', linestyle = ':', linewidth = 1\n#\t )\n self._DoUpdatePlot( wd, ht )\n self._DoUpdateRedraw()\n self.canvas.draw()\n #end if", "def _update_plots(self):\n for dock in self.plotDocks:\n for widget in dock.widgets:\n if not self.dataList.findItems(dock.name(), QtCore.Qt.MatchExactly):\n # no data for this plot -> reset it\n widget.getPlotItem().clear()\n # TODO remove tab from dock and del instance\n else:\n 
widget.getPlotItem().clear()\n x_data = self.currentDataset[\"results\"][\"time\"]\n y_data = self._get_data_by_name(dock.name())\n widget.getPlotItem().plot(x=x_data, y=y_data)", "def _send_raw_data(self, data_dict: dict) -> None:\n data = np.array(([point[\"secs\"] for point in data_dict[0][\"data\"]],\n [point[\"val\"] for point in data_dict[0][\"data\"]]))\n self.new_value_signal[np.ndarray].emit(data)", "def update(self, i):\n\n self.current_position = self.mediaPlayer.position()\n \t\n \n\n \"\"\"\n \"Record mode\" and \"wide x-axis mode\" shouls not work together. Wide mode is only for reading data, not writing data. \n The user is not allowed to write data when 16 000 points are displayed (wide mode) on tha diagram. If he does so, the frequency of the graph points decreases with time. \n \"\"\"\n \n if self.checkbox.isChecked():\n self.wideRadio.setEnabled(False)\n if not self.checkbox.isChecked():\n self.wideRadio.setEnabled(True)\n if self.wideRadio.isChecked():\n self.checkbox.setEnabled(False)\n if not self.wideRadio.isChecked():\n self.checkbox.setEnabled(True)\n \n\n\n if self.checkbox.isChecked() and self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n \n self.savedRecently = False\n\n\n self.current_position = self.mediaPlayer.position()\n\n \n if self.xValues == []:\n # \"If the list of xValues is empty\". This happens only in the start of the plotting process.\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n #self.position_index = self.xValues.index(self.current_position)\n \n\n if self.xValues != []:\n\n if self.current_position > max(self.xValues):\n # \"If the point is bigger than the last point\". I.e if the point will be plotted in the end of the current graph.\n\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n\n if self.current_position < max(self.xValues):\n # \"If the point is smaller than the last point\". I.e if the point will be plotted in the middle of the current graph.\n\n \n if self.mediaPlayer.position() < 100:\n # The program has a problem of removing a point if x=0. This if-statement solves the problem.\n self.xValues.pop(0)\n self.yValues.pop(0)\n self.colors.pop(0)\n \n\n\n # Clearing all the points that are 100 ms (or less) in front of the current position. 
\n for number in range(self.current_position, self.current_position + 100):\n if number in self.xValues:\n self.yValues.pop(self.xValues.index(number))\n self.colors.pop(self.xValues.index(number))\n self.xValues.remove(number)\n \n \n \n # Plot new points\n bisect.insort(self.xValues,self.current_position) # Through this method, the element is inserted in order.\n self.yValues.insert(self.xValues.index(self.current_position), self.mouseY)\n self.colors.insert(self.xValues.index(self.current_position), self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n \n\n\n # View modes: zoom or wide.\n\n if self.zoomRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n self.canvas.axes.set_xlim(self.current_position-5000, self.current_position+5000)\n\n self.update_tempLists()\n\n self.curve = self.canvas.axes.scatter(self.tempXList, self.tempYList, s=10 , c=self.tempCList)\n\n\n\n if self.wideRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n\n if self.mediaPlayer.duration() != 0:\n self.canvas.axes.set_xlim(0, self.mediaPlayer.duration())\n elif self.xValues != []:\n self.canvas.axes.set_xlim(0, max(self.xValues))\n\n self.curve = self.canvas.axes.scatter(self.xValues, self.yValues, s=10 , c=self.colors)\n\n \n\n # I remove the previous vertical and horizontal lines. If I do not remove them, the program gets slower and slower, and the frequency of the points decreases with time.\n self.hline.remove()\n self.vline.remove()\n \n # New vertical and horizontal lines are created and updated to the correct values.\n self.vline = self.canvas.axes.axvline(x=self.mediaPlayer.position(), color='gray',linestyle=\":\")\n self.hline = self.canvas.axes.axhline(y=self.mouseY, color='gray',linestyle=\":\")\n\n\n\n return [self.curve] + [self.vline] + [self.hline]", "def update_figure(self):\n\n self.draw()", "def initialiseData(self):\n self.currentPosition = 0\n self.xs = scipy.linspace(0.0, self.numberOfPoints*self.resolution, self.numberOfPoints)\n self.cursorXS = self.getCurrentPositionArray()\n self.cursorVertical = scipy.array([self.verticalLimit,0.0])\n self.array0 = scipy.zeros(self.numberOfPoints)\n self.array1 = scipy.zeros(self.numberOfPoints)\n self.array2 = scipy.zeros(self.numberOfPoints)\n self.array3 = scipy.zeros(self.numberOfPoints)\n self.array4 = scipy.zeros(self.numberOfPoints)\n self.array5 = scipy.zeros(self.numberOfPoints)\n self.array6 = scipy.zeros(self.numberOfPoints)\n self.array7 = scipy.zeros(self.numberOfPoints)\n self.channels = [self.array0,self.array1,self.array2,self.array3,\n self.array4,self.array5,self.array6,self.array7]\n self.arrayPlotData = chaco.ArrayPlotData(xs=self.xs,channel0=self.array0,channel1=self.array1,\n channel2=self.array2,channel3=self.array3,\n channel4=self.array4,channel5=self.array5,\n channel6=self.array6,channel7=self.array7,\n cursorXS = self.cursorXS, cursorVertical=self.cursorVertical)#will be the ArrayPlotData We need", "def plot_changed(self):\n self.plotType = self.ui.selectPlotType.currentText()\n self.value_changed()", "def update_graph(self, data):\n if (self.type == 'matplotlib'):\n pass\n else:\n pass", "def processData(self):\n recordSet = AresChartsService.toMultiSeries(self.vals, self.chartKeys, self.selectedX , self.chartVals, extKeys=self.extKeys)\n self.aresObj.jsGlobal.add(\"data_%s = %s\" % (self.htmlId, json.dumps(recordSet)))", "def on_update(self):\n if self.main.data is not None:\n font = {\n 'family': str(self.le_font.text()),\n 'size': int(str(self.le_font_size.text()))\n }\n\n 
mpl.rc('font', **font)\n\n # Clear the plot\n self.ax.clear()\n\n # Get the data and colormap\n x, y, z = self.main.data.get_pcolor()\n cmap = self.main.canvas.colormap.get_mpl_colormap()\n\n tri_checkboxes = [self.cb_tripcolor.checkState(),\n self.cb_triangulation.checkState()]\n\n # If we are going to need to plot triangulation data, prepare\n # the data so it can be plotted\n if QtCore.Qt.Checked in tri_checkboxes:\n if self.main.data.tri is None:\n self.main.data.generate_triangulation()\n\n xc, yc = self.main.data.get_triangulation_coordinates()\n\n tri = mpl.tri.Triangulation(xc, yc,\n self.main.data.tri.simplices)\n\n # Plot the data using either pcolormesh or tripcolor\n if self.cb_tripcolor.checkState() != QtCore.Qt.Checked:\n quadmesh = self.ax.pcolormesh(x, y, z,\n cmap=cmap,\n rasterized=True)\n\n quadmesh.set_clim(self.main.canvas.colormap.get_limits())\n else:\n quadmesh = self.ax.tripcolor(tri,\n self.main.data.z.ravel(),\n cmap=cmap, rasterized=True)\n\n quadmesh.set_clim(self.main.canvas.colormap.get_limits())\n\n # Plot the triangulation\n if self.cb_triangulation.checkState() == QtCore.Qt.Checked:\n self.ax.triplot(tri, 'o-', color='black',\n linewidth=0.5, markersize=3)\n\n self.ax.axis('tight')\n\n title = self.format_label(str(self.le_title.text()))\n title = '\\n'.join(textwrap.wrap(title, 40,\n replace_whitespace=False))\n\n # Set all the plot labels\n self.ax.set_title(title)\n self.ax.set_xlabel(self.format_label(self.le_x_label.text()))\n self.ax.set_ylabel(self.format_label(self.le_y_label.text()))\n\n # Set the axis tick formatters\n self.ax.xaxis.set_major_formatter(FixedOrderFormatter(\n str(self.le_x_format.text()), float(self.le_x_div.text())))\n self.ax.yaxis.set_major_formatter(FixedOrderFormatter(\n str(self.le_y_format.text()), float(self.le_y_div.text())))\n\n if self.cb is not None:\n self.cb.remove()\n\n # Colorbar layout\n orientation = str(self.cb_cb_orient.currentText())\n self.cb = self.fig.colorbar(quadmesh, orientation=orientation)\n\n self.cb.formatter = FixedOrderFormatter(\n str(self.le_z_format.text()), float(self.le_z_div.text()))\n\n self.cb.update_ticks()\n\n self.cb.set_label(self.format_label(self.le_z_label.text()))\n self.cb.draw_all()\n\n # Plot the current linecut if neccesary\n if self.cb_linecut.checkState() == QtCore.Qt.Checked:\n for linetrace in self.main.linecut.linetraces:\n if linetrace.type == 'horizontal':\n plt.axhline(linetrace.position, color='red')\n elif linetrace.type == 'vertical':\n plt.axvline(linetrace.position, color='red')\n\n self.fig.tight_layout()\n\n self.canvas.draw()", "def update_got_plt_table_data(self, new_data):\n\n self.update_got_plt_table.emit([], True)\n for entry in new_data:\n self.update_got_plt_table.emit(entry, False)", "def update_visuals(self):\n\n result, data = self.dev.grab_pipe()\n if not result:\n log.critical(\"Problem grabbing pipe\")\n\n if self.live_updates == True:\n self.update_graph(data)\n self.curve_render += 1\n self.update_image(data)\n self.check_image(self.curve_render)\n\n self.update_fps()\n self.data_timer.start(0)", "def update(self, i):\n print(\"progress {}\".format(i))\n self.scat.remove()\n # data = (np.c_[np.squeeze(self.lons[:, i]), np.squeeze(self.lats[:, i])])\n colors = plt.cm.viridis(self.z[:,i])\n self.scat = self.config.ax.scatter(self.lons[:, i], self.lats[:, i], marker=\"o\",\n facecolors=colors, s=4,\n transform=self.config.projection, edgecolor='none')\n\n date_object = self.time[i]\n d = pd.to_datetime(date_object)\n plt.title('{}.{}.{} 
{}:{}'.format(d.year, d.month, d.day, d.hour, d.minute))\n\n return self.scat", "def XPLMDataChanged_f(inRefcon):", "def push_data(self, wave_data, finish_processing=False):\n self._parent_conn.send((wave_data, finish_processing))", "def store(self):\n store_moments = self.steps_performed % self.meas_every[0] == 0\n store_coords = self.steps_performed % self.meas_every[1] == 0\n if not (store_moments or store_coords):\n return\n Xp = np.copy(self.bunch.X[:, [1, 3]])\n self.kick(+0.5 * self.ds) # sync positions/slopes\n if store_moments:\n self.history.store_moments(self.s)\n if store_coords:\n self.history.store_coords(self.s)\n self.bunch.X[:, [1, 3]] = Xp", "def update_data():\n pass", "def refresh(self):\n self._list_of_points = []\n self._add_points()", "def set_layout( self ):\n self.main_frame = QtGui.QWidget()\n #self.main_frame.setFixedHeight(500)\n \n self.plot = mtp.mtp( self.main_frame )\n \n \n #actual layout \n #******************\n \n # layout left:\n #-------------\n vboxleft = QtGui.QVBoxLayout()\n \n # the matplotlib canvas\n vboxleft.addWidget(self.plot.canvas) \n \n # the play and stop button\n hbox = QtGui.QHBoxLayout()\n self.play_button = QtGui.QPushButton(\"&Play\")\n hbox.addWidget(self.play_button)\n self.stop_button = QtGui.QPushButton(\"&Stop\")\n hbox.addWidget(self.stop_button)\n vboxleft.addLayout(hbox)\n\n #layout right:\n #------------\n\n # prepare the buttons, the horizontal lines and the spacing\n \n # Serial port configuration\n self.select_serial_box = QtGui.QComboBox()\n self.select_serial_box.addItems( self.data_source.si.getListOfSerialPorts() )\n \n self.select_speed_box = QtGui.QComboBox()\n self.select_speed_box.addItems( self.data_source.si.bitrates )\n self.select_speed_box.setCurrentIndex( 1 )\n \n # Configuration of the output plot\n self.plotname_box = QtGui.QComboBox()\n self.plotname_box.addItems( self.plot.plotnames )\n \n # select the time interval to update the plot\n labelspinbox = QtGui.QLabel(\"Update (ms):\")\n self.spinbox_timestep = QtGui.QSpinBox()\n self.spinbox_timestep.setRange(100,2000) #from 0.1 to 5 seconds\n self.spinbox_timestep.setSingleStep(50)\n self.spinbox_timestep.setValue( self.timestep)\n labelspinbox.setBuddy(self.spinbox_timestep )\n \n # show last N data points\n self.showlastsCheckBox = QtGui.QCheckBox(\"Show last:\")\n self.spinbox_showlast= QtGui.QSpinBox()\n self.spinbox_showlast.setRange(10,1000) #from 0.1 to 5 seconds\n self.spinbox_showlast.setValue( 50)\n \n \n # Save plot button\n self.save_button = QtGui.QPushButton(\"S&ave plot\")\n\n # Reset data button\n self.resetdata_button = QtGui.QPushButton(\"&Reset data\")\n \n # Close button\n self.close_button = QtGui.QPushButton(\"&Close\")\n \n # in order to fix the width of the right layout\n # one needs to put the boxlayout in a widget\n vboxrightWidget = QtGui.QWidget()\n vboxright = QtGui.QVBoxLayout(vboxrightWidget)\n \n # inserting the widgets in the layout\n \n # serial configuration\n label = QtGui.QLabel(\"Serial configuration\")\n vboxright.addWidget(label)\n \n hbox = QtGui.QHBoxLayout()\n label = QtGui.QLabel(\"Port:\")\n hbox.addWidget( label )\n hbox.addWidget( self.select_serial_box)\n vboxright.addLayout( hbox )\n \n hbox = QtGui.QHBoxLayout()\n label = QtGui.QLabel(\"Bitrate:\")\n hbox.addWidget( label )\n hbox.addWidget( self.select_speed_box)\n vboxright.addLayout( hbox )\n \n # horizontal line\n line = QtGui.QFrame(self)\n line.setFrameShape(QtGui.QFrame.HLine)\n line.setFrameShadow(QtGui.QFrame.Sunken)\n vboxright.addWidget(line)\n \n 
# plot type\n label = QtGui.QLabel(\"Plot type\")\n vboxright.addWidget(label)\n vboxright.addWidget(self.plotname_box)\n \n # plot interval\n hbox_spinbox = QtGui.QHBoxLayout()\n hbox_spinbox.addWidget(labelspinbox)\n hbox_spinbox.addWidget(self.spinbox_timestep)\n vboxright.addLayout(hbox_spinbox)\n \n # horizontal line\n line = QtGui.QFrame(self)\n line.setFrameShape(QtGui.QFrame.HLine)\n line.setFrameShadow(QtGui.QFrame.Sunken)\n vboxright.addWidget(line)\n \n # Plot confifuration\n hboxshowlast = QtGui.QHBoxLayout()\n hboxshowlast.addWidget( self.showlastsCheckBox )\n hboxshowlast.addWidget( self.spinbox_showlast )\n vboxright.addLayout( hboxshowlast )\n \n # vertical space\n vboxright.addItem(QtGui.QSpacerItem(20,40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding))\n \n # buttons\n vboxright.addWidget(self.save_button)\n vboxright.addWidget(self.resetdata_button)\n vboxright.addWidget(self.close_button)\n \n #fix the width of the right layout through its enclosing widget\n vboxright.setContentsMargins(0,0,0,0)\n vboxrightWidget.setFixedWidth(180)\n \n \n # Global horizontal layout: takes the two vertical box layouts\n #-------------------------------------------------------------\n hboxmain = QtGui.QHBoxLayout()\n hboxmain.addLayout(vboxleft)\n hboxmain.addWidget(vboxrightWidget)\n \n # setting the global horizontal box as the main_frame layout\n #-----------------------------------------------------------\n self.main_frame.setLayout( hboxmain )\n self.setCentralWidget( self.main_frame )", "def updateDataStorage(infoToPlot, directory):\n \n \n dataStored=pickle.load(open(os.path.join(directory,'data.pkl'), 'rb'))\n \n for key, value in dataStored.items():\n value += infoToPlot[key]\n infoToPlot[key]=[] \n \n \n with open(os.path.join(directory,'data.pkl'), 'wb') as f:\n pickle.dump(dataStored, f, pickle.HIGHEST_PROTOCOL)", "def on_worker_started(self):\n\n # Old backend:\n #\n # self.measurements_list = [QLineSeries() for x in range(8)]\n #\n # Add a legend to each chart, and connect data (series) to charts\n # for i, series in enumerate(self.measurements_list):\n # self.chart_list[i].chart().legend().setVisible(False)\n # self.chart_list[i].chart().addSeries(series)\n #\n # Add axes to each chart\n # self.xaxis_list = [QValueAxis() for x in range(8)]\n # self.yaxis_list = [QValueAxis() for x in range(8)]\n\n # for i, series in enumerate(self.measurements_list):\n # series.attachAxis(self.xaxis_list[i])\n # series.attachAxis(self.yaxis_list[i])\n\n #\n # Prepare EMG visualization\n #\n for i, series in enumerate(self.measurements_list):\n # self.chart_list[i].chart().addAxis(self.xaxis_list[i], Qt.AlignBottom)\n # self.chart_list[i].chart().addAxis(self.yaxis_list[i], Qt.AlignLeft)\n # self.xaxis_list[i].setRange(0, NUM_GUI_SAMPLES)\n # self.yaxis_list[i].setRange(-128, 127) # EMG values are signed 8-bit\n # self.chart_list[i].setXRange(0, NUM_GUI_SAMPLES)\n\n # Generate an initial, empty plot --> update data later\n self.plotted_data[i] = self.chart_list[i].plot(self.data_indices, self.measurements_list[i],\n pen=pg.functions.mkPen(\"08E\", width=2),\n symbol='o', symbolSize=SYMBOL_SIZE)\n\n # Update states\n self.enable_text.setText(\"Disable: \")\n self.connected = True\n self.enable_box.setEnabled(True)", "def append(self, sensorData):\n \n self.ringBuffer.append(sensorData)", "def listen_and_send(self):\n hadEvent = False\n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in 
range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value, 2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n if event.type == pygame.JOYBUTTONDOWN:\n # A button on the joystick just got pushed down\n hadEvent = True\n elif event.type == pygame.JOYAXISMOTION:\n # A joystick has been moved\n hadEvent = True\n\n if hadEvent:\n\n # If platform is linux we need to change some values in axis_data\n os.system('clear')\n print(\"Axis before\")\n pprint.pprint(self.axis_data)\n if sys.platform == 'linux':\n #self.axis_data[2], self.axis_data[3], self.axis_data[4] = self.axis_data[4], self.axis_data[2], self.axis_data[3]\n temp2 = self.axis_data[2]\n temp3 = self.axis_data[3]\n temp4 = self.axis_data[4]\n self.axis_data[2] = temp4\n self.axis_data[3] = temp2\n self.axis_data[4] = temp3\n\n\n self.event_dict['axis'] = self.axis_data\n self.event_dict['button'] = self.button_data\n message = pickle.dumps(self.event_dict, protocol=4)\n message = bytes(f\"{len(message):<{HEADERSIZE}}\", 'utf-8') + message\n self.sock.sendall(message)\n\n #if self.button_data[4]:\n # self.verbose = not self.verbose\n\n if self.verbose:\n\n # print(\"Button \")\n # pprint.pprint(self.button_data)\n print(\"Axis \")\n pprint.pprint(self.axis_data)\n # print(\"Motion \")\n # pprint.pprint(self.hat_data)", "def __plot_pres__(self, refresh=False, *args):\n # If plot is not requested, return:\n if not self.plotPressureVar.get():\n return\n\n # Check for a closed window:\n if 'pressure' in self.plots.keys() and not matplotlib.pyplot.fignum_exists(self.plots['pressure'].number):\n del self.plots['pressure']\n refresh = False\n # Update the existing plot, if it exists\n refresh = refresh or 'pressure' in self.plots.keys()\n if refresh:\n if 'pressure' in self.plots.keys():\n fig = self.plots['pressure']\n fig = matplotlib.pyplot.figure(fig.number)\n fig.clear()\n else:\n return\n # Make a new window:\n else:\n fig = matplotlib.pyplot.figure(figsize=(4,3))\n fig.canvas.set_window_title('pressure, time = ' + '{:.3f}'.format(1e9*self.imp.t(self.it)))\n ax = fig.add_subplot(111)\n\n # Plot:\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.P((self.it), self.ir)[0], 'k-')\n\n ax.set_xlabel('r (um)', fontsize=12)\n ax.set_ylabel('Pressure (GBar)', fontsize=12)\n\n if self.logxVar.get():\n ax.set_xscale('log')\n if self.logyVar.get():\n ax.set_yscale('log')\n\n matplotlib.pyplot.tight_layout()\n\n if not refresh:\n fig.show()\n fig.canvas.draw()\n if self.wm is not None:\n self.wm.addWindow(matplotlib.pyplot.get_current_fig_manager().window)\n self.plots['pressure'] = fig", "def update_plot(axes):\n axes.clear()\n\n i = C.i\n C.i += di # globale Zählvariable erhöhen\n if C.i >= len(tt):\n time.sleep(2)\n C.i = 0\n\n t = tt[i]\n q1 = qq1[i]\n q2 = qq2[i]\n q3 = qq3[i]\n CCframe(q1, q2, q3)\n\n # Ausgabe der aktuellen Zeit\n pl.text(0.06, 0.05, \"t = %3.2fs\" % t, transform = axes.transAxes)\n pl.axis([-3, 3, -3, 3])\n axes.figure.canvas.draw()", "def replot_history(self):\n nopen = pg.mkPen(style=QtCore.Qt.NoPen)\n self.history_plot.setData(\n 
pos=self.history,\n pen=nopen,\n brush=self.history_brushes[-len(self.history):])" ]
[ "0.6883734", "0.68772954", "0.6770715", "0.6723177", "0.6713831", "0.66170454", "0.6524996", "0.6524996", "0.6524996", "0.6524996", "0.6524996", "0.65054625", "0.6487483", "0.6401084", "0.6326303", "0.6279867", "0.622038", "0.60897267", "0.60824525", "0.6045853", "0.6021864", "0.6018936", "0.60125655", "0.60030687", "0.6002355", "0.59990627", "0.59869075", "0.5959131", "0.5925568", "0.5907027", "0.5870748", "0.58625025", "0.5841206", "0.58289635", "0.58145636", "0.5804401", "0.5791306", "0.57866627", "0.5760279", "0.57279474", "0.57227206", "0.5694862", "0.5684806", "0.5673158", "0.56659156", "0.56588066", "0.5651867", "0.5648323", "0.5646562", "0.5639234", "0.5639208", "0.56361234", "0.56281906", "0.5626481", "0.56252277", "0.5623483", "0.5578319", "0.55724734", "0.55614656", "0.556033", "0.55288994", "0.5510605", "0.550624", "0.5502436", "0.5499612", "0.5489244", "0.5475383", "0.5471225", "0.5470195", "0.54661125", "0.5462015", "0.54525787", "0.54486257", "0.54401153", "0.54356545", "0.54341584", "0.5428018", "0.5424648", "0.5424259", "0.542051", "0.540769", "0.538858", "0.5387982", "0.5385751", "0.5385301", "0.5363264", "0.53489256", "0.5338774", "0.5329636", "0.5321339", "0.5321026", "0.532019", "0.53168684", "0.5311377", "0.53083074", "0.530748", "0.53062415", "0.52864033", "0.5279996", "0.5274696" ]
0.63146627
15
Initializes comm and attaches streams.
def init_comm(self, obj):
    comm = None
    if self.dynamic or self.renderer.widget_mode == 'live':
        comm = self.renderer.comms[self.renderer.mode][0](self)
        attach_streams(self, obj)
    return comm
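A minimal, self-contained sketch of the comm-lookup pattern in the method above. Every name here (DummyComm, Renderer, Plot) is an illustrative stand-in rather than the library's real API, and the stream-attachment step is stubbed out as a comment:

class DummyComm:
    # Stand-in for a comm object constructed per plot instance.
    def __init__(self, plot):
        self.plot = plot

class Renderer:
    # `comms` maps a mode to a tuple whose first element is the comm class,
    # mirroring the `comms[mode][0](self)` lookup in init_comm above.
    mode = 'default'
    widget_mode = 'embed'
    comms = {'default': (DummyComm, None)}

class Plot:
    def __init__(self, dynamic):
        self.dynamic = dynamic
        self.renderer = Renderer()

    def init_comm(self, obj):
        comm = None
        if self.dynamic or self.renderer.widget_mode == 'live':
            comm = self.renderer.comms[self.renderer.mode][0](self)
            # attach_streams(self, obj) would register stream callbacks here
        return comm

plot = Plot(dynamic=True)
assert isinstance(plot.init_comm(obj=None), DummyComm)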
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n # Open stata as pipe; make a queue for non-blocking. Start the thread.\n self.proc = sp.Popen(['stata-mp'], stdin=sp.PIPE, stdout=sp.PIPE, bufsize=1)\n\n self.qu = Queue()\n\n self.thread = Thread(target = self.enqueue_output, args = (self.proc.stdout,\n self.qu))\n self.thread.daemon = True\n self.thread.start()\n\n # Read the initial stdout content.\n self.genout()", "def __init__(self):\n self.write_queue = Manager().Queue()\n\n BaseManager.register('Arduino',Arduino)\n BaseManager.register('Algorithm',Algorithm)\n BaseManager.register('Android',Android)\n BaseManager.register('ImageCV', ImageCV)\n manager = BaseManager()\n manager.start()\n shared_ard = manager.Arduino()\n shared_alg = manager.Algorithm()\n shared_and = manager.Android()\n shared_icv = manager.ImageCV()\n \n p1 = Process(target=self.read_algorithm, args=(shared_alg, shared_icv))\n p1.start()\n p2 = Process(target=self.read_arduino, args=[shared_ard])\n p2.start()\n p3 = Process(target=self.read_android, args=[shared_and])\n p3.start()\n p4 = Process(target=self.read_imagecv, args=[shared_icv])\n p4.start()\n p5 = Process(target=self.write_target, args=(shared_ard, shared_alg, shared_and, shared_icv))\n p5.start()\n p5.join()", "def init_com(self):\r\n self.__ser = serial.Serial(\r\n self.__dev_no, self.__baudrate, timeout=self.__timeout)\r\n\r\n # Stop the Continious Stream, avoid error\r\n self.__ser.write(self.__api.esc_cmd())\r\n self.__ser.write(self.__api.devid_cmd())\r\n tmp = self.__ser.readline().decode()\r\n\r\n # Get Dev ID\r\n if \"ID= \" in tmp:\r\n self.__api.devid = tmp.split(\"ID= \")[1].replace(\"\\r\", \"\")\r\n rospy.loginfo(self.__api.devid)\r\n\r\n init_cmds = [self.__api.factory_settings_cmd, self.__api.format_cmd(self.__format),\r\n self.__api.sample_rate_cmd(100), self.__api.continuous_stream_cmd]\r\n\r\n for cmd in init_cmds:\r\n self.__ser.write(self.__api.write_enable_cmd)\r\n rospy.loginfo(self.__ser.readline().decode())\r\n time.sleep(self.init_sleep)\r\n rospy.loginfo(cmd)\r\n self.__ser.write(cmd)\r\n if cmd != self.__api.continuous_stream_cmd:\r\n rospy.loginfo(self.__ser.readline().decode())\r\n time.sleep(self.init_sleep)\r\n return True\r\n return False", "def connect(self):\n # Streams can be queried by name, type (xdf file format spec), and\n # other metadata.\n\n # NOTE: According to the documentation this is a blocking call that can\n # only be performed on the main thread in Linux systems. 
So far testing\n # seems fine when done in a separate multiprocessing.Process.\n eeg_streams = pylsl.resolve_stream('type', 'EEG')\n marker_streams = pylsl.resolve_stream('type', 'Markers')\n\n assert eeg_streams, \"One or more EEG streams must be present\"\n assert marker_streams, \"One or more Marker streams must be present\"\n self._inlet = pylsl.StreamInlet(eeg_streams[0])\n\n self._marker_inlets = [pylsl.StreamInlet(inlet)\n for inlet in marker_streams]\n\n # initialize the current_markers for each marker stream.\n for inlet in self._marker_inlets:\n self.current_markers[inlet_name(inlet)] = Marker.empty()", "def initialize(self, logger, loop, netconf_ip, netconf_port, statistics,\n xml_to_json_translator):\n self.init_stream_handler(logger, loop, \n netconf_ip, netconf_port, statistics, xml_to_json_translator)", "def initialize( self, logger, loop, netconf_ip, netconf_port, statistics,\n xml_to_json_translator):\n self.init_stream_handler(logger, loop, \n netconf_ip, netconf_port, statistics, xml_to_json_translator)", "def start_stream(self):\n pass", "def __init__(self):\n \n self._read_pipe_name = ''\n self._write_pipe_name = ''\n self._thread: Optional[Thread] = None", "def initialize(self, config: DataConsumerConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.INPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = int(self.batch_size / 2) + 5\n chn.sock_opts['sndhwm'] = 5", "def initialize(self, config: DataProviderConfig) -> None:\n super().initialize(config)\n self.server_socket = PipeSocket.OUTPUT\n # High water mark optimization\n chn: Channel = self.mngr.channels[PIPE_CHN]\n chn.sock_opts['rcvhwm'] = 5\n chn.sock_opts['sndhwm'] = int(self.batch_size / 2) + 5", "def __init__(self, proc_args: Optional[List[str]]):\n if proc_args:\n self.proc = subprocess.Popen(\n proc_args,\n universal_newlines=True,\n stdin=subprocess.PIPE, # pipe STDIN and STDOUT to send and receive messages\n stdout=subprocess.PIPE\n )\n self.outward_comm_stream = self.proc.stdin\n self.inward_comm_stream = self.proc.stdout\n else:\n self.proc = None\n self.outward_comm_stream = sys.stdout\n self.inward_comm_stream = sys.stdin", "def setup(self):\n # create the pull socket (to communicate with this actor, others\n # process have to connect a push socket to this socket)\n self.pull_socket, pull_port = self._create_socket(zmq.PULL, -1)\n\n # create the control socket (to control this actor, a process have to\n # connect a pair socket to this socket with the `control` method)\n self.control_socket, ctrl_port = self._create_socket(zmq.PAIR, 0)\n\n self.pull_socket_address = LOCAL_ADDR + ':' + str(pull_port)\n self.control_socket_address = LOCAL_ADDR + ':' + str(ctrl_port)\n\n self._pull_port.value = pull_port\n self._ctrl_port.value = ctrl_port\n self._values_available.set()", "def __init__(self, sock_obj, cmd_channel):\n asyncore.dispatcher.__init__(self, sock_obj)\n # we toss the use of the asynchat's \"simple producer\" and\n # replace it with a pure deque, which the original fifo\n # was a wrapping of\n self.producer_fifo = deque()\n\n self.cmd_channel = cmd_channel\n self.file_obj = None\n self.receive = False\n self.transfer_finished = False\n self.tot_bytes_sent = 0\n self.tot_bytes_received = 0", "def __init__(self, stream, device, device_ptr, sticky):\n assert stream is not None\n assert device is not None\n assert device_ptr is not None\n\n self._stream = stream\n self._device = device\n self._device_ptr = 
device_ptr\n self._sticky = sticky", "def __init__(self, console_output, console_stream):\n self.console_output = console_output\n self.console_stream = console_stream", "def acquisition_init(self):\n assert self._inlet is not None, \"Connect call is required.\"\n metadata = self._inlet.info()\n log.debug(metadata.as_xml())\n for marker_inlet in self._marker_inlets:\n log.debug(\"Streaming from marker inlet: %s\",\n inlet_name(marker_inlet))\n\n info_channels = self._read_channels(metadata)\n info_fs = metadata.nominal_srate()\n\n # If channels are not initially provided, set them from the metadata.\n # Otherwise, confirm that provided channels match metadata, or meta is\n # empty.\n if not self.channels:\n self.channels = info_channels\n assert self.channels, \"Channels must be provided\"\n else:\n if info_channels and self.channels != info_channels:\n raise Exception(\"Channels read from the device do not match \"\n \"the provided parameters\")\n assert len(self.channels) == (metadata.channel_count() +\n len(self._appended_channels) +\n len(self._marker_inlets)),\\\n \"Channel count error\"\n\n if not self.fs:\n self.fs = info_fs\n elif self.fs != info_fs:\n raise Exception(\"Sample frequency read from device does not match \"\n \"the provided parameter\")", "def _comm_open(self, comm, msg):\n self.calling_comm_id = comm.comm_id\n self._register_comm(comm)\n self._set_pickle_protocol(\n msg['content']['data']['pickle_highest_protocol'])\n\n # IOPub might not be connected yet, keep sending messages until a\n # reply is received.\n self._pending_comms[comm.comm_id] = comm\n self._notify_comm_ready(comm)\n self.kernel.io_loop.call_later(.3, self._check_comm_reply)", "def setupTcp(self):\n \tself.tcpManager = QueuedConnectionManager()\n \tself.tcpReader = QueuedConnectionReader(self.tcpManager, 0)\n \tself.tcpWriter = ConnectionWriter(self.tcpManager, 0)", "def init_recording(self):\n self.statusBar().showMessage('Initialising...')\n self.streams = resolve_stream('type', 'EEG')\n self.inlet = StreamInlet(self.streams[0])\n self.timeObj = []\n self.sampleObj = []", "def __init__(self):\n \n # Initialize logger\n self._log = logging.getLogger(\"OemGateway\")\n \n # Initialize variables\n self._data_buffer = []\n self._settings = {}", "def open(self):\n if self.__stream is None:\n self.__open() # instantiate stream object\n self.__stream.start_stream() # reactivate collecting samples", "def __init__(self, com: AbsCommunicationProcess):\n super().__init__()\n self.__com = com\n self.__is_started = False", "def __init__(self):\n \n # Initialize logger\n self._log = logging.getLogger(\"OemGateway\")\n \n # Initialize variables\n self._data_buffer = []\n self._last_send = time.time()\n self._settings = {}", "def __init__(self):\n super().__init__()\n\n # Will only reply to every 3rd or so tweet, defined in settings\n self.received_tweet_count = 0\n\n # Twitter api init\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n self.twitter_api = tweepy.API(self.auth)\n\n print('Authenticated, creating stream...')\n\n self._init_stream()", "def __init__(self):\n # zu Beginn ist noch kein Modus gesetzt\n self.mode = None\n # zu Beginn sind noch keine Channels/ Pins konfiguriert\n self.channels = {}\n # es sind zu Beginn auch noch keine callbacks fuer events hinzugefuegt\n self.events = []", "def initiate_connection(self) -> None:\n self._stop_waiting_for_opening.clear()\n self._make_fifo_file(self._fifo_out_path)\n 
self._make_fifo_file(self._fifo_in_path)\n if self._open_fifo_thread is None:\n self._open_fifo_thread = threading.Thread(target=self._open_fifo, daemon=True)\n self._open_fifo_thread.start()\n if self._opening_monitor_thread is None:\n self._opening_monitor_thread = threading.Thread(target=self._opening_monitor, daemon=True)\n self._opening_monitor_thread.start()", "def _start_streaming_ram_to_host(self):\n self.regs.SDRAM_HOST_READ_GO = 1\n self.regs.CSTREAM_CFG = 1", "def __attrs_post_init__(self):\n self.state_changes_send_channel, self.state_changes_receive_channel = trio.open_memory_channel(\n math.inf\n )\n self._nursery = self._nursery_manager = None", "def __init__(self, *args, **kwargs):\n super(RpkiListener, self).__init__(*args, **kwargs)\n RpkiBase.__init__(self)\n self.p_err, self.c_err = multiprocessing.Pipe(duplex=False)\n self.c_data, self.p_data = multiprocessing.Pipe(duplex=False)", "def _init_streams(self) -> None:\n assert self._is_root\n assert torch.cuda.is_available()\n # Stream for all-gathering parameters.\n self._streams[\"all_gather\"] = torch.cuda.Stream()\n # Stream for overlapping grad reduction with the backward pass.\n self._streams[\"post_backward\"] = torch.cuda.Stream()\n # Stream for pre-all-gather copies (e.g. H2D or precision cast).\n self._streams[\"pre_all_gather\"] = torch.cuda.Stream()", "def __init__(self, params={}):\n self.lt_ses = lt.session() # pylint: disable=no-member\n self.lt_ses.listen_on(6881, 6891)\n\n self.params = params\n self.queue = deque()\n self.stream_thread = None\n self.handle = None", "def start(self):\n\t\tself.stream.start_stream()", "def __init__(self, ard_dictionary, ard_commands, t_sensors=[], p_sensors=[], heaters=[], pumps=[]):\n\n # Have 2 mp Pools, one reading the serial and writing to the ard_dictionary and writing values to the\n # sensor objects, the other should convert the ard_dictionary to a json file for the website\n # Have an input dictionary with two levels of mp.manager.dict(), which the user can use to send\n # new setpoints to the objects. 
Make sure the logic will pick up on those\n # TODO: Add logic to determine whether the child process died and to revive it\n\n self.ARD_RETURNALL = b'!'\n self.WRITETIMEOUT = 0.25\n self.ard_dictionary = ard_dictionary\n self.ard_commands = ard_commands\n self.t_sensors = t_sensors\n self.p_sensors = p_sensors\n self.heaters = heaters\n self.pumps = pumps", "def prepare(self) -> None:\n for name, step, kwargs in self.steps:\n self.stream = step(self.stream, **kwargs)", "def init(self, HOST, PORT, BACKLOG):\n s = socket(AF_INET, SOCK_STREAM)\n s.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n #s.setblocking(0)\n s.bind((HOST, PORT))\n s.listen(BACKLOG)\n # Add socket to list of available inputs\n self.server = s\n self.inputs.append(s)\n self.log(\"Bound socket to port: %s\", PORT)\n self.log(\"Sequence initialized to: %s\", self.seqNum)", "def init_stream_handler(\n self, \n logger, \n loop, \n netconf_ip, \n netconf_port,\n statistics,\n xml_to_json_translator):\n self._logger = logger\n self._asyncio_loop = loop\n self._encoding = \"xml\"\n self._netconf_ip = netconf_ip\n self._netconf_port = netconf_port\n self._stat = statistics\n self._xml_to_json_translator = xml_to_json_translator", "def _initialize(self):\n self.send_init_command()", "def __init__(self, process=None, parent=None, **kwargs):\n super(ProcessIO, self).__init__(**kwargs)\n self.process = process\n self.parent = parent\n self.default_output = process.default_output", "def __init__(self):\n self.running = False\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.p1 = None\n self.p2 = None", "def __init__(self, stream):\n self.stream = stream\n self.queue = Queue()\n self.start_thread()", "def __init__(self):\n\n super(_IOManager, self).__init__()\n self.__poll = iopoll.IOPoll()\n self.__et = self.__poll.set_edge_triggered(True)\n self.__wrappers = {}\n self.__disconnected_wrappers = []\n self.__running = True\n\n self.__wrapper_lock = threading.RLock()\n self.__poll_lock = threading.RLock()\n self.__logger = logging.getLogger('Borg.Brain.Util.IOManager')\n self.__logger.addHandler(nullhandler.NullHandler())\n self.__empty_time = time.time() + 10\n self.__next_tick = time.time() + 0.1\n self.__parent_thread = threading.current_thread()\n\n self.__read_list = set([])\n self.__write_list = set([])\n\n self.__ticker = Ticker(10)\n self.__saved_time = 0\n\n # Set up wake up pipe in non-blocking mode\n self.__wakeup_read, self.__wakeup_write = os.pipe()\n fl = fcntl.fcntl(self.__wakeup_read, fcntl.F_GETFL)\n fcntl.fcntl(self.__wakeup_read, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n fl = fcntl.fcntl(self.__wakeup_write, fcntl.F_GETFL)\n fcntl.fcntl(self.__wakeup_write, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n self.__poll.register(self.__wakeup_read, True, False, True)\n\n # Start IOManager thread\n self.start()", "def __init__(self, config):\n self.config = config\n self.received_messages = []\n self.processed_messages = []\n\n self.setup()", "def init_comms(self):\n mess = b'SHO' + self.end_mess_bytes\n self.sock.sendall(mess)", "def init_drone(self):\n #if self.log_level:\n self.drone.log.set_level(0)\n self.drone.connect()\n self.set_video_encoder_rate(3)\n self.drone.start_video()\n\n self.drone.subscribe(self.drone.EVENT_FLIGHT_DATA,\n self.flight_data_handler)\n self.drone.subscribe(self.drone.EVENT_LOG_DATA,\n self.log_data_handler)\n self.drone.subscribe(self.drone.EVENT_FILE_RECEIVED,\n self.handle_flight_received)", "def __init__(self, callback, 
parent):\n IPC.__init__(self, UPSTREAM_CHANNEL, DOWNSTREAM_CHANNEL, callback)\n self.parent = parent\n self.controller = parent.controller\n self.logger = get_logger(\"ipc_server\")", "def Open(self):\n if self.writer is None:\n self.writer = self.IO.Open(self.channel_name, adios2.Mode.Write)", "async def async_setup(self):\n locks = (\n await self._api.async_get_operable_locks(self._august_gateway.access_token)\n or []\n )\n doorbells = (\n await self._api.async_get_doorbells(self._august_gateway.access_token) or []\n )\n\n self._doorbells_by_id = {device.device_id: device for device in doorbells}\n self._locks_by_id = {device.device_id: device for device in locks}\n self._house_ids = {\n device.house_id for device in itertools.chain(locks, doorbells)\n }\n\n await self._async_refresh_device_detail_by_ids(\n [device.device_id for device in itertools.chain(locks, doorbells)]\n )\n\n # We remove all devices that we are missing\n # detail as we cannot determine if they are usable.\n # This also allows us to avoid checking for\n # detail being None all over the place\n self._remove_inoperative_locks()\n self._remove_inoperative_doorbells()\n\n self.activity_stream = ActivityStream(\n self._hass, self._api, self._august_gateway, self._house_ids\n )\n await self.activity_stream.async_setup()", "def init():\n get_writer_session()", "def _setUpStdStreams(self):\n if self.options.buffer:\n if self._stdout_buffer is None:\n self._stdout_buffer = self._makeBufferedStdStream()\n if self._stderr_buffer is None:\n self._stderr_buffer = self._makeBufferedStdStream()\n sys.stdout = self._stdout_buffer\n sys.stderr = self._stderr_buffer", "def _initialize_buffers(self) -> None:", "def set_stream(self):\n\n if not self.auth:\n raise AccessError(\n \"Please use the remote() method to set rsync authorization or use remote(public=True) for public data\")\n elif not self.initial_stream.task:\n raise AccessError(\"No files to download.\")\n else:\n self.stream = self.get_stream()\n\n # set stream source based on access mode\n if self.access_mode == 'rsync':\n self.stream.source = self.remote_base\n elif self.access_mode == 'curl':\n self.stream.source = join(self.remote_base, 'sas').replace(sep, '/')\n\n # set stream destination\n self.stream.destination = self.base_dir\n\n # set client env dict based on access mode\n if self.access_mode == 'rsync':\n key = 'RSYNC_PASSWORD'\n elif self.access_mode == 'curl':\n key = 'CURL_PASSWORD'\n self.stream.cli.env = {key: self.auth.password} if self.auth.ready() else None\n\n if self.stream.source and self.stream.destination:\n for task in self.initial_stream.task:\n self.set_stream_task(task)\n ntask = len(self.stream.task)\n if self.stream.stream_count > ntask:\n if self.verbose:\n print(\"SDSS_ACCESS> Reducing the number of streams from %r to %r, the number of download tasks.\" % (\n self.stream.stream_count, ntask))\n self.stream.stream_count = ntask\n self.stream.streamlet = self.stream.streamlet[:ntask]", "def _init_io(self):\n GPIO.setwarnings(False)\n GPIO.setmode( GPIO.BCM )\n pins = [ self._spi_dc ]\n for pin in pins:\n GPIO.setup( pin, GPIO.OUT )", "def init(self):\n self.dispatcher.start()\n self.replyer.start()", "def __init__(self, fd=None):", "def _initialize_metadata(self) -> None:\n\n # Write the pidfile. 
The SchedulerService will monitor it after a grace period.\n self.write_pid()\n self.write_process_name()\n self.write_fingerprint(ensure_text(self.options_fingerprint))\n self._logger.info(f\"pantsd {VERSION} running with PID: {self.pid}\")\n self.write_socket(self._server.port())", "def __init__(self, stream):\n self.stream = stream", "def __init__(self, stream):\n self.stream = stream", "def __init__(self, stream, address, server):\n logger.info(\"connection - address: %s\", address)\n self.stream = stream\n self.address = address\n self.server = server\n self.stream.set_close_callback(self._on_disconnect)\n self.wait()", "def __init__(self, env, name, num_ports):\n NetworkDevice.__init__(self, env, name, num_ports)\n self.reception_records = {}\n self.env.process(self.listen_for_messages(self.do_timestamp_messages))", "def setup(self):\n raise NotImplementedError(\"DataStream does not implement setup.\")", "def __init__(self,\n comms_address,\n args=\"\",\n auto_reopen=False,\n open_on_start=True):\n self.comms_address = comms_address\n working_directory = os.path.dirname(comms_address)\n if working_directory:\n comms_address = comms_address.replace(working_directory, \"./\")\n else:\n working_directory = None\n super(PtyTransport, self).__init__(\n command=comms_address,\n args=args,\n auto_reopen=auto_reopen,\n open_on_start=open_on_start,\n working_directory=working_directory)\n self.primary = None\n self.secondary = None", "def __init__(self, env, name):\n NetworkDevice.__init__(self, env, name, 1)\n self.env.process(self.listen_for_messages(self.echo))", "def __init__(self):\n log.msg(\"Initializing Twitch parser.\")\n\n # initialize our data members\n self.streams = tuple()\n self.crc32 = 0", "def __init__(self, cfg, commands, audio_record, audio_play, close_event):\n\n multiprocessing.Process.__init__(self)\n\n self.cfg = cfg\n self.acc = None\n self.acc_cb = None\n self.call = None\n\n self.commands = commands\n self.local_commands = deque()\n\n self.audio_record = audio_record\n self.audio_recording = False\n\n self.audio_play = audio_play\n self.audio_playing = False\n self.local_audio_play = deque()\n\n self.last_frame_id = 1\n self.message_queue = []\n\n self.close_event = close_event\n\n self.black_list = defaultdict(int)", "def setup(self):\n # Instrument names\n instruments = list(self.features_df[\"instrument\"].unique())\n\n # Get Muxes for each instrument.\n inst_muxes = [self._instrument_mux(i) for i in instruments]\n\n # Construct the streams for each mux.\n mux_streams = [pescador.Streamer(x) for x in inst_muxes\n if x is not None]\n\n # Construct the master mux\n master_mux = pescador.mux(mux_streams, **self.master_mux_params)\n # We have to wrap the mux in a stream so that the buffer\n # knows what to do with it.\n self.master_stream = pescador.Streamer(master_mux)\n\n # Now construct the final streamer\n if self.use_zmq:\n self.buffered_streamer = zmq_buffered_stream(\n self.master_stream, self.batch_size)\n else:\n self.buffered_streamer = buffer_stream(\n self.master_stream, self.batch_size)", "def __init__(self):\r\n self.client_socket = socket.socket() # the socket of the client.\r\n self.communicator = Communicator()\r\n self.events_handler = EventsHandler(self.client_socket)\r\n self.running = True\r\n self.display_resolution = DEFAULT_DISPLAY_RESOLUTION\r\n self.screen = self.get_display()", "def initialize(self):\n self.ros.enable()\n self.phone_link.enable()", "def SetUp(self):\n self.buffer_file = buffer_file_common.BufferFile(\n self.args, 
self.logger.name, self.GetDataDir())\n\n self.attachments_tmp_dir = os.path.join(self.GetDataDir(),\n _TEMPORARY_ATTACHMENT_DIR)\n # Remove the attachments tmp dir, if Instalog terminated last time.\n if os.path.exists(self.attachments_tmp_dir):\n shutil.rmtree(self.attachments_tmp_dir)\n file_utils.TryMakeDirs(self.attachments_tmp_dir)", "def __init__(self):\n\t\tself._logger = None\n\t\tself._instanciate_logger()\n\t\tself._video_manager = VideoManager(self, self._logger)\n\t\tself._video_thread = None\n\t\tself._audio_manager = AudioManager(self, self._logger)\n\t\tself._audio_thread = None\n\t\tself._input_thread = None\n\t\tself._trigger_manager = None\n\t\tself.is_running = False", "def open(self):\n broker = os.path.join(getsitepackages()[0], 'pynq_networking', 'rsmb',\n 'rsmb', 'src', 'broker_mqtts')\n\n self.close()\n os.system(f\"nohup {broker} > {self.log} &\")\n\n for t in MQTT_PACKET_TYPES:\n bind_layers(MQTT, t, {'type': t.type})\n\n bind_layers(TCP, MQTT_Stream, {'dport': self.mqtt_port})\n bind_layers(TCP, MQTT_Stream, {'sport': self.mqtt_port})\n\n for t in MQTTSN_PACKET_TYPES:\n bind_layers(MQTTSN, t, {'type': t.type})\n\n bind_layers(UDP, MQTTSN, {'dport': self.mqttsn_port})\n bind_layers(UDP, MQTTSN, {'sport': self.mqttsn_port})", "def __init__(self, devPath=None, debugOut=None, noProto=False, connectNow=True):\n\n if devPath is None:\n ports = util.findPorts()\n if len(ports) == 0:\n raise Exception(\"No Meshtastic devices detected\")\n elif len(ports) > 1:\n raise Exception(\n f\"Multiple ports detected, you must specify a device, such as {ports[0]}\")\n else:\n devPath = ports[0]\n\n logging.debug(f\"Connecting to {devPath}\")\n\n # Note: we provide None for port here, because we will be opening it later\n self.stream = serial.Serial(\n None, 921600, exclusive=True, timeout=0.5)\n\n # rts=False Needed to prevent TBEAMs resetting on OSX, because rts is connected to reset\n self.stream.port = devPath\n # OS-X/Windows seems to have a bug in its serial driver. It ignores that we asked for no RTSCTS\n # control and will always drive RTS either high or low (rather than letting the CP102 leave\n # it as an open-collector floating pin). Since it is going to drive it anyways we want to make\n # sure it is driven low, so that the TBEAM won't reset\n # Linux does this properly, so don't apply this hack (because it makes the reset button not work)\n if platform.system() != 'Linux':\n self.stream.rts = False\n self.stream.open()\n\n StreamInterface.__init__(\n self, debugOut=debugOut, noProto=noProto, connectNow=connectNow)", "def _initialize(self):\n self.flush()\n print(\"Initializing sensor...\")\n try:\n self.get_sample()\n print('Initialization successful')\n except:\n print('Initialization failed. 
Please disconnect and reconnect sensor.')", "def __init__(self):\n self.data0 = [] # This will hold data from ADC0\n self.data1 = [] # This will hold data from ADC1\n self.dev = _configure_device()", "def __init__(self, mpi_comm, remote_rank=0):\n self.mpi_comm = mpi_comm\n self.remote_rank = remote_rank\n self.status = MPI.Status()\n self._outbox = []\n self.recv_buffer = None", "def setup_own_producer(self) -> None:\n producer = Producer(self.name, self.call, self.input, self.output, self.scopes)\n for scope in self.scopes:\n self.producers[scope].append(producer)", "def start(self):\n print(\"Init the programm...\")\n\n # We need a lock to prevent too fast save and load actions of the config\n self._config_lock = Lock()\n\n # Create the instance of the config\n self._config_instance = ConfigService.instance(self._config_lock)\n self._config = self._config_instance.config \n\n # Prepare the queue for the output\n self._output_queue_lock = Lock()\n self._output_queue = Queue(2)\n self._effects_queue = Queue(2)\n self._audio_queue_lock = Lock()\n self._audio_queue = Queue(2)\n self._server_queue_lock = Lock()\n self._server_queue = Queue(2)\n\n # Prepare all notification queues\n self._notification_queue_output_in = Queue(2)\n self._notification_queue_output_out = Queue(2)\n\n self._notification_queue_audio_in = Queue(2)\n self._notification_queue_audio_out = Queue(2)\n\n self._notification_queue_effects_in = Queue(2)\n self._notification_queue_effects_out = Queue(2)\n\n self._notification_queue_webserver_in = Queue(2)\n self._notification_queue_webserver_out = Queue(2)\n\n self._notification_queue_server_in = Queue(2)\n self._notification_queue_server_out = Queue(2)\n\n # Only activate the output if I'm inside the output mode.\n if(not self._config[\"development_config\"][\"deactivate_output\"]):\n #Start Output Service\n self._output = Output()\n self._output_process = Process(\n target=self._output.start, \n args=(\n self._config_lock, \n self._notification_queue_output_in, \n self._notification_queue_output_out, \n self._output_queue, \n self._output_queue_lock, \n ))\n self._output_process.start()\n else:\n # Start Output Dummy Service\n self._output = Output()\n self._output_process = Process(\n target=self._output.start_dummy, \n args=(\n self._config_lock, \n self._notification_queue_output_in, \n self._notification_queue_output_out, \n self._output_queue, \n self._output_queue_lock\n ))\n self._output_process.start()\n\n # Start the Effect Service\n self._effects = Effects()\n self._effects_process = Process(\n target=self._effects.start, \n args=(\n self._config_lock, \n self._notification_queue_effects_in, \n self._notification_queue_effects_out, \n self._output_queue, \n self._output_queue_lock,\n self._effects_queue,\n self._server_queue,\n self._server_queue_lock,\n self._audio_queue,\n self._audio_queue_lock\n ))\n self._effects_process.start()\n\n # Start Notification Service\n self._notification_service = NotificationService()\n self._notification_service_process = Process(\n target=self._notification_service.start, \n args=(\n self._config_lock, \n self._notification_queue_output_in, \n self._notification_queue_output_out, \n self._notification_queue_effects_in, \n self._notification_queue_effects_out, \n self._notification_queue_webserver_in, \n self._notification_queue_webserver_out, \n ))\n self._notification_service_process.start()\n\n #Start Webserver\n self._webserver = Webserver()\n self._webserver_process = Process(\n target=self._webserver.start, \n args=(\n 
self._config_lock, \n self._notification_queue_webserver_in, \n self._notification_queue_webserver_out,\n self._effects_queue, \n ))\n self._webserver_process.start()\n \n #Start Server\n self._server = ServerService()\n self._server_process = Process(\n target=self._server.start, \n args=(\n self._config_lock, \n self._notification_queue_server_in, \n self._notification_queue_server_out,\n self._server_queue,\n self._server_queue_lock\n ))\n self._server_process.start()\n\n #Start audio process\n self._audio = AudioProcessService()\n self._audio_process = Process(\n target=self._audio.start, \n args=(\n self._config_lock, \n self._notification_queue_server_in, \n self._notification_queue_server_out,\n self._audio_queue,\n self._audio_queue_lock\n ))\n self._audio_process.start()\n\n print(\"Init finished\")\n\n try:\n\n print(\"Programm started...\")\n\n self._cancel_token = False\n\n # Do nothing with this thread. Just wait for the exit.\n while not self._cancel_token:\n sleep(10)\n \n\n\n except KeyboardInterrupt:\n\n print(\"Stop the programm...\")\n \n self._output_process.terminate()\n self._effects_process.terminate()\n self._notification_service_process.terminate()\n self._webserver_process.terminate()\n\n print(\"Programm stopped\")", "def __init__(self, manager, device_config, log_file_name, log_directory):\n super().__init__(\n manager,\n device_config,\n log_file_name=log_file_name,\n log_directory=log_directory)\n self._commands.update(COMMANDS)\n self._regexes.update(REGEXES)\n self._timeouts.update(TIMEOUTS)\n self._serial_port = None", "def starting_stream(self, stream):\n self.cur_stream_observations = 0\n self.stream = stream", "def init_communications(self):\n from os.path import exists\n from serial import Serial\n import serial.tools.list_ports\n if self.ser is not None:\n try:\n info(\"Checking whether device is still responsive...\")\n self.ser.write(self.id_query)\n debug(\"%s: Sent %r\" % (self.ser.name,self.id_query))\n reply = self.read(count=self.id_reply_length)\n if not self.id_reply_valid(reply):\n debug(\"%s: %r: invalid reply %r\" % (self.ser.name,self.id_query,reply))\n info(\"%s: lost connection\" % self.ser.name)\n self.ser = None\n else: info(\"Device is still responsive.\")\n except Exception as msg:\n debug(\"%s: %s\" % (Exception,msg))\n self.ser = None\n\n if self.ser is None:\n devices = serial.tools.list_ports.comports()\n debug('devices: %r' % devices)\n for item in devices:\n debug('device: %r' % item)\n try:\n ser = Serial(item.device,baudrate=self.baudrate)\n ser.write(self.id_query)\n debug(\"%s: Sent %r\" % (ser.name,self.id_query))\n reply = self.read(count=self.id_reply_length,ser=ser)\n if self.id_reply_valid(reply):\n self.ser = ser\n info(\"Discovered device at %s based on reply %r\" % (self.ser.name,reply))\n break\n except Exception as msg:\n debug(\"%s: %s\" % (Exception,msg))\n if self.ser is not None: break", "def __init__(self):\n\tself.position1 = NavSatFix()\n\tself.position2 = NavSatFix()\n\n rospy.init_node(\"communicator\", anonymous=True)\n\n rospy.Subscriber('/whole/pirate/position', NavSatFix, self.update_position1)\n rospy.Subscriber('/whole/greenBoat/position', NavSatFix, self.update_position2)\n\n self.comu1_pub = rospy.Publisher('position1', NavSatFix, queue_size=10)\n self.comu2_pub = rospy.Publisher('position2', NavSatFix, queue_size=10)\n\n self.freq = rospy.get_param(\"config/rate\")\n self.rate = rospy.Rate(self.freq)\n\t\n\tself.position1_publisher()\n\t#self.position2_publisher()", "def __init__(self, args):\n 
self.args = args\n self.sender, receiver = mp.Pipe()\n self.plotter = RealPlotter()\n self.plot_process = mp.Process(\n target=self.plotter, args=(receiver,), daemon=True)\n self.plot_process.start()", "def initialize(self, config: BaseDataPipeConfig) -> None:\n super().initialize(config)\n # Configuration\n self.stop_on_close = config.stop_on_close.value\n self.pipe: str = config.pipe.value\n self.pipe_mode: SocketMode = config.pipe_mode.value\n self.pipe_address: ZMQAddress = config.pipe_address.value\n self.pipe_format: MIME = config.pipe_format.value\n self.batch_size: int = config.batch_size.value\n self.ready_schedule_interval: int = config.ready_schedule_interval.value\n # Set up FBDP protocol\n if self.pipe_mode == SocketMode.BIND:\n # server\n self.protocol = FBDPServer()\n self.protocol.on_exception = self.handle_exception\n self.protocol.on_accept_client = self.handle_accept_client\n self.protocol.on_schedule_ready = self.handle_schedule_ready\n # We have an endpoint to bind\n self.endpoints[PIPE_CHN] = [self.pipe_address]\n else:\n # client\n self.protocol = FBDPClient()\n # common parts\n self.protocol.log_context = self.logging_id\n self.protocol.batch_size = self.batch_size\n self.protocol.on_pipe_closed = self.handle_pipe_closed\n self.protocol.on_produce_data = self.handle_produce_data\n self.protocol.on_accept_data = self.handle_accept_data\n # Create pipe channel\n self.mngr.create_channel(DealerChannel, PIPE_CHN, self.protocol, wait_for=Direction.IN)", "def connection_made(self, transport):\n self.transport = transport\n self.buf = bytes()\n self.msgs_recvd = 0\n print('Reader connection created')", "def __init__(self, in_fd, out_fd, info_fd):\n self.in_fd = in_fd\n self.out_fd = out_fd\n self.info_fd = info_fd\n self.total_bytes = 0\n self.start_time = None\n self.total_time = 0.0\n self.average_speed = 0.0", "def teleopInit(self):\n # self.drive.setSafetyEnabled(True)\n self.compressor.start()\n pass", "def plugin_init(self):\n\n self.url_handlers = {'global': self._default_handler,\n 'jid': {}}\n \n self.streamSessions = []\n\n register_stanza_plugin(Iq, stanza.OOBTransfer)\n register_stanza_plugin(Message, stanza.OOB)\n register_stanza_plugin(Presence, stanza.OOB)\n\n self.xmpp.register_handler(\n Callback('OOB Transfer',\n StanzaPath('iq@type=set/oob_transfer'),\n self._handle_transfer))\n self.xmpp.register_handler(\n Callback('OOB Transfer',\n StanzaPath('iq@type=result/oob_transfer'),\n self._handle_finished))\n self.xmpp.register_handler(\n Callback('OOB Transfer',\n StanzaPath('iq@type=error/oob_transfer'),\n self._handle_finished))\n \n self.register_url_handler(handler=self._download_file)", "def create_stream(self):\n pass", "def open(self):\n # NOTE: caller MUST open for writing BEFORE opening for reading.\n self._fd_out = self._open_fifo(self._path_in, os.O_WRONLY)\n self._fd_in = self._open_fifo(self._path_out, os.O_RDONLY)", "def setup(self):\n # define misfit function and adjoint source generator\n self.misfit = getattr(misfit, PAR.MISFIT)\n self.adjoint = getattr(adjoint, PAR.MISFIT)\n\n # define seismic data reader and writer\n self.reader = getattr(readers, PAR.READER)\n self.writer = getattr(writers, PAR.WRITER)\n\n # prepare channels list\n self.channels = []\n for char in PAR.CHANNELS:\n self.channels += [char]", "def __init__(self, *args, **kwargs):\n mp.Process.__init__(self)\n self._args = args\n self._kwargs = kwargs\n self._host_conn, self._proc_conn = mp.Pipe()\n self.daemon = True\n self.start()\n reply = self._host_conn.recv()\n if 
isinstance(reply, Exception):\n raise reply", "def _init_streams(self):\n @self.streams_wrapper(\"networkx\")\n def get_nx_stream(extractor_context, graph):\n \"\"\"\n :param graph: networkx.Graph\n :returns: projx.nx_extractor\n \"\"\"\n return nx_xtrct.nx_stream(extractor_context, graph)\n\n @self.streams_wrapper(\"neo4j\")\n def get_neo4j_stream(extractor_context, graph):\n \"\"\"\n :param graph: networkx.Graph\n :returns: projx.nx_extractor\n \"\"\"\n return neo4j_xtrct.neo4j_stream(extractor_context, graph)\n\n @self.streams_wrapper(\"edgelist\")\n def get_edgelist_stream(extractor_context, graph):\n \"\"\"\n :param graph: networkx.Graph\n :returns: projx.nx_extractor\n \"\"\"\n return edgelist_xtrct.edgelist_stream(extractor_context, graph)", "def __init__(self):\n print(\"Starting picam Camera Object\")\n self.cam = picamera.PiCamera()\n self.stream = io.BytesIO()", "def __init__(\n self, env, link,\n transmitter_port, receiver_port):\n self.env = env\n self.link = link\n self._transmitter_port = transmitter_port\n self._receiver_port = receiver_port\n env.process(self.run())", "def __init__(self, output_file=None, output_size=None, output_format=None):\n super(StreamerClient, self).__init__()\n # Constants\n self.MCAST_GRP = '224.0.0.1'\n self.MCAST_PORT = 5007\n self.sync_start = '#!-START-!#'\n self.sync_kill = '#!-QUIT-!#'\n self.chunk_length = 4096 # or 1024\n self.sock = None\n # Will change throughout \n self.width = 0\n self.height = 0\n self.received_frames = 0\n self.halt = False\n self.output_file = output_file\n if output_size is None:\n self.output_size = (1280, 720)\n else:\n self.output_size = output_size\n if output_format is None:\n self.output_format = cv2.VideoWriter_fourcc(*'mpeg')\n else:\n self.output_format = cv2.VideoWriter_fourcc(*output_format)\n self.fps = 20\n self.video_writer = None", "def start_stream(self):\n self.handle = lt.add_magnet_uri(self.lt_ses, self.queue[0].magnet_link, # pylint: disable=no-member\n self.params)\n self.handle.set_sequential_download(True)\n\n self.stream_thread = threading.Thread(target=self._stream,\n name='stream')\n self.stream_thread.start()", "def __init__(self, session):\n self._session = session\n self._channel = None\n self._stdout = None\n self._stderr = None", "def __init__(self, loglevel=logging.INFO, file_logger=True, console_logger=True, mqtt_logger=None,\n config_filename=\"hyperstream_config.json\"):\n self.config_filename = config_filename\n self._session = None\n\n self.parameters = dict(\n loglevel=loglevel,\n file_logger=file_logger,\n console_logger=console_logger,\n mqtt_logger=mqtt_logger\n )\n\n self.logger = HyperStreamLogger(\n default_loglevel=loglevel, file_logger=file_logger, console_logger=console_logger, mqtt_logger=mqtt_logger)\n self.config = HyperStreamConfig(filename=config_filename)\n self.client = Client(self.config.mongo)\n\n # Define some managers\n self.channel_manager = ChannelManager(self.config.plugins)\n self.plate_manager = PlateManager()\n self.workflow_manager = WorkflowManager(channel_manager=self.channel_manager, plate_manager=self.plate_manager)\n self.plugins = PluginContainer()\n\n # The following are to keep pep happy - will be populated below\n self.tools = None\n self.factors = None\n\n self.current_workflow = None # Used in the new API - the current workflow being defined\n self.populate_tools_and_factors()", "def __init__(self):\n\n # For now, we'll connect to the target via the Apollo debug controller.\n # This should be replaced by a high-speed USB link soon; but for 
now\n # we'll use the slow debug connection.\n self._debugger = ApolloDebugger()\n self._serial = self._find_serial_connection()", "def __init__(self, runtime_dir=\"/tmp/tbots\"):\n\n # inputs to full_system\n self.robot_status_sender = ThreadedUnixSender(runtime_dir + ROBOT_STATUS_PATH)\n self.ssl_wrapper_sender = ThreadedUnixSender(runtime_dir + SSL_WRAPPER_PATH)\n self.ssl_referee_sender = ThreadedUnixSender(runtime_dir + SSL_REFEREE_PATH)\n self.sensor_proto_sender = ThreadedUnixSender(runtime_dir + SENSOR_PROTO_PATH)\n\n # outputs from full_system\n self.world_listener = ThreadedUnixListener(runtime_dir + WORLD_PATH, World)\n self.primitive_listener = ThreadedUnixListener(\n runtime_dir + PRIMITIVE_PATH, PrimitiveSet\n )\n\n # override the tactic\n self.tactic_override = ThreadedUnixSender(runtime_dir + TACTIC_OVERRIDE_PATH)\n\n # TODO (#2510) rename to full_system\n self.full_system_process = Popen([\"software/unix_full_system\"])", "def initialize_comm(self, config_options):\n try:\n self.comm = MPI.COMM_WORLD\n self.comm.Set_errhandler(MPI.ERRORS_ARE_FATAL)\n except AttributeError as ae:\n config_options.errMsg = \"Unable to initialize the MPI Communicator object\"\n raise ae\n\n try:\n self.size = self.comm.Get_size()\n except MPI.Exception as mpi_exception:\n config_options.errMsg = \"Unable to retrieve the MPI size.\"\n raise mpi_exception\n\n try:\n self.rank = self.comm.Get_rank()\n except MPI.Exception as mpi_exception:\n config_options.errMsg = \"Unable to retrieve the MPI processor rank.\"\n raise mpi_exception" ]
[ "0.62361443", "0.6131124", "0.6047162", "0.60436773", "0.59891397", "0.598821", "0.5979925", "0.59722435", "0.59679276", "0.59299994", "0.5921529", "0.5889616", "0.588344", "0.5872382", "0.58591175", "0.58533263", "0.5845079", "0.5841991", "0.582825", "0.57430446", "0.5731562", "0.5724728", "0.57136035", "0.570926", "0.56949186", "0.56889033", "0.56846404", "0.56775564", "0.5670981", "0.5658409", "0.56373334", "0.5631435", "0.5623691", "0.5621684", "0.56178504", "0.5609049", "0.5609033", "0.5607542", "0.5595911", "0.5587249", "0.5584505", "0.5578461", "0.5568508", "0.55607945", "0.55581504", "0.5557089", "0.5556575", "0.55558485", "0.5553933", "0.5536805", "0.5528691", "0.5527517", "0.5524914", "0.5523202", "0.5509585", "0.5502623", "0.5502623", "0.550179", "0.54990965", "0.54973036", "0.54937106", "0.5473095", "0.54729277", "0.5472699", "0.54663247", "0.54645914", "0.5464359", "0.5463475", "0.5453805", "0.5449484", "0.5447842", "0.54464084", "0.54432225", "0.5440453", "0.5434787", "0.54337287", "0.5431691", "0.5431411", "0.5430664", "0.54300976", "0.5427004", "0.5418849", "0.54173964", "0.541237", "0.5405524", "0.54039925", "0.54004866", "0.53967404", "0.5385982", "0.5384368", "0.5384301", "0.53788364", "0.5374756", "0.53737164", "0.5373335", "0.5367768", "0.5359859", "0.53497154", "0.5347975", "0.53435117" ]
0.5991638
4
Returns the total number of available frames.
def __len__(self):
    return len(self.keys)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_frames(self) -> int:\n return self.num_frames", "def size(self):\n if self.frames is None:\n return 0\n return self.frames.size", "def frames(self):\n frame_count = 0\n if self.is_video() or self.is_audio():\n if self.__dict__['nb_frames']:\n try:\n frame_count = int(self.__dict__['nb_frames'])\n except ValueError:\n raise FFProbeError('None integer frame count')\n return frame_count", "def FrameCount(self):\r\n\t\treturn self._get_attribute('frameCount')", "def get_num_frames(self):\n return self._frames.shape[0]", "def capacity(self):\r\n return len(self.frames)", "def num_frames(self):\n return len(self.video)", "def __calculate_number_of_frames(self):\n # Save current position\n current_pos = self.__file_object.tell()\n\n # Go to start of first frame\n self.__file_object.seek(self.__first_frame_raw_data_position)\n self.number_of_frames = 0\n\n while True:\n if not self.__file_object.read(self.__frame_raw_data_size):\n break\n\n self.__file_object.readline()\n self.number_of_frames += 1\n\n # Restore file pointer\n self.__file_object.seek(current_pos)\n print('Number of frames:', self.number_of_frames)", "def num_frames(self):\n return self._first_rgb.shape[1]", "def remaining_frames(self):\n return self.sound.nframes - self.current_frame", "def total_buffers_count(self) -> int:\n return int(self._pts / self._duration)", "def count_frames():\n frames = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n frame_count = []\n for f_r in frames:\n frame_count.append(f_r)\n return len(frame_count)", "def __len__(self):\n return int(np.ceil(self.total_frame_count / self.batch_size))", "def get_frame_size(self) -> Tuple[int, int]:\n return self.__sim.frame_size()", "def frame_length(self):\r\n return self.config.frame_length", "def _frameLen(self):\n return self.numCols * self.numRows", "def get_frame_size(self):\n return self._frames.shape[-1]", "def num_available(self) -> int:\n return len(self)", "def getTotalFramesVid(srcVideoPath):\n cap = cv2.VideoCapture(srcVideoPath)\n # if the videoCapture object is not opened then exit without traceback\n if not cap.isOpened():\n print(\"Error reading the video file !!\")\n return 0\n\n tot_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n cap.release()\n return tot_frames", "def total_buffers_count(self) -> int:\n return self._counter", "def pending_nb_bytes(self):\n if self.df_length is not None:\n if self.df_length > 0:\n return self.df_length - len(self.buf)\n\n if self.cf_length is not None:\n if self.cf_length > 0:\n return self.cf_length - len(self.buf)\n \n return 4", "def frame_size(self):\n return self._frame_size", "def bframes_count(**kwargs) -> int:\n path_project = kwargs['project_name']\n project_name = path_project.split( '/' )[-1].strip( '.' 
)\n if project_name in frames_count:\n return frames_count[project_name]['count']\n else:\n bpy.ops.wm.open_mainfile( filepath=path_project )\n count_frames = bpy.context.scene.frame_end\n frames_count[project_name] = {'project_name': project_name, 'count': count_frames}\n return count_frames", "def get_num_cards(self):\n \n return self._hand.get_size()", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n size += buf.size\n return size", "def _numQueuedTotal(self):\n queueSize = len(self.__queue) + len(self.__clientQueue)\n return queueSize", "def realFrameNumber(self, callback=None):\n count = 0\n theoreticalFrameNumber = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))\n if theoreticalFrameNumber > 30000:\n return theoreticalFrameNumber\n while(True):\n # Capture frame-by-frame\n ret, frame = self.video.read()\n if not ret:\n break\n if callback != None:\n callback(0.1 + (count / theoreticalFrameNumber) * 0.75, \"Calculating the number of frame\")\n count += 1\n return count", "def get_max_frames(self):\n return 8", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n if buf is not None:\n size += buf.size\n return size", "def get_num_frames(filename, ext='*.jpg'):\n if os.path.isdir(filename):\n return len(glob.glob(os.path.join(filename, ext)))\n elif os.path.isfile(filename):\n cmd = ('ffprobe -v 0 -count_frames -select_streams v:0 '\n '-show_entries stream=nb_read_frames -of '\n 'default=nokey=1:noprint_wrappers=1 ' + filename).split()\n pid = subprocess.run(cmd, stdout=subprocess.PIPE,\n universal_newlines=True)\n if pid.returncode != 0:\n return None\n nframes_expr = pid.stdout\n nframes = int(nframes_expr.rstrip())\n return nframes\n else:\n raise ValueError('Unexpect filename: {}'.format(filename))", "def n_total_files(self):\n return len(self.fileinfo)", "def full_frame_length(self):\n return self.height * self.width * 3", "def sample_count(self):\n if self._sample_count:\n return self._sample_count\n else:\n return self._wave.getnframes()", "def get_frame_width(self) -> int:\n return self.__sim.frame_size()[0]", "def in_waiting(self):\n [ack, txcount, rxcount] = self._GetResponseFrame()\n return rxcount", "def bspb_frameCounter():\n curTime = int(pm.currentTime())\n maxTime = int(pm.playbackOptions(q=True, maxTime=True))\n return '{0} / {1}'.format(curTime, maxTime)", "def num_packets(self):\n return int(np.ceil(self.layer.numNodes / self.num_packed_elements / self.num_lmts))", "def count(self) -> FrameLike:\n return super().count()", "def count(self) -> FrameLike:\n return super().count()", "def count(self) -> FrameLike:\n return super().count()", "def count(self) -> FrameLike:\n return super().count()", "def count(self):\n return(len(self.cards))", "def get_frame_size(*args):\n return _ida_frame.get_frame_size(*args)", "def count(self):\n return len(self.deck)", "def get_frame_retsize(*args):\n return _ida_frame.get_frame_retsize(*args)", "def size(self) -> int:\n return self.stat().size", "def num_cards(self):\n length=len(self.cards)\n return length", "def count(self):\n return len(self.read_ints())", "def usedspace(self):\n self.log.info(\"freespace\")\n nbytes = 0\n keys = list(self.downloads.keys())\n keys.sort()\n for key in keys:\n download = self.downloads[key]\n nbytes += download['size']\n self.log.info(\"returning:\" + str(nbytes))\n return nbytes", "def get_received_frames_count(self, iface):\n pytest.skip(\"Method is not supported by Iperf 
TG\")", "def count_remaining(self):\n\t\treturn self._queue.qsize()", "def count_remaining(self):\n\t\treturn self._queue.qsize()", "def number_frames(signal_len, frame_len, frame_step):\n frames = 1\n if signal_len > frame_len:\n temp = (1.0 * signal_len - frame_len)/frame_step\n frames += int(np.floor(temp))\n\n return frames", "def size(self):\n\t\treturn self._count", "def total_nt(self) -> int:\n return self.sequence.length", "def CapturedPacketCount(self):\n if self.force_auto_sync:\n self.get('CapturedPacketCount')\n return self._CapturedPacketCount", "def total_bytes_to_process(self) -> float:\n return pulumi.get(self, \"total_bytes_to_process\")", "def count(self):\n # TODO not implemented yet\n return 0", "def numberOfClasses(self):\n classes = self.classesAndFrames()\n return len(classes.keys())", "def get_total_length_of_probe_chains(self):\n return self.total_probe_length", "def size(self): #returns the size or number of items in the stack\n if self.is_empty():\n return 0\n else:\n return self.num_items", "def len(self):\n # print(self.processed_file_names)\n return self.len_", "def bytes_total(self):\n return int(self.status[\"pgmap\"][\"bytes_total\"])", "def get_total_cameras(self) -> int:\n return self.num_cameras", "def get_bytes_consumed(self):\n total = 0\n for event in self.iter_events(EVENT_NAME_BYTES_CONSUMED):\n total += event.data[\"bytes_consumed\"]\n\n return total", "def nbytes(self) -> int:\n return self._nbytes(False)", "def get_total_session_count(self) -> int:\n return self.streams_count", "def getLength(self):\n return self.count", "def get_size(self):\n cum_size = 0\n for stream in self.__streams.values():\n cum_size += sys.getsizeof(stream)\n for trace in stream:\n cum_size += sys.getsizeof(trace)\n cum_size += sys.getsizeof(trace.stats)\n cum_size += sys.getsizeof(trace.stats.__dict__)\n cum_size += sys.getsizeof(trace.data)\n cum_size += trace.data.nbytes\n # Add one percent buffer just in case.\n return cum_size * 1.01", "def _interFrameLen(self):\n return np.ceil((self.interFrameDuration * self.sampleRate) / self.downsample) * self.downsample", "def get_total_view_count(self):\n done = self.cur.execute(\"SELECT CAST(SUM(view_count) AS DECIMAL(10, 0)) FROM videos\")\n count = self.cur.fetchone()[0]\n return count", "def frame_width(self) -> int:\n pass", "def numReady(antReady) :\n return len(antReady.ready)", "def count_len(self):\n total = 0\n for filename in self.filenames:\n f = open(os.path.join(self.directory, filename))\n line_count = 0\n for _ in f:\n line_count += 1\n if line_count < self.window_size:\n continue\n else:\n total += line_count - self.window_size + 1\n return total", "def get_space_used():\n files = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status IN ('added', 'downloaded', 'unverified')\")\n\n total_size = 0\n for file in files:\n total_size += int(file['size'])\n return total_size", "def num_calls_total(self):\n return self._num_calls_total", "def get_length(self):\n return len(self.cards)", "def rx_packet_count(self):\n return self._rx_packet_count", "def getWidth(self):\n return frameWidth", "def frameWidth(self):\n return self._frame_width", "def get_frame_height(self) -> int:\n return self.__sim.frame_size()[1]", "def __len__(self):\n\t\treturn len(self._idle) + len(self._running)", "def size(self):\n return len(self.cards)", "def size(self):\n return len(self._cards)", "def count(self):\n return len(self.wallpapers)", "def num_frames(length, fsize, fshift):\n pad = (fsize - fshift)\n if length % fshift == 0:\n 
M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M", "def total_bytes_processed(self):\n total_bytes_processed = self._properties.get(\"totalBytesProcessed\")\n if total_bytes_processed is not None:\n return int(total_bytes_processed)", "def get_length(self):\n length = 0\n for card in self.decklist:\n length += card.amount\n return length", "def get_length(self):\n\t\treturn len(self._blocks)", "def frame_size(self):\n size = None\n if self.is_video():\n width = self.__dict__['width']\n height = self.__dict__['height']\n if width and height:\n try:\n size = (int(width), int(height))\n except ValueError:\n raise FFProbeError(\"None integer size %s:%s\" % (width, height))\n\n return size", "def duration(self):\n return self.sound.nframes", "def get_uds_3_frames_count(self, iface):\n pytest.skip(\"Method is not supported by Iperf TG\")", "def channel_size(self):\n if self.channels is None:\n return 0\n return self.channels.size", "def total_count(self) -> int:\n return self.__total_count", "def get_record_count(self):\n return os.path.getsize(self.path) / self._get_record_size()", "def calls_remaining(self) -> int:\n return self.usage_limit - self.current_usage", "def count(self):\n return self.size()", "def count(self):\n \n return len(self.img_lst)", "def count(self):\n return len(self._runs)", "def num_bytes(self):\n if self._num_bytes is None:\n status, info = self._resource._file.stat(\n timeout=(0 if self._timeout is None else self._timeout)\n )\n if not status.ok:\n raise OSError(\n \"\"\"XRootD error: {0}\nin file {1}\"\"\".format(\n status[\"message\"], self._file_path\n )\n )\n self._num_bytes = info.size\n\n return self._num_bytes", "def num_bytes(self):\n if self._num_bytes is None:\n status, info = self._resource._file.stat(\n timeout=(0 if self._timeout is None else self._timeout)\n )\n if not status.ok:\n raise OSError(\n \"\"\"XRootD error: {0}\nin file {1}\"\"\".format(\n status[\"message\"], self._file_path\n )\n )\n self._num_bytes = info.size\n\n return self._num_bytes" ]
[ "0.8600426", "0.77835464", "0.7621566", "0.75558245", "0.7531837", "0.7526261", "0.7391324", "0.7189588", "0.71559983", "0.70775414", "0.70692587", "0.7036695", "0.7004031", "0.69709444", "0.6951622", "0.688492", "0.68131065", "0.6812733", "0.6789151", "0.66947013", "0.66488767", "0.66001004", "0.6592747", "0.65701926", "0.6564938", "0.6561499", "0.6530733", "0.6520889", "0.6509262", "0.647645", "0.6471139", "0.6458702", "0.6456331", "0.6451922", "0.645154", "0.6443686", "0.64222026", "0.642136", "0.642136", "0.642136", "0.642136", "0.6421259", "0.641965", "0.64144915", "0.641336", "0.64094", "0.6383412", "0.63620096", "0.6352646", "0.63466597", "0.634375", "0.634375", "0.6328908", "0.63212365", "0.6320359", "0.63163173", "0.6297705", "0.6293008", "0.62918705", "0.62852746", "0.6284918", "0.62846404", "0.6284639", "0.6278952", "0.62780124", "0.6272427", "0.6257347", "0.6254126", "0.62491995", "0.6247549", "0.6243126", "0.6238075", "0.6228046", "0.62187785", "0.6215849", "0.62156683", "0.6207663", "0.62017983", "0.62002146", "0.6198065", "0.6181442", "0.6172983", "0.6170471", "0.6163588", "0.6153185", "0.61494476", "0.61444867", "0.61412317", "0.61400443", "0.6138791", "0.6137152", "0.6129649", "0.6129056", "0.61277425", "0.6123952", "0.61172706", "0.6108896", "0.61078346", "0.6105151", "0.6103284", "0.6103284" ]
0.0
-1
Computes the zorder of an element in the NdOverlay, taking into account possible batching of elements.
def get_zorder(self, overlay, key, el):
    spec = util.get_overlay_spec(overlay, key, el)
    try:
        return self.ordering.index(spec)
    except ValueError:
        self.ordering = sorted(self.ordering+[spec])
        return self.ordering.index(spec)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getz_index(self):\n return self._getz_index", "def optimise_z(z, *args):\n x, y, elements, coordinates = args\n window_com = np.array([x, y, z])\n return pore_diameter(elements, coordinates, com=window_com)[0]", "def z(self):\r\n return self.position.z", "def _set_planar_pixel_order(img):\n if img.ndim == 3:\n # C-order increments along the y-axis slowest (0), then x-axis (1),\n # then z-axis (2). We want it to go along the z-axis slowest, then\n # y-axis, then x-axis.\n img = np.swapaxes(img, 1, 2)\n img = np.swapaxes(img, 0, 1)\n\n return img.copy()", "def obtain_depth(self):\n self.z_buffer = image(self.image_plane.width, self.image_plane.height)\n for j in range(self.image_plane.height):\n for i in range(self.image_plane.width):\n single_point = None\n for ray_tracing in self.ray_tracer:\n ray_tracing.ray_direction(i, j)\n ray_tracing.sphere_to_ray()\n ray_tracing.ray_sphere_intersection()\n ray_tracing.hit_pos()\n hit_point = ray_tracing.getHit()\n\n if single_point is None:\n single_point = hit_point\n elif single_point is not None and single_point.z > hit_point.z:\n single_point = hit_point\n self.z_buffer.setColor(single_point, i, j)", "def z_index(self):\n return self._z_index", "def find_layer(z, params):\r\n N = len(params['d_list'])\r\n for i in range(N):\r\n if z <= params['layer_bottom_list'][i]:\r\n return i-1\r\n return N-1", "def n_z(self, level):\n resolution = self.resolution(level)\n return (self.z_extent // resolution + 63) // 64", "def getZ(self):\n\t\treturn self.coords.z", "def _sort_ns(self):\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n", "def sh_order(j):\n l = sh_degree(j)\n return j + l + 1 - dimension(l)", "def naive_order_calculation(self):\n\t\torder = 0\n\t\tfor pt in self.enumerate_points():\n\t\t\torder += 1\n\t\treturn order", "def idx_z(self, zval):\r\n iz = np.around((zval - self.oz) / self.dz)\r\n return int(iz)", "def standard_sorting(cls, zmat):\n if zmat is None:\n return None\n nats = len(zmat)\n ncoords = 3*nats - 6\n if nats < 4:\n return None\n else:\n r_coords = [0, 1, 3]\n a_coords = [2, 4]\n t_coords = [5]\n if nats > 4:\n extra = np.arange(6, ncoords+1)\n r_coords += extra[::4].tolist()\n a_coords += extra[1::4].tolist()\n t_coords += extra[2::4].tolist()\n return np.argsort(np.concatenate([r_coords, a_coords, t_coords]))", "def _derZ(self, w, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha) * self.wxInterpolators[y_pos - 1][z_pos](w, x)\n + alpha * self.wxInterpolators[y_pos][z_pos](w, x)\n )\n - (\n (1 - alpha) * self.wxInterpolators[y_pos - 1][z_pos - 1](w, x)\n + alpha * self.wxInterpolators[y_pos][z_pos - 1](w, x)\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n dfdz[c] = (\n (\n (1 - alpha) * self.wxInterpolators[i - 1][j](w[c], x[c])\n + alpha * 
self.wxInterpolators[i][j](w[c], x[c])\n )\n - (\n (1 - alpha)\n * self.wxInterpolators[i - 1][j - 1](w[c], x[c])\n + alpha * self.wxInterpolators[i][j - 1](w[c], x[c])\n )\n ) / (self.z_list[j] - self.z_list[j - 1])\n return dfdz", "def NN_z(x, y, con_ver, nbr_ver, cellsize):\n gx, gy, elevNNGrid = interpolate_to_grid(con_ver[:, 0], con_ver[:,1], con_ver[:,2], \n interp_type = \"natural_neighbor\", \n hres = cellsize[0])\n elev_NN = elevNNGrid[0, 0]\n if not(np.isnan(elev_NN)):\n elev_i = elev_NN\n else:\n print(\"elev_NN is nan: evaluating else loop\")\n d_nbr = np.zeros(3)\n for n in range(0, 3):\n d_nbr[n] = ((x - nbr_ver[n][0])**2 + (y - nbr_ver[n][1])**2)**0.5\n nearest_ver = nbr_ver[d_nbr.argmax(0)]\n elev_i = nearest_ver[2]\n return elev_i", "def _derZ(self, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha) * self.xInterpolators[y_pos - 1][z_pos](x)\n + alpha * self.xInterpolators[y_pos][z_pos](x)\n )\n - (\n (1 - alpha) * self.xInterpolators[y_pos - 1][z_pos - 1](x)\n + alpha * self.xInterpolators[y_pos][z_pos - 1](x)\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n dfdz[c] = (\n (\n (1 - alpha) * self.xInterpolators[i - 1][j](x[c])\n + alpha * self.xInterpolators[i][j](x[c])\n )\n - (\n (1 - alpha) * self.xInterpolators[i - 1][j - 1](x[c])\n + alpha * self.xInterpolators[i][j - 1](x[c])\n )\n ) / (self.z_list[j] - self.z_list[j - 1])\n return dfdz", "def get_order(order, gt_idx_v):\n o = np.tile(order, (gt_idx_v.shape[0],1))\n g = np.expand_dims(gt_idx_v, 1)\n o = o - g\n l, c = np.where(o==0)\n return c # order[c[0]] = gt_idx_v[0]", "def _derZ(self, w, x, y, z):\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list, w), self.w_n - 1), 1)\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n w_pos = self.wSearchFunc(self.w_list, w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n - 1] = self.w_n - 1\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n alpha = (w - self.w_list[i - 1]) / (self.w_list[i] - self.w_list[i - 1])\n beta = (x - self.x_list[j - 1]) / (self.x_list[j] - self.x_list[j - 1])\n gamma = (y - self.y_list[k - 1]) / (self.y_list[k] - self.y_list[k - 1])\n dfdz = (\n (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.f_values[i - 1, j - 1, k - 1, l]\n + (1 - alpha) * (1 - beta) * gamma * self.f_values[i - 1, j - 1, k, l]\n + (1 - 
alpha) * beta * (1 - gamma) * self.f_values[i - 1, j, k - 1, l]\n + (1 - alpha) * beta * gamma * self.f_values[i - 1, j, k, l]\n + alpha * (1 - beta) * (1 - gamma) * self.f_values[i, j - 1, k - 1, l]\n + alpha * (1 - beta) * gamma * self.f_values[i, j - 1, k, l]\n + alpha * beta * (1 - gamma) * self.f_values[i, j, k - 1, l]\n + alpha * beta * gamma * self.f_values[i, j, k, l]\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.f_values[i - 1, j - 1, k - 1, l - 1]\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.f_values[i - 1, j - 1, k, l - 1]\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.f_values[i - 1, j, k - 1, l - 1]\n + (1 - alpha) * beta * gamma * self.f_values[i - 1, j, k, l - 1]\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.f_values[i, j - 1, k - 1, l - 1]\n + alpha * (1 - beta) * gamma * self.f_values[i, j - 1, k, l - 1]\n + alpha * beta * (1 - gamma) * self.f_values[i, j, k - 1, l - 1]\n + alpha * beta * gamma * self.f_values[i, j, k, l - 1]\n )\n ) / (self.z_list[l] - self.z_list[l - 1])\n return dfdz", "def depth_rendering(ref_view, disparity_map, lf_size = (64, 512, 512, 3)):\n lf_one_way = int(math.floor(math.sqrt(lf_size[0])))\n\n x_indices = np.arange(lf_size[1])\n y_indices = np.arange(lf_size[2])\n b_indices = np.arange(lf_size[0])\n\n #Create a grid of size lf_size[:3] consisting of the pixel co ordinates of each image\n _, x, y = np.meshgrid(b_indices, x_indices, y_indices, indexing= 'ij')\n\n # Create a grid of size (lf_size[0], 2) consiting of the row, col lf positions\n grid = np.meshgrid(np.arange(lf_one_way), np.arange(lf_one_way), indexing= 'ij')\n stacked = np.stack(grid, 2)\n positions = stacked.reshape(-1, 2)\n\n # Compute the distance from each lf position from the reference view\n # Repeat the elements of this to match the size of the disparity map\n ref_pos = np.array(\n [lf_one_way // 2, lf_one_way // 2])\n distance = (np.tile(ref_pos, (lf_size[0], 1)) - positions).T\n dis_repeated = np.repeat(distance, lf_size[1] * lf_size[2], axis = 1)\n dis_repeated = dis_repeated.reshape(2, lf_size[0], lf_size[1], lf_size[2])\n\n\n # Tile the disparity map so that there is one for each lf_position - lf_size[0]\n tiled_map = np.tile(disparity_map, (lf_size[0], 1, 1))\n\n # Compute the shifted pixels\n x_shifted = (x.astype(np.float32) - tiled_map * dis_repeated[0]).flatten()\n y_shifted = (y.astype(np.float32) - tiled_map * dis_repeated[1]).flatten()\n\n #indices for linear interpolation in a square around the central point\n x_low = np.around(x_shifted).astype(int)\n #x_high = x_low + 1\n\n y_low = np.around(y_shifted).astype(int)\n #y_high = y_low + 1\n\n #Place co-ordinates outside the image back into the image\n x_low_clip = np.clip(x_low, 0, ref_view.shape[0] - 1)\n #x_high_clip = np.clip(x_high, 0, ref_view.shape[0] - 1)\n y_low_clip = np.clip(y_low, 0, ref_view.shape[1] - 1)\n #y_high_clip = np.clip(y_high, 0, ref_view.shape[1] - 1)\n\n #Gather the interpolation points\n interp_pts_1 = np.stack((x_low_clip, y_low_clip))\n #interp_pts_2 = np.stack((x_low_clip, y_high_clip))\n #interp_pts_3 = np.stack((x_high_clip, y_low_clip))\n #interp_pts_4 = np.stack((x_high_clip, y_high_clip))\n\n #Index into the images\n desired_shape = lf_size\n res_1 = torch_big_sample(ref_view, interp_pts_1, desired_shape)\n return res_1\n res_2 = torch_big_sample(ref_view, interp_pts_2, desired_shape)\n res_3 = torch_big_sample(ref_view, interp_pts_3, desired_shape)\n res_4 = torch_big_sample(ref_view, interp_pts_4, desired_shape)\n\n #Compute interpolation weights\n 
x_low_f = x_low.astype(np.float32)\n d_x_low = 1.0 - (x_shifted.astype(np.float32) - x_low_f)\n d_x_high = 1.0 - d_x_low\n y_low_f = y_low.astype(np.float32)\n d_y_low = 1.0 - (y_shifted.astype(np.float32) - y_low_f)\n d_y_high = 1.0 - d_y_low\n\n w1 = torch.from_numpy(d_x_low * d_y_low)\n w2 = torch.from_numpy(d_x_low * d_y_high)\n w3 = torch.from_numpy(d_x_high * d_y_low)\n w4 = torch.from_numpy(d_x_high * d_y_high)\n\n #THEY AGREE AT THIS POINT\n weighted_1 = torch.mul(repeat_weights(w1, desired_shape), res_1)\n weighted_2 = torch.mul(repeat_weights(w2, desired_shape), res_2)\n weighted_3 = torch.mul(repeat_weights(w3, desired_shape), res_3)\n weighted_4 = torch.mul(repeat_weights(w4, desired_shape), res_4)\n\n novel_view = torch.add(torch.add(weighted_1, weighted_2), weighted_3)\n torch.add(novel_view, weighted_4, out=novel_view)\n return novel_view", "def __get_z__(self):\n return self.Direction['z']", "def calc_ply_order(constraints, targets):\r\n if constraints.sym:\r\n ply_order = np.arange(targets.n_plies // 2 + targets.n_plies % 2)\r\n return ply_order\r\n\r\n order_before_sorting = np.arange(targets.n_plies)\r\n ply_order = np.zeros((targets.n_plies,), int)\r\n ply_order[0::2] = order_before_sorting[\r\n :targets.n_plies // 2 + targets.n_plies % 2]\r\n ply_order[1::2] = order_before_sorting[\r\n targets.n_plies // 2 + targets.n_plies % 2:][::-1]\r\n return ply_order", "def N_z(self) -> int:\n return self.params.N_z", "def pz_fn(self, z):\n pass", "def _derZ(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.y_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos](w)\n + (1 - alpha)\n * beta\n * self.wInterpolators[x_pos - 1][y_pos][z_pos](w)\n + alpha\n * (1 - beta)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos](w)\n + alpha * beta * self.wInterpolators[x_pos][y_pos][z_pos](w)\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1](w)\n + (1 - alpha)\n * beta\n * self.wInterpolators[x_pos - 1][y_pos][z_pos - 1](w)\n + alpha\n * (1 - beta)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1](w)\n + alpha * beta * self.wInterpolators[x_pos][y_pos][z_pos - 1](w)\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n dfdz = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n beta = (y[c] - self.y_list[j - 1]) / (\n self.y_list[j] - self.y_list[j - 1]\n )\n dfdz[c] = (\n (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[i - 1][j - 1][k](w[c])\n + (1 - alpha)\n * beta\n * self.wInterpolators[i - 1][j][k](w[c])\n + alpha\n * (1 - beta)\n * self.wInterpolators[i][j - 
1][k](w[c])\n + alpha * beta * self.wInterpolators[i][j][k](w[c])\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * self.wInterpolators[i - 1][j - 1][k - 1](w[c])\n + (1 - alpha)\n * beta\n * self.wInterpolators[i - 1][j][k - 1](w[c])\n + alpha\n * (1 - beta)\n * self.wInterpolators[i][j - 1][k - 1](w[c])\n + alpha\n * beta\n * self.wInterpolators[i][j][k - 1](w[c])\n )\n ) / (self.z_list[k] - self.z_list[k - 1])\n return dfdz", "def cells_z(self):\n if self.is_depth:\n return list(reversed(self._cells[2]))\n return self._cells[2]", "def _derZ(self, x, y, z):\n if _isscalar(x):\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n dfdz = (\n (\n (1 - alpha) * (1 - beta) * self.f_values[x_pos - 1, y_pos - 1, z_pos]\n + (1 - alpha) * beta * self.f_values[x_pos - 1, y_pos, z_pos]\n + alpha * (1 - beta) * self.f_values[x_pos, y_pos - 1, z_pos]\n + alpha * beta * self.f_values[x_pos, y_pos, z_pos]\n )\n - (\n (1 - alpha)\n * (1 - beta)\n * self.f_values[x_pos - 1, y_pos - 1, z_pos - 1]\n + (1 - alpha) * beta * self.f_values[x_pos - 1, y_pos, z_pos - 1]\n + alpha * (1 - beta) * self.f_values[x_pos, y_pos - 1, z_pos - 1]\n + alpha * beta * self.f_values[x_pos, y_pos, z_pos - 1]\n )\n ) / (self.z_list[z_pos] - self.z_list[z_pos - 1])\n return dfdz", "def testGetOrderedLayers(self):\n container_obj = self.explorer_object.GetContainer(\n 'de44dd97cfd1c8d1c1aad7f75a435603991a7a39fa4f6b20a69bf4458809209c')\n layers = container_obj.GetOrderedLayers()\n self.assertEqual(2, len(layers))\n self.assertEqual(\n '1cee97b18f87b5fa91633db35f587e2c65c093facfa2cbbe83d5ebe06e1d9125',\n layers[0])", "def userToPlotZ(z): \n return dislin.nzposn(z)", "def _get_level_ordering(self):\n # First, get a level for all layers:\n levels = {}\n for layer in self._layers:\n level = max(\n [levels[lay.name] for lay in self.incoming_layers(layer.name)] + [-1]\n )\n levels[layer.name] = level + 1\n max_level = max(levels.values())\n ordering = []\n for i in range(max_level + 1): # input to output\n layer_names = [\n layer.name for layer in self._layers if levels[layer.name] == i\n ]\n ordering.append(\n [\n (name, False, [x.name for x in self.incoming_layers(name)])\n for name in layer_names\n ]\n ) # (going_to/layer_name, anchor, coming_from)\n # promote all output banks to last row:\n for level in range(len(ordering)): # input to output\n tuples = ordering[level]\n index = 0\n for (name, anchor, none) in tuples[:]:\n if self._get_layer_type(name) == \"output\":\n # move it to last row\n # find it and remove\n ordering[-1].append(tuples.pop(index))\n else:\n index += 1\n # insert anchor points for any in next level\n # that doesn't go to a bank in this level\n # order_cache = {}\n for level in range(len(ordering)): # input to output\n tuples = ordering[level]\n for (name, anchor, fname) in tuples:\n if anchor:\n # is this in next? 
if not add it\n next_level = [\n (n, anchor) for (n, anchor, hfname) in ordering[level + 1]\n ]\n if (\n name,\n False,\n ) not in next_level: # actual layer not in next level\n ordering[level + 1].append(\n (name, True, fname)\n ) # add anchor point\n else:\n pass # finally!\n else:\n # if next level doesn't contain an outgoing\n # connection, add it to next level as anchor point\n for layer in self.outgoing_layers(name):\n next_level = [\n (n, anchor) for (n, anchor, fname) in ordering[level + 1]\n ]\n if (layer.name, False) not in next_level:\n ordering[level + 1].append(\n (layer.name, True, name)\n ) # add anchor point\n ordering = self._optimize_ordering(ordering)\n return ordering", "def get_layers(self):\n layers = []\n\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(self.get_positions() - self.center, n).max()\n d = self.get_layer_distance(s, 2)\n l = 2 * np.round(r / d).astype(int)\n\n ls = np.arange(l-1,l+2)\n ds = np.array([self.get_layer_distance(s, i) for i in ls])\n\n mask = (np.abs(ds - r) < 1e-10)\n\n layers.append(ls[mask][0])\n\n return np.array(layers, int)", "def getZ(self):\n return self.position.getZ()", "def reverse(self, z):\n batch_size = z.shape[0]\n z_x = z[:, :self.a_size]\n z_adj = z[:, self.a_size:]\n\n h_adj = z_adj.reshape(batch_size, self.b_n_type, self.a_n_node, self.a_n_node)\n h_adj = h_adj.to(memory_format=torch.channels_last)\n h_adj = self.bond_model.reverse(h_adj)\n\n if self.noise_scale == 0:\n h_adj = (h_adj + 0.5) * 2\n adj = h_adj\n adj = adj + adj.permute(0, 1, 3, 2)\n adj = adj / 2\n adj = adj.softmax(dim=1)\n max_bond = adj.max(dim=1).values.reshape(batch_size, -1, self.a_n_node, self.a_n_node)\n adj = torch.floor(adj / max_bond)\n\n adj = adj.to(memory_format=torch.channels_last)\n h_x = z_x.reshape(batch_size, self.a_n_node, self.a_n_type)\n h_x = self.atom_model.reverse((adj, h_x))\n if self.noise_scale == 0:\n h_x = (h_x + 0.5) * 2\n return adj, h_x", "def getOrder(self):\n return len(self.vertices)", "def test_gz_batch(self):\n assert self.design.layout.layers[0].name == 'top'", "def reverse_node_order(self, element_type):\n assert element_type in self.element_data, \\\n \"element type {} not registered Available types :{}\".format(\n element_type, self.element_data.keys()\n )\n for i in range(len(self.element_data[element_type])):\n self.element_data[\n element_type\n ][i].nodes = self.element_data[\n element_type\n ][i].nodes[::-1]\n\n self.element_data[\n element_type\n ][i].xcoords = self.element_data[\n element_type\n ][i].xcoords[::-1]\n\n self.element_data[\n element_type\n ][i].zcoords = self.element_data[\n element_type\n ][i].zcoords[::-1]", "def get_z(self):\n return self.coords[2]", "def nNz(self):\n if self.dim < 3:\n return None\n return self.nCz + 1", "def order(self):\n return len(self.coeff)-1", "def curve_order(self):\n\t\treturn self.h * self.n", "def getZ(self):\n return _libsbml.BoundingBox_getZ(self)", "def get_lz(self):\r\n return self.dz * self.nz - self.oz", "def _getitem3d(self, index):\n\n lovects = self._getlovects()\n hivects = self._gethivects()\n fields = self._getfields()\n\n ix = index[0]\n iy = index[1]\n iz = index[2]\n\n if len(fields[0].shape) > self.dim:\n ncomps = fields[0].shape[-1]\n else:\n ncomps = 1\n\n if len(index) > self.dim:\n if ncomps > 1:\n ic = index[-1]\n else:\n raise Exception('Too many indices given')\n else:\n ic = None\n\n nx = hivects[0,:].max() - self.nghosts\n ny = hivects[1,:].max() - self.nghosts\n nz = hivects[2,:].max() - self.nghosts\n\n if npes > 
1:\n nx = comm_world.allreduce(nx, op=mpi.MAX)\n ny = comm_world.allreduce(ny, op=mpi.MAX)\n nz = comm_world.allreduce(nz, op=mpi.MAX)\n\n if isinstance(ix, slice):\n ixstart = max(ix.start or -self.nghosts, -self.nghosts)\n ixstop = min(ix.stop or nx + 1 + self.nghosts, nx + self.overlaps[0] + self.nghosts)\n else:\n ixstart = ix\n ixstop = ix + 1\n if isinstance(iy, slice):\n iystart = max(iy.start or -self.nghosts, -self.nghosts)\n iystop = min(iy.stop or ny + 1 + self.nghosts, ny + self.overlaps[1] + self.nghosts)\n else:\n iystart = iy\n iystop = iy + 1\n if isinstance(iz, slice):\n izstart = max(iz.start or -self.nghosts, -self.nghosts)\n izstop = min(iz.stop or nz + 1 + self.nghosts, nz + self.overlaps[2] + self.nghosts)\n else:\n izstart = iz\n izstop = iz + 1\n\n # --- Setup the size of the array to be returned and create it.\n # --- Space is added for multiple components if needed.\n sss = (max(0, ixstop - ixstart),\n max(0, iystop - iystart),\n max(0, izstop - izstart))\n if ncomps > 1 and ic is None:\n sss = tuple(list(sss) + [ncomps])\n resultglobal = np.zeros(sss, dtype=_libwarpx._numpy_real_dtype)\n\n datalist = []\n for i in range(len(fields)):\n\n # --- The ix1, 2 etc are relative to global indexing\n ix1 = max(ixstart, lovects[0,i])\n ix2 = min(ixstop, lovects[0,i] + fields[i].shape[0])\n iy1 = max(iystart, lovects[1,i])\n iy2 = min(iystop, lovects[1,i] + fields[i].shape[1])\n iz1 = max(izstart, lovects[2,i])\n iz2 = min(izstop, lovects[2,i] + fields[i].shape[2])\n\n if ix1 < ix2 and iy1 < iy2 and iz1 < iz2:\n\n sss = (slice(ix1 - lovects[0,i], ix2 - lovects[0,i]),\n slice(iy1 - lovects[1,i], iy2 - lovects[1,i]),\n slice(iz1 - lovects[2,i], iz2 - lovects[2,i]))\n if ic is not None:\n sss = tuple(list(sss) + [ic])\n\n vslice = (slice(ix1 - ixstart, ix2 - ixstart),\n slice(iy1 - iystart, iy2 - iystart),\n slice(iz1 - izstart, iz2 - izstart))\n\n datalist.append((vslice, fields[i][sss]))\n\n if npes == 1:\n all_datalist = [datalist]\n else:\n all_datalist = comm_world.allgather(datalist)\n\n for datalist in all_datalist:\n for vslice, ff in datalist:\n resultglobal[vslice] = ff\n\n # --- Now remove any of the reduced dimensions.\n sss = [slice(None), slice(None), slice(None)]\n if not isinstance(ix, slice):\n sss[0] = 0\n if not isinstance(iy, slice):\n sss[1] = 0\n if not isinstance(iz, slice):\n sss[2] = 0\n\n return resultglobal[tuple(sss)]", "def _sort_ds(self):\n d = []\n for layer in self.structure:\n if (layer.type == 'Layer' or layer.type == 'Substrate'):\n d.append(layer.thickness)\n d.insert(0, self.structure[0].thickness)\n d.append(self.structure[-1].thickness)\n d = np.asarray(d)\n return d", "def getOrder(self):\n return _libsbml.CompartmentGlyph_getOrder(self)", "def mXZ(nxz,P_dot_Dj,P_dot_ej):\n return np.divide(np.multiply(P_dot_ej, np.sum(nxz, axis=0)), P_dot_Dj)", "def find_order(identlist, npixel):\n aper_lst, wlc_lst = [], []\n for aperture, list1 in sorted(identlist.items()):\n if list1.size<3:\n continue\n less_half = (list1['pixel'] < npixel/2).sum()>0\n more_half = (list1['pixel'] > npixel/2).sum()>0\n #less_half, more_half = False, False\n #for pix, wav in zip(list1['pixel'], list1['wavelength']):\n # if pix < npixel/2.:\n # less_half = True\n # elif pix >= npixel/2.:\n # more_half = True\n if less_half and more_half:\n if list1['pixel'].size>2:\n deg = 2\n else:\n deg = 1\n c = np.polyfit(list1['pixel'], list1['wavelength'], deg=deg)\n wlc = np.polyval(c, npixel/2.)\n aper_lst.append(aperture)\n wlc_lst.append(wlc)\n aper_lst = np.array(aper_lst)\n 
wlc_lst = np.array(wlc_lst)\n if wlc_lst[0] > wlc_lst[-1]:\n k = 1\n else:\n k = -1\n\n offset_lst = np.arange(-500, 500)\n eva_lst = []\n for offset in offset_lst:\n const = (k*aper_lst + offset)*wlc_lst\n diffconst = np.diff(const)\n eva = (diffconst**2).sum()\n eva_lst.append(eva)\n eva_lst = np.array(eva_lst)\n offset = offset_lst[eva_lst.argmin()]\n\n return k, offset", "def z(self):\n return self.coords[2]", "def de_addressing_array(Z):\n assert len(Z.shape) == 2\n \n if(Z.shape[0] == 1): return Z.copy()\n \n Zd = np.zeros(Z.shape, dtype=np.int)\n Zd[-1] = Z[-1]\n for i in range(-2,-Z.shape[0]-1,-1):\n Zd[i] = np.roll(Zd[i+1],-1)+Z[i]\n\n return Zd", "def derivativeZ(self, *args):\n if self.n_dims >= 4:\n j = 3\n else:\n j = 2\n if self.i_dim == j:\n return np.ones_like(*args[0])\n else:\n return np.zeros_like(*args[0])", "def testGetOrderedLayers(self):\n container_obj = self.explorer_object.GetContainer(\n '7b02fb3e8a665a63e32b909af5babb7d6ba0b64e10003b2d9534c7d5f2af8966')\n layers = container_obj.GetOrderedLayers()\n self.assertEqual(1, len(layers))\n self.assertEqual(\n 'sha256:'\n '7968321274dc6b6171697c33df7815310468e694ac5be0ec03ff053bb135e768',\n layers[0])", "def ordering(self):\n if self.dim_ordering is None:\n return list(range(self.rank))\n\n orig = self.dim_ordering.dims_and_symbols.dims\n return [orig.index(sym) for sym in self.dim_ordering.map.dims]", "def z(self):\n return self._z", "def z(self):\n return self._z", "def z(self):\n return self._z", "def testGetOrderedLayers(self):\n container_obj = self.explorer_object.GetContainer(\n '8e8b7f23eb7cbd4dfe7e91646ddd0e0f524218e25d50113559f078dfb2690206')\n layers = container_obj.GetOrderedLayers()\n self.assertEqual(1, len(layers))\n self.assertEqual(\n 'sha256:'\n '8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7',\n layers[0])", "def testGetOrderedLayers(self):\n container_obj = self.explorer_object.GetContainer(\n '5dc287aa80b460652a5584e80a5c8c1233b0c0691972d75424cf5250b917600a')\n layers = container_obj.GetOrderedLayers()\n self.assertEqual(1, len(layers))\n self.assertEqual(\n 'sha256:'\n '5b0d59026729b68570d99bc4f3f7c31a2e4f2a5736435641565d93e7c25bd2c3',\n layers[0])", "def _compute_ptdfs(self):\n z = self._compute_z()\n s = np.zeros([self.n_nodes, self.n_nodes, self.n_nodes])\n for k in range(self.n_nodes):\n for L in range(self.n_nodes):\n for i in range(self.n_nodes):\n if k == 0 and L != 0:\n s[k, L, i] = -1 * z[L-1, i-1]\n elif k != 0 and L == 0:\n s[k, L, i] = z[k-1, i-1]\n elif k != 0 and L != 0 and k != L:\n s[k, L, i] = z[k-1, i-1] - z[L-1, i-1]\n return s", "def locations_z(self):\n if self.is_depth:\n return [-z for z in reversed(self._locations[2])]\n return self._locations[2]", "def order(self):\n return len(self.vertices())", "def nlayer(i):\n node = basis.MCTDHnode(i)\n layer_list.append(i)\n if node.Toplayer() == False:\n return nlayer(node.up().address())\n new_list = list(layer_list) #copy instead of reference\n del layer_list[:]\n return new_list", "def z(self):\n return self._coords[2]", "def create_3d_patch(model,orgin,size,N,setname='default'):\n \n N = map(int,N)\n # create the coordinates and connectivity based on one\n bxyz,bcube = block3d(orgin,size,N)\n \n # add node to model starting with current highest node seq\n nn = int(model.node(bxyz))\n update_bcube = bcube + int(nn)\n \n pelemset = model.element(update_bcube,setname)\n \n \n nx = N[0] + 1\n ny = N[1] + 1\n nz = N[2] + 1\n \n \n nodeline = {}\n\n nodeline['1'] = [nn + 1]\n nodeline['2'] = [nn + 1 + ny*nz*(nx-1)]\n 
nodeline['3'] = [nn + 1 + ny*nz*(nx-1) + (ny-1)*nz]\n nodeline['4'] = [nn + 1 + nz*(ny-1)]\n nodeline['5'] = [nodeline['1'][0] + (nz-1)]\n nodeline['6'] = [nodeline['2'][0] + (nz-1)]\n nodeline['7'] = [nodeline['3'][0] + (nz-1)]\n nodeline['8'] = [nodeline['4'][0] + (nz-1)]\n \n nodeline['1-2'] = range(nodeline['1'][0],nodeline['2'][0]+1,ny*nz)\n nodeline['2-3'] = range(nodeline['2'][0],nodeline['3'][0]+1,nz)\n nodeline['3-4'] = range(nodeline['4'][0],nodeline['3'][0]+1,ny*nz)\n nodeline['1-4'] = range(nodeline['1'][0],nodeline['4'][0]+1,nz)\n\n nodeline['5-6'] = range(nodeline['5'][0],nodeline['6'][0]+1,ny*nz)\n nodeline['6-7'] = range(nodeline['6'][0],nodeline['7'][0]+1,nz)\n nodeline['7-8'] = range(nodeline['8'][0],nodeline['7'][0]+1,ny*nz)\n nodeline['5-8'] = range(nodeline['5'][0],nodeline['8'][0]+1,nz)\n \n nodeline['1-5'] = range(nodeline['1'][0],nodeline['5'][0]+1,1)\n nodeline['4-8'] = range(nodeline['4'][0],nodeline['8'][0]+1,1)\n nodeline['2-6'] = range(nodeline['2'][0],nodeline['6'][0]+1,1)\n nodeline['3-7'] = range(nodeline['3'][0],nodeline['7'][0]+1,1)\n \n nodeline['1-2-3-4'] = []\n nodeline['5-6-7-8'] = []\n nodeline['1-2-6-5'] = []\n nodeline['4-3-7-8'] = []\n nodeline['1-4-8-5'] = []\n nodeline['2-3-7-6'] = []\n \n for i in range(0,nx):\n for j in nodeline['1-4']:\n nodeline['1-2-3-4'].append(j + i*ny*nz)\n for j in nodeline['5-8']:\n nodeline['5-6-7-8'].append(j + i*ny*nz) \n \n for j in nodeline['1-5']:\n nodeline['1-2-6-5'].append(j + i*ny*nz) \n for j in nodeline['4-8']:\n nodeline['4-3-7-8'].append(j + i*ny*nz)\n \n for i in range(0,ny):\n for j in nodeline['1-5']:\n nodeline['1-4-8-5'].append(j + i*nz)\n for j in nodeline['2-6']:\n nodeline['2-3-7-6'].append(j + i*nz)\n #\n for key in nodeline:\n nodesetname = '-'.join([setname , key])\n model.nodeset(nodesetname,{'nodelist':nodeline[key]})\n\n return model", "def get_z(self) -> int:\n return self.__z", "def stack_red_detect(self):\n self.redundancy_pool.clear()\n\n for nslice in np.arange(self.nz-1):\n self._red_detect_(nslice, thresh = 1.0)\n\n # OK, let's check the the size of the pool and remove them one by one.\n dist_3d = np.zeros((0, 4)) # create an empty array to save z, y, x, f\n\n\n for sl_key, sl_value in self.redundancy_pool.items():\n z_start = sl_value.z_marker # where does the z_marker starts\n z_list = np.array(sl_value.list) # convert it into a 2d array\n z_key = 's_' + format(z_start, '03d')\n zframe_0 = self.z_dense[z_key]\n z_identifier = int(sl_key[3:]) - z_start*1000 # which cell?\n\n pz = self.z_step*np.inner(z_list[:,0], z_list[:,1])/z_list[:,1].sum() # weighted average estimation\n py, px = zframe_0[z_identifier, 0:2] # The x-y coordinates\n pf = zframe_0[z_identifier, 4] # the fluorescence\n\n\n new_entry = np.array([[pz, py, px, pf]])\n dist_3d = np.concatenate((dist_3d, new_entry), axis = 0)\n\n ord_z = np.argsort(dist_3d[:,0], axis = 0)\n # sort in the order of Z.\n\n\n self.dist_3d = dist_3d[ord_z, :]\n\n return dist_3d", "def photons(self, depth=1):\n self.dx[:,:self.W-1] = self.z[:,1:] - self.z[:,:self.W-1]\n self.dy[:self.H-1,:] = self.z[1:,:] - self.z[:self.H-1,:]\n px = self.xv - self.dx*depth\n py = self.yv - self.dy*depth\n return px,py", "def next_zorder_index(self, z):\n if self.is_in(z + 1):\n return z + 1\n\n min_v = self.min_z\n max_v = self.max_z\n bit_position = self.bit_position_init # 10000000.. bit position currently investigating\n load_mask = self.load_mask_init # 01010101.. original value preserving mask\n load_ones = self.load_ones_init # 00101010.. 
loading value for LOAD(0111..)\n\n while bit_position:\n z_bit, min_bit, max_bit = z & bit_position, min_v & bit_position, max_v & bit_position\n # decision table from the paper\n if not z_bit and not min_bit and not max_bit: # 0 0 0\n pass\n elif not z_bit and not min_bit and max_bit: # 0 0 1\n bigmin = min_v & load_mask | bit_position\n max_v = max_v & load_mask | load_ones\n elif not z_bit and min_bit and max_bit: # 0 1 1\n return int(min_v)\n elif z_bit and not min_bit and not max_bit: # 1 0 0\n # noinspection PyUnboundLocalVariable\n return int(bigmin)\n elif z_bit and not min_bit and max_bit: # 1 0 1\n min_v = min_v & load_mask | bit_position\n elif z_bit and min_bit and max_bit: # 1 1 1\n pass\n else: # 0 1 0 or 1 1 0\n # it should be never happen..\n raise ValueError('Z-order index search failed. Something wrong...')\n\n # investigate next bit position\n bit_position >>= 1\n load_ones >>= 1\n load_mask >>= 1\n load_mask |= self.bit_position_init\n\n # noinspection PyUnboundLocalVariable\n return int(bigmin)", "def z(self):\n return _libsbml.Point_z(self)", "def nCz(self):\n if self.dim < 3:\n return None\n return int(self._n[2])", "def get3Dcoordinates(self, skel_2d):\n skel_2d = skel_2d.to(self.device)\n z_out = self.net(skel_2d)\n z_out = z_out.detach().cpu().numpy()\n z_out = z_out.reshape(-1)\n return z_out", "def EncodeMorton3D(x, y, z):\r\n return Expand3D(x) + (Expand3D(y) << 1) + (Expand3D(z) << 2)", "def order_computation(H: [Permutation]) -> int:\n if len(H) == 1 and H[0].istrivial():\n return 1\n alpha = find_non_trivial_orbit(H)\n if alpha is not None:\n orbit_alpha = compute_orbit(H, alpha, return_transversal=False)\n stab_alpha = stabilizer(H, alpha)\n order_orbit = len(orbit_alpha)\n if len(stab_alpha) == 0:\n return order_orbit\n else:\n return order_orbit * order_computation(stab_alpha)", "def renumber_cells_ordering(self):\n Nactive = sum(~self.cells['deleted'])\n return np.argsort( self.cells['deleted'],kind='mergesort')[:Nactive]", "def trap_depth_old(V,X,Y,Z,Im,Jm,Km,debug=False): \n from project_parameters import debug\n #from all_functions import sum_of_e_field\n def a(a,N):\n \"\"\"Shortcut function to convert array x into a row vector.\"\"\" \n a=np.ravel(a, order='F') # Same order\n return a\n def index_sort(y,x):\n \"\"\"Takes in two lists of the same length and returns y sorted by the indexing of x sorted.\"\"\"\n xs=np.sort(x)\n ix=np.argsort(x)\n ys=np.ones(len(y)) #Sorted by the sorting defined by f being sorted. 
\n for i in range(len(y)):\n j=ix[i]\n ys[i]=y[j]\n return ys\n if len(V.shape)!=3:\n return('Problem with find_saddle.py dimensionalities.\\n')\n N1,N2,N3=V.shape\n N=N1*N2*N3\n f=V\n [Ex,Ey,Ez]=np.gradient(f,abs(X[1]-X[0]),abs(Y[1]-Y[0]),abs(Z[1]-Z[0]))\n E=np.sqrt(Ex**2+Ey**2+Ez**2)\n fs,Es=a(f,N),a(E,N) # Convert 3D to 1D array\n fs,Es=np.real(fs),np.real(Es)\n # identify the escape position and height by checking each point\n minElectricField=max(fs) # initialize as maximum E field magnitude\n distance=0\n escapeHeight=1\n escapePosition=[0,0,0]\n for i in range(N1):\n for j in range(N2):\n for k in range(N3):\n if [i,j,k]==[Im,Jm,Km]:\n Vm=V[i,j,k]\n elif E[i,j,k]<minElectricField:\n minElectricField=E[i,j,k]\n escapeHeight=V[i,j,k]\n escapePosition=[i,j,k]\n distance=abs(Im+Jm+Km-i-j-k) \n if debug.trap_depth: # plot sortings of potential and electric field to view escape position\n plt.plot(np.sort(fs)) \n plt.title('sorted potential field')\n plt.show()\n plt.plot(np.sort(Es)) \n plt.title('sorted electric field')\n plt.show()\n q1=index_sort(fs,Es) \n plt.title('potential field sorted by sorted indexing of electric field')\n plt.plot(q1)\n plt.show()\n q2=index_sort(Es,fs) \n plt.title('electric field sorted by sorted indexing of potential field')\n plt.plot(q2)\n plt.show() \n check=1 \n if debug.trap_depth: \n print minElectricField,escapeHeight,escapePosition,distance \n if distance<check:\n print('trap_depth.py: Escape point too close to trap minimum. Improve grid resolution or extend grid.')\n if escapeHeight>0.2:\n print('trap_depth.py: Escape point parameter too high. Improve grid resolution or extend grid.')\n D=escapeHeight-Vm\n [Ie,Je,Ke]=escapePosition\n [Xe,Ye,Ze]=[X[Ie],Y[Je],Z[Ke]] \n return [D,Xe,Ye,Ze]", "def extrapToZ(zc,(x0,y0,z0),(px,py,pz)):\n x = x0+ (px/pz)*(zc-z0)\n y = y0+ (py/pz)*(zc-z0)\n\n return (x,y)", "def vnEz(self):\n if self.dim < 3:\n return None\n return np.array(\n [x for x in [self.nNx, self.nNy, self.nCz] if x is not None],\n dtype=int\n )", "def xz_plane(self, y):\n result = []\n base = pos2idx(0, y, 0)\n depth = self.depth\n width = self.width\n for z in range(0, depth):\n result.append([x for x in self._blocks[base:base+width]])\n base += width\n\n return result", "def __get_arranged_nodal_displacements(self) -> NDArray[np.float64]:\n\n nodes = (self.node1, self.node2)\n nodal_displacements = flatten(\n [node.nodal_displacements.x, node.nodal_displacements.y]\n for node in nodes\n )\n return np.array(nodal_displacements, dtype=np.float64)", "def vnFz(self):\n if self.dim < 3:\n return None\n return np.array(\n [x for x in [self.nCx, self.nCy, self.nNz] if x is not None],\n dtype=int\n )", "def xz_plane(blkmap, y):\n result = []\n base = y*blkmap.plane_span\n z = 0\n while z < blkmap.plane_span:\n start = base + z\n z += blkmap.row_span\n end = base + z\n result.append(tuple(blkmap.blocks[start:end]))\n\n return result", "def reindex_graphics(self):\n for obj in self.context.static_objects:\n self.canvas.children.remove(obj.widget.canvas)\n # fill _objects_z_index\n _objects_z_index = {}\n for obj in self.context.static_objects:\n y = obj.widget.pos[1]\n if not y in _objects_z_index:\n _objects_z_index[y] = []\n _objects_z_index[y].append(obj)\n _keys = _objects_z_index.keys()\n _keys.sort()\n _keys.reverse()\n for k in _keys:\n objs = _objects_z_index[k]\n for obj in objs:\n self.canvas.add(obj.widget.canvas)", "def AdjacentAveraging2D(zdata, nPoints=10):\r\n zz = np.zeros_like(zdata)\r\n for u, i in enumerate(zdata):\r\n zz[u] = 
AdjacentAveraging(i, nPoints=nPoints)\r\n return zz", "def get_dndz(self, z):\n return self._dndz(z)", "def get_packing_order(ind, boxes_to_pack, box_types, test_name=\"default\"):\n layers_packed = get_packed_boxes_from_ind(ind, boxes_to_pack, box_types)\n packing_order = []\n for layer in layers_packed:\n for box in layer.boxes:\n x, y, z = box.llc()\n packing_order.append({\"id\": box.box_type.identifier, \"x\": x, \"y\": y, \"z\": z, \"rotate\": box.rotate})\n print(f\"{box.box_type.identifier} at {box.llc()}\")\n with open(f\"packing_order_{test_name}.json\", 'w') as json_file:\n json.dump(packing_order, json_file, indent=4)", "def buildZGrid(self, plot=False):\r\n\r\n print(\"Constructing Z corners\")\r\n\r\n # self.zcorn = np.array(self.zcorn, dtype=float)\r\n # temp = np.zeros( ((self.ne+1)*(self.nn+1)*self.nz) )\r\n temp = []\r\n count = 0\r\n for item in self.zcorn:\r\n\r\n if \"*\" in item:\r\n ct = (int)(item.split(\"*\")[0])\r\n vl = (float)(item.split(\"*\")[1])\r\n temp += np.tile(vl, ct).tolist()\r\n count += ct\r\n else:\r\n temp += [(float)(item)]\r\n count += 1\r\n\r\n # layers = np.resize(temp, (8, self.ne*self.nn*self.nz ))\r\n layers = np.resize(temp, (self.nz * 2, self.ne * self.nn * 4))\r\n \"\"\"\r\n plt.plot(newtemp[0,:]) # TOP 0 0\r\n plt.plot(newtemp[1,:]) # SAME -- # BOTTOM 0 1\r\n #plt.plot(newtemp[2,:]) # SAME -- # TOP 1 2\r\n\r\n plt.plot(newtemp[3,:]) # SAME -- # BOTTOM 1 3\r\n #plt.plot(newtemp[4,:]) # SAME -- # TOP 2 4\r\n\r\n plt.plot(newtemp[5,:]) # SAME -- # BOTTOM 2 5\r\n #plt.plot(newtemp[6,:]) # SAME -- # TOP 3 6\r\n plt.plot(newtemp[7,:]) # BOTTOM 3 7\r\n \"\"\"\r\n self.ZZT = {} # zztop ha ha...two year's later this is still funny -TI\r\n self.ZZB = {}\r\n for ilay in range(self.nz):\r\n self.ZZT[ilay] = np.zeros((self.ndx, self.ndy))\r\n self.ZZB[ilay] = np.zeros((self.ndx, self.ndy))\r\n iis = 0\r\n # plt.plot(layers[ilay*2])\r\n for iin in range(self.nn):\r\n nears = {}\r\n fars = {}\r\n bnears = {}\r\n bfars = {}\r\n for iif in range(2):\r\n # top\r\n nears[iif] = layers[ilay * 2][iis:iis + 2 * self.ne][0::2].tolist()\r\n fars[iif] = layers[ilay * 2][iis:iis + 2 * self.ne][1::2].tolist()\r\n layers[ilay * 2][iis:iis + 2 * self.ne][0::2] *= 0. 
# check\r\n layers[ilay * 2][iis:iis + 2 * self.ne][1::2] *= 0.\r\n nears[iif].append(fars[iif][-1])\r\n fars[iif] = [nears[iif][0]] + fars[iif]\r\n # bottom\r\n bnears[iif] = layers[ilay * 2 + 1][iis:iis + 2 * self.ne][0::2].tolist()\r\n bfars[iif] = layers[ilay * 2 + 1][iis:iis + 2 * self.ne][1::2].tolist()\r\n layers[ilay * 2 + 1][iis:iis + 2 * self.ne][0::2] *= 0.\r\n layers[ilay * 2 + 1][iis:iis + 2 * self.ne][1::2] *= 0.\r\n bnears[iif].append(bfars[iif][-1])\r\n bfars[iif] = [bnears[iif][0]] + bfars[iif]\r\n #\r\n iis += 2 * self.ne\r\n\r\n self.ZZT[ilay][:, iin] = nears[0]\r\n self.ZZB[ilay][:, iin] = bnears[0]\r\n # NaN mask for visualizing, but can be sort of a pain to deal with\r\n # imask = np.nonzero( 1-self.ActiveCells[:,iin,ilay] )\r\n # self.ZZT[ilay][:,iin][1::][imask] = np.nan\r\n # self.ZZB[ilay][:,iin][1::][imask] = np.nan\r\n # if self.ActiveCells[0,iin,ilay] == 0:\r\n # self.ZZT[ilay][:,iin][0] = np.nan\r\n # self.ZZB[ilay][:,iin][0] = np.nan\r\n if iin == self.nn - 1:\r\n self.ZZT[ilay][:, iin + 1] = fars[1]\r\n self.ZZB[ilay][:, iin + 1] = bfars[1]\r\n # NaN mask\r\n # self.ZZT[ilay][:,iin+1][1::][imask] = np.nan\r\n # self.ZZB[ilay][:,iin+1][1::][imask] = np.nan\r\n # if self.ActiveCells[0,iin,ilay] == 0:\r\n # self.ZZT[ilay][:,iin+1][0] = np.nan\r\n # self.ZZB[ilay][:,iin+1][0] = np.nan\r\n\r\n print(\"Layers ||\", np.linalg.norm(layers), \"||\")\r\n # exit()\r\n\r\n # visualize\r\n if plot:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111, projection='3d')\r\n # ax.plot_wireframe( self.X0, self.Y0, self.Z0, rstride=1, cstride=1)\r\n\r\n ax.plot_wireframe(self.X0, self.Y0, self.ZZT[0], rstride=1, cstride=1, color=\"blue\")\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZT[1], rstride=1, cstride=1, color=\"blue\")\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZT[2], rstride=1, cstride=1, color=\"blue\")\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZT[3], rstride=1, cstride=1, color=\"blue\")\r\n\r\n # ax.plot_wireframe( self.X0, self.Y0, self.ZZB[3], rstride=1, cstride=1, color=\"green\")\r\n\r\n plt.gca().set_xlim(np.min(self.X0), np.max(self.X0))\r\n plt.gca().set_ylim(np.max(self.Y0), np.min(self.Y0))\r\n # plt.gca().set_zlim( np.max(self.ZZB[3]), np.min(self.ZZT[0]) )\r\n plt.gca().set_zlim(5000, 4000)\r\n plt.savefig(\"mesh.png\")\r\n plt.show()", "def viewpoly(self, depth: Number) -> np.ndarray:\n cy = self.imgsz[1] / 2 + self.c[1]\n uv = np.array([(0, cy), (self.imgsz[0], cy)])\n xyz = self.uv_to_xyz(uv, directions=False, depth=depth)\n return np.row_stack([self.xyz, xyz, self.xyz])", "def tile_and_bind3d(s, tensor, z, y, x, z_factor=2, y_factor=None, x_factor=None):\n y_factor = y_factor or z_factor\n x_factor = x_factor or y_factor\n zo, zi = s[tensor].split(z, z_factor)\n yo, yi = s[tensor].split(y, y_factor)\n xo, xi = s[tensor].split(x, x_factor)\n s[tensor].bind(zo, te.thread_axis(\"blockIdx.z\"))\n s[tensor].bind(zi, te.thread_axis(\"threadIdx.z\"))\n s[tensor].bind(yo, te.thread_axis(\"blockIdx.y\"))\n s[tensor].bind(yi, te.thread_axis(\"threadIdx.y\"))\n s[tensor].bind(xo, te.thread_axis(\"blockIdx.x\"))\n s[tensor].bind(xi, te.thread_axis(\"threadIdx.x\"))\n s[tensor].reorder(zo, yo, xo, zi, yi, xi)\n return zo, yo, xo, zi, yi, xi", "def geodesicDilate3D(imIn, imMask, imOut, n=1, se=m3D.CUBOCTAHEDRON):\r\n \r\n lowerGeodesicDilate3D(imIn, imMask, imOut, n, se=se)", "def layer_offsets(self):\n ...", "def automorphism_group_order(self):\n if self.q == 2:\n return sage.all.factorial(self.L.size()) * (1 + int(self.variant.is_bipartite()))\n 
elif self.variant.is_bipartite():\n return (self.L.size() - 1) * self.d * self.L.size() * (self.K.size() - 1) * 2\n elif self.variant.is_minus() or self.p == 2:\n return (self.L.size() - 1) * self.d * self.L.size()\n else:\n return (self.L.size() - 1) * self.d", "def zernike_coeff(filename=None,zernike_max_order=20):\n hdu = pf.open(filename)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n sigma = 1.08/0.27\n for hdui in hdu[1:]:\n img = hdui.data[0][4:].reshape(npix,npix)\n img = rebin(img,(40,40))\n M20,M22,M31,M33=complexMoments(data=img,sigma=sigma)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,M20,M22,M31,M33])\n data=np.array(data)\n betaAll=[]\n #betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n #betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n #betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n #betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n #betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n x=hdu[0].header['x']\n y=hdu[0].header['y']\n z=hdu[0].header['z']\n phi = hdu[0].header['phi']\n theta=hdu[0].header['theta']\n s_fwhm=hdu[0].header['s_fwhm']\n e1=hdu[0].header['e1']\n e2=hdu[0].header['e2']\n return x,y,z,theta,phi,s_fwhm,e1,e2,betaAll,R2adjAll", "def p_z(self, d, w_id):\n f1 = (self.ndk[d]+self.alpha) / \\\n (np.sum(self.ndk[d])+self.nTopics*self.alpha)\n f2 = (self.nkw[:, w_id]+self.beta) / \\\n (self.nk+self.beta*self.VT)\n\n p = f1*f2\n return p / np.sum(p)", "def positions(self):\n return self.preorder() # return entire preorder iteration", "def get_zarr(self, position):\n pos_info = self.position_map[position]\n well = pos_info['well']\n pos = pos_info['name']\n return self.store[well][pos][self.arr_name]", "def _calcOrderedCellVertexIDs(self):\n ids = numerix.zeros((8, self.nx, self.ny, self.nz), 'l')\n indices = numerix.indices((self.nx, self.ny, self.nz))\n ids[1] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1) + 1) * (self.nx + 1)\n ids[0] = ids[1] + 1\n ids[3] = indices[0] + (indices[1] + (indices[2] + 1) * (self.ny + 1)) * (self.nx + 1)\n ids[2] = ids[3] + 1\n ids[5] = indices[0] + (indices[1] + indices[2] * (self.ny + 1) + 1) * (self.nx + 1)\n ids[4] = ids[5] + 1\n ids[7] = indices[0] + (indices[1] + indices[2] * (self.ny + 1)) * (self.nx + 1)\n ids[6] = ids[7] + 1\n\n return numerix.reshape(ids.swapaxes(1, 3), (8, self.numberOfCells))", "def numbering_rafts(rafts_loc, rafts_radii, num_of_rafts):\n orbiting_center = np.mean(rafts_loc, axis=0)\n orbiting_dist = np.sqrt((rafts_loc[:, 0] - orbiting_center[0]) ** 2 + (rafts_loc[:, 1] - orbiting_center[1]) ** 2)\n sorted_index = orbiting_dist.argsort()\n dist_sorted = orbiting_dist[sorted_index]\n rafts_loc_sorted = rafts_loc[sorted_index, :]\n rafts_radii_sorted = rafts_radii[sorted_index]\n\n # assign layer\n layer_index = np.ones(num_of_rafts, dtype=int)\n layer_num = 1\n for raft_id in np.arange(1, num_of_rafts):\n if dist_sorted[raft_id] - dist_sorted[raft_id - 1] > rafts_radii_sorted[raft_id]:\n layer_num = layer_num + 1\n layer_index[raft_id] = layer_num\n\n # calculate orbiting angle, 
note the two negative signs in front of both y- and x- components.\n # For y-component, it is for flipping image axis.\n # For x-component, it is make the counting start at x-axis and go clockwise.\n # Note the value of arctan2 is [-pi, pi]\n orbiting_angles = np.arctan2(-(rafts_loc_sorted[:, 1] - orbiting_center[1]),\n -(rafts_loc_sorted[:, 0] - orbiting_center[0])) * 180 / np.pi\n\n # concatenate and sort\n rafts_loc_radii_dist_angle_layer = \\\n np.column_stack((rafts_loc_sorted[:, 0], rafts_loc_sorted[:, 1],\n rafts_radii_sorted, dist_sorted, orbiting_angles, layer_index))\n\n sorted_index2 = np.lexsort((orbiting_angles, layer_index))\n\n rafts_loc_radii_dist_angle_layer_sorted = rafts_loc_radii_dist_angle_layer[sorted_index2]\n\n rafts_loc_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 0:2].astype(int)\n rafts_radii_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 2].astype(int)\n dist_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 3]\n angles_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 4]\n layer_index_sorted2 = rafts_loc_radii_dist_angle_layer_sorted[:, 5]\n\n return rafts_loc_sorted2, rafts_radii_sorted2, dist_sorted2, angles_sorted2, layer_index_sorted2", "def _derZ(self, w, x, y, z):\n raise NotImplementedError()", "def WFC3_zeropoint(image):\n logging.debug('Calculating zeropoint for {}'.format(image))\n\n PHOTFLAM = fits.getheader(image)['PHOTFLAM']\n try:\n PHOTPLAM = fits.getheader(image)['PHOTPLAM']\n except:\n PHOTPLAM = fits.getheader(image, 1)['PHOTPLAM']\n\n ABMAG_ZEROPOINT=-2.5*np.log10(PHOTFLAM)-5*np.log10(PHOTPLAM)-2.408\n\n return ABMAG_ZEROPOINT", "def NNZ(self):\n return _hypre.HypreParMatrix_NNZ(self)", "def get_stage_z(self):\n raise NotImplementedError" ]
[ "0.57028675", "0.55984366", "0.5478423", "0.54625237", "0.5441317", "0.537248", "0.5289451", "0.52060914", "0.5195802", "0.51892036", "0.51678", "0.5121228", "0.51193726", "0.50830746", "0.507015", "0.5063056", "0.5060691", "0.5056381", "0.5056132", "0.50311744", "0.5008574", "0.49935734", "0.4985095", "0.49817", "0.4966024", "0.49506593", "0.4943147", "0.4935495", "0.4928189", "0.49159983", "0.4913258", "0.4910326", "0.49096045", "0.49035186", "0.4892107", "0.48895133", "0.48888743", "0.4884228", "0.4879878", "0.4874475", "0.48673055", "0.48665506", "0.4864647", "0.48631766", "0.48509538", "0.48406985", "0.48390788", "0.4835115", "0.48332545", "0.4809722", "0.48005402", "0.47995275", "0.47986054", "0.47986054", "0.47986054", "0.4794982", "0.47855014", "0.47848943", "0.47845116", "0.4771352", "0.47704753", "0.47680798", "0.4744903", "0.47445256", "0.47414848", "0.4740209", "0.47401854", "0.47384173", "0.4733435", "0.4731819", "0.4717841", "0.47118893", "0.47106236", "0.47102574", "0.47076097", "0.46953124", "0.46918586", "0.46917245", "0.46860537", "0.4682225", "0.46759072", "0.46740425", "0.46711913", "0.46698368", "0.46695068", "0.46644956", "0.4660328", "0.4660298", "0.46596786", "0.4659233", "0.4657961", "0.4649398", "0.46369883", "0.4634399", "0.46340272", "0.46269143", "0.46265042", "0.46103644", "0.46055695", "0.46043003" ]
0.72016543
0
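The first snippet in the negatives list above computes bilinear blending weights from fractional pixel shifts and mixes four candidate views with them. Below is a minimal, self-contained sketch of that weighting scheme. It is not the dataset's code: `res` standing for the four corner views and the floor-based corner computation are assumptions, since the snippet starts mid-function and its helpers (`repeat_weights`, `desired_shape`) are not defined in it.

import numpy as np
import torch

def blend_four_views(res, x_shifted, y_shifted):
    # res: sequence of four tensors sampled at the (low, low),
    # (low, high), (high, low) and (high, high) integer corners of
    # the shifted coordinates (an assumption about the caller).
    x_low = np.floor(x_shifted)
    y_low = np.floor(y_shifted)
    # Distance-based weights: a sample sitting exactly on the low
    # corner gets weight 1 for that corner and 0 for the high one.
    d_x_low = 1.0 - (x_shifted - x_low)
    d_x_high = 1.0 - d_x_low
    d_y_low = 1.0 - (y_shifted - y_low)
    d_y_high = 1.0 - d_y_low
    weights = [d_x_low * d_y_low, d_x_low * d_y_high,
               d_x_high * d_y_low, d_x_high * d_y_high]
    novel = torch.zeros_like(res[0])
    for w, r in zip(weights, res):
        # The (H, W) weight map broadcasts against each view tensor,
        # e.g. over a leading channel dimension of shape (C, H, W).
        novel = novel + torch.from_numpy(w.astype(np.float32)) * r
    return novel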
Gets the extents for the axes from the current View. The globally computed ranges can optionally override the extents.
def get_extents(self, view, ranges): ndims = len(view.dimensions()) num = 6 if self.projection == '3d' else 4 if self.apply_ranges: if ranges: dims = view.dimensions() x0, x1 = ranges[dims[0].name] if ndims > 1: y0, y1 = ranges[dims[1].name] else: y0, y1 = (np.NaN, np.NaN) if self.projection == '3d': if len(dims) > 2: z0, z1 = ranges[dims[2].name] else: z0, z1 = np.NaN, np.NaN else: x0, x1 = view.range(0) y0, y1 = view.range(1) if ndims > 1 else (np.NaN, np.NaN) if self.projection == '3d': z0, z1 = view.range(2) if self.projection == '3d': range_extents = (x0, y0, z0, x1, y1, z1) else: range_extents = (x0, y0, x1, y1) else: range_extents = (np.NaN,) * num if self.apply_extents: norm_opts = self.lookup_options(view, 'norm').options if norm_opts.get('framewise', False) or self.dynamic: extents = view.extents else: extent_list = self.hmap.traverse(lambda x: x.extents, [Element]) extents = util.max_extents(extent_list, self.projection == '3d') else: extents = (np.NaN,) * num return tuple(l1 if l2 is None or not np.isfinite(l2) else l2 for l1, l2 in zip(range_extents, extents))
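The closing line of `get_extents` above overlays the per-element extents on the range-derived extents, keeping an explicit extent only where it is a finite number. A minimal sketch of just that merge step, with a usage example:

import numpy as np

def merge_extents(range_extents, extents):
    # Each explicit extent wins only where it is a finite number;
    # otherwise the value derived from the computed ranges is kept.
    return tuple(l1 if l2 is None or not np.isfinite(l2) else l2
                 for l1, l2 in zip(range_extents, extents))

# None and NaN entries fall back to the range-derived values:
assert merge_extents((0, 0, 1, 1), (np.nan, None, 2, np.nan)) == (0, 0, 2, 1)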
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extents(self):\n self._updateExtents()\n return self.mExtents", "def extents(self):\n x0, y0, width, height = self._rect_bbox\n xmin, xmax = sorted([x0, x0 + width])\n ymin, ymax = sorted([y0, y0 + height])\n return xmin, xmax, ymin, ymax", "def extents(self):\n\n return self._local", "def extents(self):\n if self.direction == 'horizontal':\n vmin = self._selection_artist.get_x()\n vmax = vmin + self._selection_artist.get_width()\n else:\n vmin = self._selection_artist.get_y()\n vmax = vmin + self._selection_artist.get_height()\n return vmin, vmax", "def extent(self):\n return self._ax.extent", "def get_extent(self):\n pass", "def extent(self):\n return np.array(self._extent)", "def _get_extent_axes(self, x):\n if not hasattr(self, 'get_subplotspec'):\n return [self]\n y = ('y' if x == 'x' else 'x')\n idx = (0 if x == 'x' else 1)\n argfunc = (np.argmax if x == 'x' else np.argmin)\n irange = self._range_gridspec(x)\n axs = [ax for ax in self.figure._axes_main\n if ax._range_gridspec(x) == irange]\n if not axs:\n return [self]\n else:\n pax = axs.pop(argfunc([ax._range_gridspec(y)[idx] for ax in axs]))\n return [pax, *axs]", "def return_extents(self):\n\n return [qm.tree.mins, qm.tree.maxs]", "def GetExtents(self, transform=None):\n # Prepare GDAL functions to compute extents\n x_size, y_size = self.RasterXSize, self.RasterYSize\n\n # Compute four corners in destination projection\n upper_left = self.PixelCoordinates(0, 0,\n transform=transform)\n upper_right = self.PixelCoordinates(x_size, 0,\n transform=transform)\n lower_left = self.PixelCoordinates(0, y_size,\n transform=transform)\n lower_right = self.PixelCoordinates(x_size, y_size,\n transform=transform)\n x_values, y_values = list(zip(upper_left, upper_right,\n lower_left, lower_right))\n\n # Return lower-left and upper-right extents\n return Extents(lower_left=XY(min(x_values), min(y_values)),\n upper_right=XY(max(x_values), max(y_values)))", "def extent(self):\n ulx, uly, lrx, lry = self.ul_lr\n return ulx, lry, lrx, uly", "def extent(self):\r\n if not hasattr(self, '_extent'):\r\n self._extent = conf.lib.clang_getCursorExtent(self)\r\n\r\n return self._extent", "def extent(self):\n return self._extent", "def extent(self):\n rx0 = gxapi.float_ref()\n ry0 = gxapi.float_ref()\n rz0 = gxapi.float_ref()\n rx1 = gxapi.float_ref()\n ry1 = gxapi.float_ref()\n rz1 = gxapi.float_ref()\n self.gxvox.get_area(rx0, ry0, rz0, rx1, ry1, rz1)\n if self.is_depth:\n return gxgm.Point2(((rx0.value, ry0.value, -rz1.value), (rx1.value, ry1.value, -rz0.value)))\n return gxgm.Point2(((rx0.value, ry0.value, rz0.value), (rx1.value, ry1.value, rz1.value)),\n self.coordinate_system)", "def geoextent(self):\r\n return self.series_extent", "def extent(self):\n return self.index.max() - self.index.min(), self.columns.max() - self.columns.min()", "def compute_axes(self):\n mini, maxi = self._get_extremes()\n self.y_axis.min = mini\n self.y_axis.max = maxi\n self.y_axis._max_min()\n\n if not None in [s.xvalues for s in self]:\n mini, maxi = self._get_extremes('xvalues')\n self.x_axis.min = mini\n self.x_axis.max = maxi\n self.x_axis._max_min()", "def get_extent(self):\n geot = self.geotransform()\n return (geot[0], geot[3] + self.YSize() * geot[5],\n geot[0] + self.XSize() * geot[1], geot[3])", "def extent(self):\n\n x = np.array([0, self.nx]) * self.dx + self.corner_grid.x0\n ypoint = [0, self.ny] if self.origin == 'lower-left' else [self.ny, 0]\n y = np.array(ypoint) * self.dy + self.corner_grid.y0\n\n return [x[0], x[1], y[0], y[1]]", "def 
extent(self):\n left = self.transform[0]\n right = left + self.transform[1] * self.shape[1]\n top = self.transform[3]\n bottom = top + self.transform[5] * self.shape[0]\n return left, right, bottom, top", "def _getAxesExtent(\n self,\n x0: float,\n y0: float,\n x1: float,\n y1: float,\n enabledAxes: Optional[EnabledAxes] = None,\n ) -> AxesExtent:\n if enabledAxes is None:\n enabledAxes = self.enabledAxes\n\n y2_0, y2_1 = y0, y1\n left, top, width, height = self.plot.getPlotBoundsInPixels()\n\n if not all(enabledAxes) and not self.plot.isKeepDataAspectRatio():\n # Handle axes disabled for zoom if plot is not keeping aspec ratio\n if not enabledAxes.xaxis:\n x0, x1 = left, left + width\n if not enabledAxes.yaxis:\n y0, y1 = top, top + height\n if not enabledAxes.y2axis:\n y2_0, y2_1 = top, top + height\n\n if self.plot.isKeepDataAspectRatio() and height != 0 and width != 0:\n ratio = width / height\n xextent, yextent = math.fabs(x1 - x0), math.fabs(y1 - y0)\n if xextent != 0 and yextent != 0:\n if xextent / yextent > ratio:\n areaHeight = xextent / ratio\n center = 0.5 * (y0 + y1)\n y0 = center - numpy.sign(y1 - y0) * 0.5 * areaHeight\n y1 = center + numpy.sign(y1 - y0) * 0.5 * areaHeight\n else:\n areaWidth = yextent * ratio\n center = 0.5 * (x0 + x1)\n x0 = center - numpy.sign(x1 - x0) * 0.5 * areaWidth\n x1 = center + numpy.sign(x1 - x0) * 0.5 * areaWidth\n\n # Convert to data space\n x0, y0 = self.plot.pixelToData(x0, y0, check=False)\n x1, y1 = self.plot.pixelToData(x1, y1, check=False)\n y2_0 = self.plot.pixelToData(None, y2_0, axis=\"right\", check=False)[1]\n y2_1 = self.plot.pixelToData(None, y2_1, axis=\"right\", check=False)[1]\n\n return AxesExtent(\n min(x0, x1),\n max(x0, x1),\n min(y0, y1),\n max(y0, y1),\n min(y2_0, y2_1),\n max(y2_0, y2_1),\n )", "def getExtentUnits(self):\n return _libsbml.Model_getExtentUnits(self)", "def get_data_extent(self):\n xs, ys = self.xs, self.ys\n xmin, xmax = min(xs), max(xs)\n ymin, ymax = min(xy), max(ys)\n w = maxx - minx\n h = maxy - miny\n return xmin, ymax, w, h", "def axes(self):\n return self._axes", "def axes(self):\n return self._axes", "def bounds(self, axis, view=None):\n if view is None:\n view = self\n if axis not in self._vshare.bounds:\n self._vshare.bounds[axis] = self._compute_bounds(axis, view)\n return self._vshare.bounds[axis]", "def getCurrentExtent(self):\n if not self.currentBox:\n extent = None\n else:\n extent = boxToExtent(self.currentBox)\n return extent", "def get_extent(fpath):\n extents = []\n with h5py.File(fpath, mode='r') as f:\n for key, value in f['label/label-0'].attrs.items():\n if key.lower().endswith('extent') and isinstance(value, np.ndarray):\n extents.append(value)\n \n extents = np.stack(extents, axis=0)\n maxs = np.max(extents, axis=0)\n mins = np.min(extents, axis=0)\n axis_slices = []\n for min_, max_ in zip(mins[::2], maxs[1::2]):\n axis_slices.append(slice(min_, max_, 1))\n return tuple(axis_slices)", "def GetExtent(vDataSet):\r\n return [vDataSet.GetExtendMinX(),vDataSet.GetExtendMaxX(),\r\n vDataSet.GetExtendMinY(),vDataSet.GetExtendMaxY(),\r\n vDataSet.GetExtendMinZ(),vDataSet.GetExtendMaxZ()]", "def extent(self):\n if self.x is not None:\n if self.y is not None:\n if self.z is not None:\n return (self.x.min(), self.x.max(),\n self.y.min(), self.y.max(),\n self.z.min(), self.z.max())\n return (self.x.min(), self.x.max(),\n self.y.min(), self.y.max())\n return (self.x.min(), self.x.max())\n\n elif self.r is not None and self.t is not None:\n if self.z is not None:\n return (self.z.min(), 
self.z.max(),\n self.r.min(), self.r.max(),\n self.t.min(), self.t.max())\n return (self.r.min(), self.r.max(),\n self.t.min(), self.t.max())\n\n return ()", "def axes(self) -> np.ndarray: # array[Axes]\n return self._axes", "def _axes_domain(self, *args, **kwargs):\n # See _add_gridline_label for detials\n lon_0 = self.axes.projection.proj4_params.get('lon_0', 0)\n x_range, y_range = type(self)._axes_domain(self, *args, **kwargs)\n x_range = np.asarray(x_range) + lon_0\n return x_range, y_range", "def get_axes(self) -> VGroup:\n return self.axes", "def get_axes(self) -> VGroup:\n return self.axes", "def getExtent(self):\n extent = self.parent.biomeGeometry.extent\n return extent", "def xlim(self):\r\n lim = [ax.get_xlim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim", "def get_data_extent(self):\n raise NotImplementedError, \"Derived must override\"", "def bounds(self):\n return self.xmin, self.xmax, self.ymin, self.ymax", "def get_data_extent(self):\n x, y = self.xy[0], self.xy[1]\n w, h = self.width, self.height\n return x, y, w, h", "def bbox(self):\n return np.array(self.path.get_extents()).ravel(order='F')", "def get_data_extent(self):\n \n x, y = self.xy[0], self.xy[1]\n w, h = self.radius, self.radius\n return x-w, y+w, w, h", "def bounds(self):\n return self.GetBounds()", "def getDimensions(self):\n return _libsbml.BoundingBox_getDimensions(self)", "def full_extent(self):\n log.debug(\"Set full extent\")\n self.auto_scale = False\n local_plot = self.main_curve_dialog.get_plot()\n local_plot.set_axis_limits(0, 0, 4096)\n local_plot.replot()", "def bounds(self):\n return self._bounds", "def extent(self) -> Tuple[float]:\n ext = (\n min(self.putin.geometry.x, self.takeout.geometry.x),\n min(self.putin.geometry.y, self.takeout.geometry.y),\n max(self.putin.geometry.x, self.takeout.geometry.x),\n max(self.putin.geometry.y, self.takeout.geometry.y),\n )\n return ext", "def extents(nodes):\n from numpy import min, max\n return ( min(nodes[:,0]), max(nodes[:,0]),\n min(nodes[:,1]), max(nodes[:,1]),\n min(nodes[:,2]), max(nodes[:,2]) )", "def bounds(self):\n return self.substrates.bounds", "def _compute_bounds(self, axis, view):\n return None", "def get_obs_extent(el, curr_mesh):\n dim = 2\n extents = get_domain_bounding_box(curr_mesh)\n obs_extents = cpy.deepcopy(extents)\n domain_cent = np.zeros(dim, dtype=float)\n for d in range(dim):\n domain_cent[d] = 0.5*np.sum(extents[:,d])\n\n #obs_extents[0][0] = extents[0][0] + 5.000*el[0];\n #obs_extents[1][0] = extents[1][0] - 5.000*el[0];\n obs_extents[0][0] = 0.35; obs_extents[1][0]=1.65;\n obs_extents[0][1] = domain_cent[1] - 1.000*el[0];\n obs_extents[1][1] = domain_cent[1] + 1.000*el[0];\n \n \n \n return obs_extents", "def get_eangles(self):\n return self.eangles", "def get_object_bounds(self):\n if len(self._object_bounds) == 0:\n # Nothing plotted yet\n return -.01, .01, -.01, .01\n xmins, xmaxs, ymins, ymaxs = np.array(self._object_bounds).T\n xmax = max(xmaxs.max(), xmins.max())\n xmin = min(xmins.min(), xmaxs.min())\n ymax = max(ymaxs.max(), ymins.max())\n ymin = min(ymins.min(), ymaxs.min())\n return xmin, xmax, ymin, ymax", "def get_bounds(self):\n return self._geometry.bounds", "def extent(self) -> typing.Tuple[str, ...]:\n return self._extent.members()", "def getDimensionsExplicitlySet(self):\n return _libsbml.BoundingBox_getDimensionsExplicitlySet(self)", "def getZoomEnabledAxes(self) -> EnabledAxes:\n return self.__zoomEnabledAxes", "def bounds(self):\n return self.kernel.bounds", "def 
age_extents(self):\n age_df = self.dismod_file.age\n return age_df.age.min(), age_df.age.max()", "def get_axis_limits(robot=None):\n\n if not robot:\n robots = get_robot_roots()\n if not robots:\n raise MimicError('Nothing Selected; Select a valid robot')\n return\n\n if len(robots) > 1:\n MimicError('Too many selections: Select a single robot')\n return\n\n robot = robots[0]\n\n target_ctrl_path = get_target_ctrl_path(robot)\n\n axis_position_limits = {}\n\n # TODO: HARD CODED - Number of robot axes; should include external axes\n num_axes = 6\n\n for i in range(num_axes):\n axis_number = i + 1 # Axis numbers are 1-indexed\n axis_name = 'Axis {}'.format(axis_number)\n val_min = int(pm.getAttr(target_ctrl_path + '.axis{}Min'.format(axis_number)))\n val_max = int(pm.getAttr(target_ctrl_path + '.axis{}Max'.format(axis_number)))\n\n # Save value to dictionary\n axis_position_limits[axis_name] = {'Min Limit': val_min,\n 'Max Limit': val_max}\n \n # TO-DO: Add external axes\n return axis_position_limits", "def bounds(self):\n if self.change_dimensionality:\n return [self._bounds[0]] * self.N\n else:\n return self._bounds", "def __call__(self, axes, renderer):\n bbox0 = self.get_original_position(axes, renderer)\n bbox = bbox0\n\n x1, y1, w, h = bbox.bounds\n extesion_fraction = self.extesion_fraction\n dw, dh = w*extesion_fraction, h*extesion_fraction\n\n if self.extend in [\"min\", \"both\"]:\n if self.orientation == \"horizontal\":\n x1 = x1 + dw\n else:\n y1 = y1+dh\n\n if self.extend in [\"max\", \"both\"]:\n if self.orientation == \"horizontal\":\n w = w-2*dw\n else:\n h = h-2*dh\n\n return Bbox.from_bounds(x1, y1, w, h)", "def extent_in_crs(self, crs=wgs84):\n\n # this is not so trivial\n # for optimisation we will transform the boundaries only\n poly = self.extent_as_polygon(crs=crs)\n _i, _j = poly.exterior.xy\n return [np.min(_i), np.max(_i), np.min(_j), np.max(_j)]", "def dims(self):\n return (self.startx, self.starty, self.endx, self.endy)", "def domain_bounds(self):\n return self._xmin, self._xmax, self._ymin, self._ymax, self._zmin, self._zmax", "def extent(self):\r\n return conf.lib.clang_getTokenExtent(self._tu, self)", "def getDimensions(self):\n return self._majax, self._minax, self._pa", "def Extrema(self):\n ymin = np.min(self._corners[:, 1])\n xmin = np.min(self._corners[:, 0])\n ymax = np.max(self._corners[:, 1])\n xmax = np.max(self._corners[:, 0])\n return ymin, xmin, ymax, xmax", "def size(self):\n return self.__xmax, self.__ymax", "def zoomEnabledAxes(self) -> EnabledAxes:\n return self._zoom.enabledAxes", "def bounds(self):\n \n return self.osmdb.bounds()", "def ylim(self):\r\n lim = [ax.get_ylim() for ax in self._subaxes]\r\n if lim == []:\r\n lim = None\r\n return lim", "def get_axes_pixelsize(self):\n bbox = self.axes.get_window_extent().transformed(self.figure.dpi_scale_trans.inverted())\n width, height = bbox.width, bbox.height\n width *= self.figure.dpi\n height *= self.figure.dpi\n return width, height", "def naxes(self, world=True):\n return _coordsys.coordsys_naxes(self, world)", "def extent(self, start=None, finish=None):\n start, finish = self.bounds(start, finish)\n try:\n return finish - start + 1\n except TypeError:\n return None", "def get_bbox(self, obj):\n renderer = self.figure.canvas.get_renderer()\n transformer = self.figure.dpi_scale_trans.inverted()\n return obj.get_window_extent(renderer=renderer).transformed(transformer)", "def full_extent(ax, pad=0.0):\n # For text objects, we need to draw the figure first, otherwise the extents are 
undefined.\n ax.figure.canvas.draw()\n items = ax.get_xticklabels() + ax.get_yticklabels()\n items += [ax, ax.title]\n bbox = Bbox.union([item.get_window_extent() for item in items])\n return bbox.expanded(pad + 1, pad + 1)", "def full_extent(ax, pad=0.0):\n # For text objects, we need to draw the figure first, otherwise the extents\n # are undefined.\n ax.figure.canvas.draw()\n items = ax.get_xticklabels() + ax.get_yticklabels() \n# items += [ax, ax.title, ax.xaxis.label, ax.yaxis.label]\n items += [ax, ax.title]\n bbox = Bbox.union([item.get_window_extent() for item in items])\n return bbox.expanded(1.0 + pad, 1.0 + pad)", "def get_bounds(self):\n raise Exception(\"Non-implemented base class method.\")", "def getDimensions(self):\n return _libsbml.Layout_getDimensions(self)", "def get_bounds(self):\n bottom_right = np.asarray([self.coords[k][0] for k in range(self.dim)])\n upper_left = np.asarray([self.coords[k][-1] for k in range(self.dim)])\n return bottom_right, upper_left", "def get_bounds(self):\n log.debug(str(inspect.stack()[1][3]) + \"--> OC.get_bounds()\")\n\n # TODO: Move the operation out of here.\n\n xmin = Inf\n ymin = Inf\n xmax = -Inf\n ymax = -Inf\n\n # for obj in self.object_list:\n for obj in self.get_list():\n try:\n gxmin, gymin, gxmax, gymax = obj.bounds()\n xmin = min([xmin, gxmin])\n ymin = min([ymin, gymin])\n xmax = max([xmax, gxmax])\n ymax = max([ymax, gymax])\n except Exception as e:\n log.warning(\"DEV WARNING: Tried to get bounds of empty geometry. %s\" % str(e))\n\n return [xmin, ymin, xmax, ymax]", "def get_body_extents(self, bodyName: str, shapeIdx: int = 0) -> np.ndarray:\n return self._sim.getBodyExtents(bodyName, shapeIdx)", "def axes_active(self) -> np.ndarray: # array[Axes]\n return self.axes.flat[:self.n_plots]", "def update_xylims_extremes(xlims_extremes, ylims_extremes):\n xlims, ylims = plt.xlim(), plt.ylim()\n xlims_extremes = [\n min(xlims[0], xlims_extremes[0]),\n max(xlims[1], xlims_extremes[1])\n ]\n ylims_extremes = [\n min(ylims[0], ylims_extremes[0]),\n max(ylims[1], ylims_extremes[1])\n ]\n return xlims_extremes, ylims_extremes", "def _get_extremes(self, attr='values'):\n # calculate the maximum and minimum for all series\n series_max = [0]\n series_min = [0]\n for s in self:\n if s is not None:\n series_max.append(s.max(attr))\n series_min.append(s.min(attr))\n return min(series_min), max(series_max)", "def getExtentConversionFactor(self):\n return _libsbml.Submodel_getExtentConversionFactor(self)", "def viewed_rect(self):\n\t\tif self._has_local_coords:\n\t\t\twidth, height = self.size\n\t\t\treturn (0, 0, width, height)\n\t\telse:\n\t\t\treturn self.bounds", "def _compute_equal_axes_ranges(x_min, x_max, y_min, y_max):\n\n x_axis_min, x_axis_max, y_axis_min, y_axis_max = x_min, x_max, y_min, y_max\n x_range, y_range = abs(x_max - x_min), abs(y_max - y_min)\n if x_range > y_range:\n y_center = (y_max + y_min) / 2\n y_axis_max = y_center + x_range / 2\n y_axis_min = y_center - x_range / 2\n else:\n x_center = (x_max + x_min) / 2\n x_axis_max = x_center + y_range / 2\n x_axis_min = x_center - y_range / 2\n\n return x_axis_min, x_axis_max, y_axis_min, y_axis_max", "def allAxes( mv ):\n if mv is None: return None\n return mv.getAxisList()", "def get_xlim(self):\n return (self._frame.GetXaxis().GetXmin(), self._frame.GetXaxis().GetXmax())", "def bounds(self):\n b = []\n\n for dim in self.dimensions:\n if dim.size == 1:\n b.append(dim.bounds)\n else:\n b.extend(dim.bounds)\n\n return b", "def rf_extents(rf_dict):\n x_min, y_min, x_max, y_max 
= np.inf, np.inf, -np.inf, -np.inf\n for rf in rf_dict:\n x_min = np.min([rf['on_center_x'], x_min])\n x_max = np.max([rf['on_center_x'], x_max])\n y_min = np.min([rf['on_center_y'], y_min])\n y_max = np.max([rf['on_center_y'], y_max])\n if x_min == x_max:\n x_max += 1\n if y_min == y_max:\n y_max += 1\n return {\n 'x_min': x_min,\n 'x_max': x_max,\n 'y_min': y_min,\n 'y_max': y_max\n }", "def get_axis_vals(self):\n return self._x_axis, self._y_axis", "def get_original_position(self, axes, renderer):\n if self._locator is None:\n bbox = axes.get_position(original=True)\n else:\n bbox = self._locator(axes, renderer)\n return bbox", "def bounds(self):\n return self.min_col, self.min_row, self.max_col, self.max_row", "def list_extents(extent_entries, node_id):\n extents = extent_entries[node_id]\n result = []\n for extent_entry in extents:\n result.append({\n \"offset\": extent_entry.key.content.offset,\n \"size\": extent_entry.record.size,\n \"block\": extent_entry.record.block\n })\n return result", "def princ_axes(self):\r\n # get coordinates of mesh\r\n coords = BoundaryMesh(self.mesh,\"exterior\",True).coordinates()\r\n\r\n # get distances\r\n dist = np.sqrt(np.einsum('ij->i', np.square(coords)))\r\n\r\n # get maximal value\r\n maxind = np.argmax(dist)\r\n maxdist = dist[maxind]\r\n\r\n # get minimal value\r\n minind = np.argmin(dist)\r\n mindist = dist[minind]\r\n\r\n # find coordinates of maximal and minimal points\r\n maxax = coords[maxind, :]\r\n minax = coords[minind, :]\r\n\r\n # get the cross product of these vectors,\r\n # which is the ideal mid-size axis\r\n idealax = np.cross(maxax,minax)\r\n\r\n # get the dot product of this ideal axis with the coordinates,\r\n # take the absolute value, and find the index of the maximum\r\n secind = np.argmax(np.abs(np.einsum('j,ij->i',idealax,coords)))\r\n\r\n # get the second-axis distance\r\n secdist = dist[secind]\r\n\r\n return([maxdist, secdist, mindist], [\"a\", \"b\", \"c\"])", "def _get_colorbar_limits(self):\n if self.boundaries is not None:\n C = self.boundaries\n if self.extend in [\"min\", \"both\"]:\n C = C[1:]\n\n if self.extend in [\"max\", \"both\"]:\n C = C[:-1]\n return min(C), max(C)\n else:\n return self.get_clim()", "def getAxisUnits(self, dim=0): \n return self.__axis_units__[dim]", "def line_axes (self):\n return self._line_axes" ]
[ "0.72161704", "0.6875994", "0.6859144", "0.67906785", "0.65129864", "0.63847166", "0.63576", "0.6300293", "0.6264915", "0.6224861", "0.61757976", "0.61531943", "0.61249256", "0.61061025", "0.60827875", "0.6021081", "0.5992878", "0.599001", "0.59846133", "0.5962699", "0.5962071", "0.59305775", "0.5904698", "0.5868598", "0.5868598", "0.5862479", "0.5849482", "0.58306336", "0.5829935", "0.58241355", "0.5770558", "0.5769583", "0.56367195", "0.56367195", "0.56363386", "0.5603287", "0.5593625", "0.55301875", "0.5515608", "0.5463437", "0.545894", "0.54281974", "0.5403324", "0.5398237", "0.5380358", "0.537666", "0.5365526", "0.5362256", "0.53566873", "0.53509814", "0.53469825", "0.53302044", "0.53144056", "0.5312324", "0.5295565", "0.52940476", "0.529371", "0.52905124", "0.52807546", "0.5278868", "0.5269813", "0.5268965", "0.52670527", "0.52526116", "0.5228616", "0.5202364", "0.51943797", "0.51870483", "0.5174024", "0.5149918", "0.51392674", "0.51363224", "0.51317835", "0.51023203", "0.5095778", "0.5094491", "0.5090022", "0.5089708", "0.5074896", "0.50683004", "0.5065055", "0.5047816", "0.5044498", "0.5041084", "0.5037363", "0.50357383", "0.5025392", "0.5006689", "0.49999592", "0.4995978", "0.49907959", "0.49859688", "0.49858397", "0.4980997", "0.49752915", "0.49734533", "0.49653488", "0.4963193", "0.4959817", "0.49523264" ]
0.7218878
0
Set the plot(s) to the given frame number. Operates by manipulating the matplotlib objects held in the self._handles dictionary. If n is greater than the number of available frames, update using the last available frame.
def update_frame(self, key, ranges=None):
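Only the signature of `update_frame` appears in the document field, so the following is a hypothetical sketch of the behaviour the docstring describes: matplotlib artists held in a `_handles` dictionary are mutated in place rather than redrawn, and the frame number is clamped to the last available frame. The `LinePlot` class and its `frames` attribute are invented for illustration and are not part of the source library.

import matplotlib.pyplot as plt

class LinePlot:
    def __init__(self, frames):
        self.frames = frames  # list of (x, y) arrays, one per frame
        fig, ax = plt.subplots()
        line, = ax.plot(*frames[0])
        # Keep references to the objects we will mutate later.
        self._handles = {'fig': fig, 'axis': ax, 'artist': line}

    def update_frame(self, n):
        # Clamp to the last available frame, as the docstring specifies.
        n = min(n, len(self.frames) - 1)
        x, y = self.frames[n]
        self._handles['artist'].set_data(x, y)
        self._handles['axis'].relim()
        self._handles['axis'].autoscale_view()
        self._handles['fig'].canvas.draw_idle()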
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __plot_n__(self, refresh=False, *args):\n # If plot is not requested, return:\n if not self.plotneVar.get() or not self.plotniVar.get():\n return\n\n # Check for a closed window:\n if 'n' in self.plots.keys() and not matplotlib.pyplot.fignum_exists(self.plots['n'].number):\n del self.plots['n']\n refresh = False\n # Update the existing plot, if it exists\n refresh = refresh or 'n' in self.plots.keys()\n if refresh:\n if 'n' in self.plots.keys():\n fig = self.plots['n']\n fig = matplotlib.pyplot.figure(fig.number)\n fig.clear()\n else:\n return\n # Make a new window:\n else:\n fig = matplotlib.pyplot.figure(figsize=(4,3))\n fig.canvas.set_window_title('n, time = ' + '{:.3f}'.format(1e9*self.imp.t(self.it)))\n ax = fig.add_subplot(111)\n\n # Plot:\n if self.plotneVar.get():\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.ne((self.it), self.ir)[0], 'r-', label='e')\n if self.plotniVar.get():\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.ni((self.it), self.ir)[0], 'b-', label='i')\n\n ax.set_xlabel('r (um)', fontsize=12)\n ax.set_ylabel('n (1/cc)', fontsize=12)\n ax.legend()\n\n if self.logxVar.get():\n ax.set_xscale('log')\n if self.logyVar.get():\n ax.set_yscale('log')\n\n matplotlib.pyplot.tight_layout()\n\n if not refresh:\n fig.show()\n fig.canvas.draw()\n if self.wm is not None:\n self.wm.addWindow(matplotlib.pyplot.get_current_fig_manager().window)\n self.plots['n'] = fig", "def animate(i):\r\n plot_x.set_data(history_samples[i][:, 0], history_samples[i][:, 1])", "def animate(self, i):\n try:\n self.lastSpectrum = self.spectrometer.getSpectrum()\n if self.darkReference is not None:\n self.lastSpectrum -= self.darkReference\n if self.whiteReference is not None:\n np.seterr(divide='ignore',invalid='ignore')\n if self.darkReference is not None:\n self.lastSpectrum = self.lastSpectrum / (self.whiteReference-self.darkReference)\n else:\n self.lastSpectrum = self.lastSpectrum / self.whiteReference \n\n self.plotSpectrum(spectrum=self.lastSpectrum)\n except usb.core.USBError as err:\n print(\"The spectrometer was disconnected. 
Quitting.\")\n self.quitFlag = True\n\n if self.quitFlag:\n self.animation.event_source.stop()\n self.animation = None\n plt.close()", "def render(self, n: int):\n if int(n) == 1:\n self.layout.children = [self.figures[0]]\n elif int(n) == 2:\n self.layout.children = [self.figures[0], self.figures[1]]\n elif int(n) == 3:\n self.layout.children = [\n self.figures[0],\n self.figures[1],\n self.figures[2],\n ]", "def setNumber(self, n):\n VisualElement.setNumber(self, n)\n\n # the brush color is dependent of number\n self.element_region.setBrush(self.brush)", "def current_frame(self, n):\n self.sound.seek(n)\n self._current_frame = n", "def _update_plot(self, *args):\n # type: (dict, dict, dict, dict, dict) -> None\n if len(args) != 5 and not any([isinstance(arg, dict) for arg in args]):\n raise ValueError('Illegal arguments for _update_plot of %s' % self.__name__)\n desvars, responses, objectives, constraints, metadata = args\n\n data = self._compute_new_data(desvars, responses, objectives, constraints, metadata)\n self.cs[:, self.iter] = data[:]\n self.quad.set_array(self.cs.ravel())\n self.ax.set_xlim([-.5, self.iter+.5])\n self.iter += 1", "def set_layer(self, n):\n self.layernum = n\n self.update()", "def updateplot(self):\n plotfiles = []\n try:\n self.plotter.reset()\n self.plotter.set_xrange(self.xrangemin.value(), self.xrangemax.value())\n self.plotter.set_yrange(self.yrangemin.value(), self.yrangemax.value())\n self.plotter.set_bgirange(self.bgintmin.value(), self.bgintmax.value())\n self.plotter.set_pkrange(self.halphamin.value(), self.halphamax.value())\n for n,pf in enumerate(self.selecteddata):\n tf = os.path.join(self.tempdir, \"tf%d\" % n)\n self.dfparser.writefile(tf, pf)\n plotfiles.append(tf)\n self.plotter.set_plot(plotfiles)\n except datafile.Datafile_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()\n except plotter.Plotter_error as e:\n self.warningmsg.setText(e.args[0])\n self.plotter.clear()", "def animate(i):\n plot_viz_x = []\n plot_viz_y = []\n for node in graph:\n node_x, node_y = node.position\n agent_count = node.agent_count\n\n for _ in range(0, agent_count):\n plot_viz_x.append(self._get_visual_position(node_x))\n plot_viz_y.append(self._get_visual_position(node_y))\n\n self.plots[0].set_data(plot_viz_x, plot_viz_y)\n self.plots[1].set_data(leader.visual[0], leader.visual[1])\n return self.plots", "def n_series(self, n_series):\n\n self.container['n_series'] = n_series", "def plot(self, *args, **kwargs):\n\n n = len(args)\n\n self.fig, ax = plt.subplots(n,1)\n if 'title' in kwargs:\n self.fig.canvas.set_window_title(kwargs['title'])\n self.fig.suptitle(kwargs['title'], fontsize=11, fontweight='bold')\n if n == 1:\n ax.plot(self.vecs['time'], self.vecs[args[0]])\n ax.set_title('Time vs. ' + args[0].title())\n\n ax.set_ylabel(args[0].title())\n ax.set_xlim([self.vecs['time'][0], self.vecs['time'][-1]])\n\n else:\n for i in range(n):\n ax[i].plot(self.vecs['time'], self.vecs[args[i]])\n ax[i].set_title('Time vs. 
' + args[i].title())\n ax[i].set_ylabel(args[i].title())\n ax[i].set_xlim([self.vecs['time'][0], self.vecs['time'][-1]])\n if i != (n-1):\n plt.setp(ax[i].get_xticklabels(), visible=False)\n else:\n ax[i].set_xlabel('Time')\n\n plt.tight_layout(h_pad=0.2)\n plt.subplots_adjust(top=0.85)\n plt.show()", "def simulate(self, n: int, show_neighbourhood: bool = False):\n self.draw_current_state(show_neighbourhood)\n for _ in range(n):\n self.move()\n self.draw_current_state(show_neighbourhood)\n self.plotting_engine.animate()", "def revert_frames(self, n_frames: int = 25) -> None:\n if self.__layout.count() > 0 and self.__trajectory_writer is not None and self.mol_widget is not None:\n self.__trajectory_writer.pause()\n traj = self.__trajectory_writer.get_trajectory()\n n_back = min(n_frames, len(traj) - 1)\n wanted_frame = traj[-n_back]\n atoms = utils.AtomCollection(traj.elements, wanted_frame)\n # delete all frames after the wanted one\n # slices not supported in python bindings of delitem of trajectory when writing this\n for _ in range(n_back):\n del traj[-1]\n self.mol_widget.update_molecule(atoms=atoms)\n self.__trajectory_writer.unpause()", "def setNPoints(self,n):\n assert(n > 0)\n self._c_param.shrake_rupley_n_points = n", "def make_plots(self):\n n_rounds = self.run.n_rounds\n\n log.info('Making %d frames', n_rounds)\n args = [self._get_for_parallel(i) for i in range(n_rounds)]\n self.lbv.map(_plot_helper, args)", "def _on_num_points_change(self, _):\n self.num_points = self.num_points_slider.value\n self.redraw_whole_plot()", "def plot_files(n=15):\n lod = read_files(n)\n\n for i in range(len(lod)):\n plt.plot(range(len(lod[i])), lod[i])\n\n plt.show()", "def run_animation(self):\n\n def _get_frame(frame_index, plots):\n \"\"\" Should be called by run_animations only. 
\"\"\"\n\n # TODO Using the indices of the self.frames, plot in correct location.\n # Okay right now there is a problem where it's unknown whether the set of coordinates\n # is a line or a dot -- that info got lost up there\n\n for amb_index in range(len(self.frames[frame_index])):\n xs = self.frames[frame_index][amb_index][0]\n ys = self.frames[frame_index][amb_index][1]\n\n # if len(xs) > 1:\n # if xs[0] == xs[1]:\n # plots[amb_index][1].set_data([xs[0]], [ys[0]])\n # if xs[-2] == xs[-1]:\n # plots[amb_index][1].set_data([xs[-1]], [ys[-1]])\n\n plots[amb_index][0].set_data(xs, ys)\n\n print(plots[len(self.ambulance_locations)])\n\n return plots,\n\n fig = plt.figure(figsize=(14, 8))\n\n # TODO need [number of ambulances] x [number of states]\n\n plots = []\n for i in range(len(self.ambulance_locations)):\n new_color = self.ambulance_colors[i]\n\n line_plot, = plt.plot([], [],\n marker='+',\n linestyle='',\n markerfacecolor=new_color,\n markeredgecolor=new_color,\n label=\"Ambulance {} Path\".format(i + 1))\n\n # dot_plot, = plt.plot([], [],\n # marker='o',\n # linestyle='',\n # markerfacecolor=new_color,\n # markeredgecolor=new_color)\n\n # plots.append([line_plot, dot_plot])\n\n plots.append([line_plot])\n\n base_plot = plt.scatter([base.longitude for base in self.bases],\n [base.latitude for base in self.bases],\n marker=\"D\", color=\"black\", label=\"Bases\")\n hospital_plot = plt.scatter([hospital.longitude for hospital in self.hospitals],\n [hospital.latitude for hospital in self.hospitals],\n marker=\"P\", color=\"r\", label=\"Hospitals\")\n\n plots.append(base_plot)\n plots.append(hospital_plot)\n\n # TODO Make boundaries parameters\n\n img = plt.imread(\"./visuals/simple.png\")\n plt.imshow(img, extent=[-117.017637, -117.167672, 32.710484, 32.823033])\n plt.legend(loc=\"upper right\")\n print(\"draw the animation\")\n ani = animation.FuncAnimation(fig, _get_frame, len(self.frames),\n fargs=(plots,), interval=50)\n\n plt.show()\n\n # fps = 15\n # print('save the animation')\n # print(\"it may take up to {}\".format(len(self.frames)/fps))\n # ani.save('regional_vis6.mp4', fps=fps, dpi=150)", "def update(self, index: Optional[int] = None) -> None:\n super().update(index)\n self.ax.clear()\n if self._matrix is not None:\n df = pd.DataFrame(data=np.matmul(self.simulation.history.to_numpy(), self._matrix),\n columns=self.categories,\n index=self.simulation.history.index)\n else:\n df = self.simulation.history\n df.plot(ax=self.ax)\n\n self.ax.set_yscale(self.yscale)\n if self.yscale in ['symlog', 'log']:\n self.ax.set_ylim(0, 2 * self.simulation.simulator.n)\n else:\n self.ax.set_ylim(0, 1.1 * self.simulation.simulator.n)\n\n # rotate the x labels if they are time units\n if self.simulation.time_units:\n for tick in self.ax.get_xticklabels():\n tick.set_rotation(45)\n self.fig.tight_layout()\n self.fig.canvas.draw()", "def numViewPortsChanged(self, num_str):\n self.viewPorts.numViewPortsChanged(int(num_str))\n\n self.rendererWindows = self.viewPorts.getViewPorts()\n for rw in self.rendererWindows:\n rw.outputDialog.imageTab.imageSequenceTab.refreshLinkedRenderers()", "def setFrameThickness(n=1):\n dislin.frame(n)", "def updatePlot(self):\n self.axes.clear()\n nc = len(self.curvelist)\n xpos = self.curvelist[0].xvinfo.vidx\n for i in range(nc):\n ypos = self.curvelist[i].yvinfo.vidx\n self.axes.plot(self.data[xpos],\n self.data[ypos], self.col[i])\n if self.idata is not None:\n self.axes.plot(self.idata[xpos],\n self.idata[ypos], self.col[i]+'.')\n self.canvas.draw()", "def 
set_list_index(self, n=0):\r\n self.n = n", "async def plot(self, new=False) -> None:\n self._logger.debug(\"running\")\n self.figure.clear()\n self.figure.set_tight_layout(True)\n num_plots = len(self._plots)\n axes = None\n for i in range(num_plots):\n plot = self._plots[i]\n name = plot[0]\n active = plot[2]\n if active:\n if i == 0:\n axes = self.figure.add_subplot(1, 1, 1)\n axes.tick_params(axis='x', labelrotation=30)\n axes.set_ylabel(name, color='#1f77b4')\n await sleep(.001)\n if not new:\n await create_task(self.plot_device_data(axes, name))\n else:\n alt_axes = axes.twinx()\n alt_axes.set_ylabel(name, color='#ff7f0e')\n alt_axes.tick_params(axis='y', labelcolor='#ff7f0e')\n alt_axes.set_yticks(np.arange(0, 6, step=1))\n await sleep(.001)\n if not new:\n await create_task(self.plot_device_data(alt_axes, name))\n\n if not new:\n self.add_vert_lines()\n await sleep(.001)\n self.figure.canvas.draw()\n self._logger.debug(\"done\")", "def init_plot(self, num_axes):\r\n self.i = []\r\n self.val = []\r\n plt.ion()\r\n self.axes = plt.gca()\r\n self.lines =[]\r\n\r\n for i in range(num_axes):\r\n self.val.append([])\r\n self.lines.append([])\r\n self.lines[i], = self.axes.plot([], self.val[0], '-', c=[random.random() for _ in range(3)], linewidth=1.5, markersize=4)", "def update_plot():\n pass", "def animate_GRN(self, n_frames=100, file_name=None, dir_name=\"plots\"):\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n\n skip = int((self.x_save.shape[0]) / n_frames)\n E_sample = self.E_save[::skip]\n E_min,E_max = E_sample.min(),E_sample.max()\n\n def animate(i):\n ax1.cla()\n cmap = plt.cm.plasma(self.normalize(E_sample[i],E_min,E_max))\n self.plot_vor_colored(self.x_save[skip * i], ax1,cmap)\n ax1.set(aspect=1, xlim=(0, self.L), ylim=(0, self.L))\n\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=15, bitrate=1800)\n if file_name is None:\n file_name = \"animation %d\" % time.time()\n an = animation.FuncAnimation(fig, animate, frames=n_frames, interval=200)\n an.save(\"%s/%s.mp4\" % (dir_name, file_name), writer=writer, dpi=264)", "def set_n(self, n: int) -> None:\r\n self.n_is_set = True\r\n self.n = n", "def frame_number(self, frame_number):\n\n self._frame_number = frame_number", "def __init__(self, n, *args,\n tick=None, frameskip=0, window_size=1, reactivity=30,\n Iin=lambda *args: 0, scatterargs={},\n **kwargs):\n self.window_size = window_size\n self.ticks_per_update = frameskip + 1\n self.n = n\n self.Iin = Iin\n self.reactivity = reactivity\n self._tick = tick\n\n self.X = np.zeros((window_size, n.N))\n self.scatterargs = scatterargs", "def update(self, index: Optional[int] = None) -> None:\n super().update(index)\n if self._matrix is not None:\n heights = np.matmul(self.config, self._matrix)\n else:\n heights = self.config\n for i, rect in enumerate(self.ax.patches):\n rect.set_height(heights[i])\n\n self.ax.set_title(f'Time {self.time: .3f}')\n self.fig.tight_layout()\n self.fig.canvas.draw()", "def set_frame(self):\n plt.tick_params(\n bottom=False,\n labelbottom=False,\n left=False,\n labelleft=False,\n )\n # Thicken frame.\n for axis in [\"top\", \"bottom\", \"left\", \"right\"]:\n self.ax.spines[axis].set_linewidth(self.framesize)\n # Diagonal.\n self.ax.plot(\n [0, 1],\n [0, 1],\n transform=self.ax.transAxes,\n lw=self.framesize,\n color=\"black\",\n zorder=1,\n )\n # Axis labels.\n self.ax.set_ylabel(self.y, size=self.labelsize, labelpad=20)\n self.ax.set_xlabel(self.x, size=self.labelsize, 
labelpad=20)\n self.ax.set_title(self.title, size=self.titlesize, pad=60)\n self.ax.xaxis.set_label_position(\"top\")\n\n return self", "def slider_frames_changed(self):\n\n # Again, please note the difference between indexing and GUI displays.\n index = self.slider_frames.value() - 1\n\n # Differentiate between frame ordering (by quality or chronologically).\n if self.frame_ordering == \"quality\":\n self.frame_index = self.quality_sorted_indices[index]\n self.quality_index = index\n\n else:\n self.frame_index = index\n self.quality_index = self.rank_indices[self.frame_index]\n\n # Adjust the frame list and select the current frame.\n\n self.listWidget.setCurrentRow(index, QtCore.QItemSelectionModel.SelectCurrent)\n\n # Update the image in the viewer.\n self.frame_selector.setPhoto(self.frame_index)\n self.listWidget.setFocus()", "def animate(self,n_frames = 100,file_name=None, dir_name=\"plots\",an_type=\"periodic\",tri=False):\n if an_type == \"periodic\":\n plot_fn = self.plot_vor\n if an_type == \"boundary\":\n plot_fn = self.plot_vor_boundary\n\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n\n skip = int((self.x_save.shape[0])/n_frames)\n def animate(i):\n ax1.cla()\n if tri is True:\n plot_fn(self.x_save[skip*i],ax1,tri=self.tri_save[skip*i])\n else:\n plot_fn(self.x_save[skip*i],ax1,tri=False)\n if self.plot_forces is True:\n x = self.x_save[skip*i]\n mask = ~np.isnan(self.x_save[skip*i,:,0]) * ~np.isnan(self.x_save[skip*i+1,:,0])\n x = x[mask]\n F = self.x_save[skip*i+1,mask] - self.x_save[skip*i,mask]\n ax1.quiver(x[:,0],x[:,1],F[:,0],F[:,1])\n ax1.set(aspect=1, xlim=(0, self.L), ylim=(0, self.L))\n\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=15, bitrate=1800)\n if file_name is None:\n file_name = \"animation %d\" % time.time()\n an = animation.FuncAnimation(fig, animate, frames=n_frames, interval=200)\n an.save(\"%s/%s.mp4\" % (dir_name, file_name), writer=writer, dpi=264)", "def _update_plots(self):\n for dock in self.plotDocks:\n for widget in dock.widgets:\n if not self.dataList.findItems(dock.name(), QtCore.Qt.MatchExactly):\n # no data for this plot -> reset it\n widget.getPlotItem().clear()\n # TODO remove tab from dock and del instance\n else:\n widget.getPlotItem().clear()\n x_data = self.currentDataset[\"results\"][\"time\"]\n y_data = self._get_data_by_name(dock.name())\n widget.getPlotItem().plot(x=x_data, y=y_data)", "def update_plot(frame):\n global plotdata\n while True:\n try:\n data = q.get_nowait()\n except queue.Empty:\n break\n shift = len(data)\n plotdata = np.roll(plotdata, -shift, axis=0)\n plotdata[-shift:, :] = data\n for column, line in enumerate(lines):\n line.set_ydata(plotdata[:, column])\n return lines", "def set_Nchan(self, Nchan):\n raise NotImplementedError()\n #self.__init__(self._profiles[0], phases=self._phases, Nchan=Nchan)", "def set_Nchan(self, Nchan):\n raise NotImplementedError()\n #self.__init__(self._profiles[0], phases=self._phases, Nchan=Nchan)", "def plot(self, nsteps_max=10):\r\n fig = plt.figure()\r\n ax1 = plt.subplot(221)\r\n ax2 = plt.subplot(222)\r\n ax3 = plt.subplot(224)\r\n\r\n if 'fig' in locals(): # assures tight layout even when plot is manually resized\r\n def onresize(event): plt.tight_layout()\r\n try: cid = fig.canvas.mpl_connect('resize_event', onresize) # tighten layout on resize event\r\n except: pass\r\n\r\n self.plot_px_convergence(nsteps_max=nsteps_max, ax=ax1)\r\n\r\n if getattr(self.px_spec, 'ref_tree', None) is None:\r\n 
self.calc_px(method='LT', nsteps=nsteps_max, keep_hist=True)\r\n\r\n self.plot_bt(bt=self.px_spec.ref_tree, ax=ax2, title='Binary tree of stock prices; ' + self.specs)\r\n self.plot_bt(bt=self.px_spec.opt_tree, ax=ax3, title='Binary tree of option prices; ' + self.specs)\r\n # fig, ax = plt.subplots()\r\n # def onresize(event): fig.tight_layout()\r\n # cid = fig.canvas.mpl_connect('resize_event', onresize) # tighten layout on resize event\r\n # self.plot_px_convergence(nsteps_max=nsteps_max, ax=ax)\r\n\r\n try: plt.tight_layout()\r\n except: pass\r\n plt.show()", "def select_sprite(self, n):\n self.img = self.sub_images[n]\n self.last_sprite = n", "def _add_axes(self, n):\n height = (self.top - self.bottom) / float(self.get_n())\n height = min(height, self.maxheight)\n width = self.right - self.left\n ax = self.figure.add_axes([self.left, self.bottom + (n - 1) * height, width, height])\n return ax", "def redraw(self):\n x2, y2 = [[] for i in range(len(self.x))], \\\n [[] for i in range(len(self.x))]\n game_point = [random.randint(1, 100),\n random.randint(1, 100)]\n for i in range(self.generations):\n x2, y2, game_point = self.move(x2, y2, game_point)\n for i in range(10): # Czyszczenie starych wykresow\n self.plots[i].set_xdata([])\n self.plots[i].set_ydata([])\n self.plots2[i].set_xdata([])\n self.plots2[i].set_ydata([])\n for i in range(len(self.x)): # Nowe dane wykresow\n self.plots[i].set_xdata(self.x[i])\n self.plots[i].set_ydata(self.y[i])\n self.plots2[i].set_xdata(x2[i])\n self.plots2[i].set_ydata(y2[i])\n self.fig.canvas.draw_idle()", "def random_walk_draw(self,num_plots,animated=False,show=True):\n\t\t\n\t\tt_x_arrays = []\n\t\tt_max = self.n\n\t\tfor _ in range(num_plots):\n\t\t\tcurrent_x = self.x_initial\n\t\t\tx_array = [current_x]\n\t\t\tt_array = range(t_max + 1)\n\t\t\tsteps = self._random_walk_simulation()\n\t\t\tfor s in steps:\n\t\t\t\tcurrent_x += s\n\t\t\t\tx_array.append(current_x)\n\t\t\tt_x_arrays.append( [x_array,t_array] )\n\t\t\n\t\t\n\t\tfig = plt.figure('Random walk simulation')\n\t\tax = fig.add_subplot(1,1,1)\n\t\tax.set_ylim([(round(min(x_array) - np.sqrt(self.n)*3)),round(max(x_array) + np.sqrt(self.n)*3)])\n\t\tax.set_xlim([-(round(np.sqrt(self.n))),self.n+(round(np.sqrt(self.n)))])\n\t\t\n\t\tif animated == True: # THIS CASE CURRENTLY HAS BUG FOR SOME REASON. 
CODE IS IDENTICAL TO 2D ANIMATION?\n\t\t\tfig.suptitle('Simulation of 1D random walk, live')\n\t\t\tself.index = 0\n\t\t\tdef update(i):\n\t\t\t\tax.clear()\n\t\t\t\tax.set_ylim([(round(min(x_array) - np.sqrt(self.n)*3)), round(max(x_array) + np.sqrt(self.n)*3)])\n\t\t\t\tax.set_xlim([-(round(np.sqrt(self.n))), self.n+(round(np.sqrt(self.n)))])\n\t\t\t\tfor i in t_x_arrays:\n\t\t\t\t\tx_vals,t_vals = i \n\t\t\t\t\tax.plot(t_vals[:self.index], x_vals[:self.index])\n\t\t\t\tself.index += 1\n\t\t\ta = anim.FuncAnimation(fig, update, frames=self.n, repeat=False,interval=10)\n\t\telse:\n\t\t\tfig.suptitle('Simulation of 1D random walk, static')\n\t\t\tfor i in t_x_arrays:\n\t\t\t\tx_vals,t_vals = i\n\t\t\t\tax.plot(t_vals, x_vals)\n\t\t\t\n\t\tif show == True:\n\t\t\tplt.show()", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))", "def _plot(\n self, \n frame_idx: int, \n scores: List[float], \n losses: List[float],\n ):\n clear_output(True)\n plt.figure(figsize=(20, 5))\n plt.subplot(131)\n plt.title('frame %s. score: %s' % (frame_idx, np.mean(scores[-10:])))\n plt.plot(scores)\n plt.subplot(132)\n plt.title('loss')\n plt.plot(losses)\n plt.show()", "def __call__(self, i):\n plt.subplot(self.nx, self.ny, i)\n return True", "def set_numpins(self, n):\n self.numpins = n", "def setTickNumber(n=2, axes='XYZ'):\n dislin.ticks(n, axes)", "def _ps_update(self, i):\n\n for d in range(self.plant.dof):\n self.ps_plots[d].set_data(\n np.asarray(self.x_values).T[d],\n np.asarray(self.x_values).T[self.plant.dof+d])\n return self.ps_plots", "def UpdateFrame(self, sender=None, args=None):\n # Update label for sensor: s['label']\n # with the most recent measurement: s().data['data'][-1]\n for s in self.sensors:\n self.gValue[s.GetID()].SetLabel( '{num} {unit}'.format(\n num = s().data['data'][-1],\n unit = str(s['unit'])) )\n try:\n pub.sendMessage( 'Plot.%s' %self.GetLabel() )\n except:\n self.plot_deleted = True\n\n \n self.top_sizer.Layout()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', 
color='darkgreen')\n self.canvas.draw()", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()", "def _update_plots(self):\n #Adding in new data to plots\n currSignal = self._ai_client.get_ai_voltage(self._ai_channel, max_range=self.max_input_voltage)\n self.measured_powers = np.append(self.measured_powers[1:], np.mean(currSignal))\n self.out_voltages = np.append(self.out_voltages[1:], self._curr_output_voltage)\n self.errors = np.append(self.errors[1:], (currSignal[-1] - self.voltageSetpoint))\n self.sp_data = np.append(self.sp_data[1:], self.voltageSetpoint)\n #Update power plots\n self.widgets['curve'][0].setData(self.measured_powers*self.gain)\n #Update setpoint plots\n self.widgets['curve'][1].setData(self.sp_data*self.gain)\n\n # Now update voltage polots\n self.widgets['curve'][2].setData(self.out_voltages)\n self.widgets['curve'][3].setData(self.errors*self.gain)", "def plot(self, n_confs):\n \n import pandas as pd\n import numpy as np\n import matplotlib as mpl\n mpl.use('Agg')\n import matplotlib.pyplot as plt\n import csv\n \n n_iter = len(self.plot_data)\n \n data = np.ndarray((n_iter, n_confs+1))\n data[:,0] = [i[0] for i in self.plot_data]\n data[:,1:] = [i[1].detach().cpu().numpy() for i in self.plot_data]\n\n df=pd.DataFrame(data)\n names = ['iter']\n for i in range(n_confs): names.append(f'c{i+1}')\n df.columns = names\n df.to_csv(f\"{self.plot_name}.tab\", sep=\"\\t\", quoting=csv.QUOTE_NONE) \n\n d = data[:,1:].reshape(-1)\n d = d[~np.isnan(d)]\n mine = d.min() - 0.01\n for i in range(n_confs): \n data[:,i+1] -= mine\n \n df=pd.DataFrame(data)\n names = ['iter']\n for i in range(n_confs): names.append(f'c{i+1}')\n df.columns = names\n \n colors = (0,0,0)\n area = 10\n \n # Plot\n fig = plt.figure(figsize=(15, 15))\n ax = fig.add_subplot(1,1,1)\n for i in range(n_confs):\n ax.plot('iter', f'c{i+1}', data=df)\n ax.set_yscale('log')\n\n plt.xlabel('iter')\n plt.ylabel('loss')\n plt.savefig(f'{self.plot_name}.png')", "def UpdatePlot(self):\n\n if self.first_time:\n for ID, plt in self.plotIDs.iteritems():\n if plt:\n tmp = FellesBaseClass.FindInstance(ID)\n self.plot_panel.oplot(\n np.array(tmp.data['time']),\n np.array(tmp.data['data']),\n draw = True,\n side ='left',\n label = tmp['label'],\n color = tmp['color'],\n xlabel = None, ylabel = None, y2label = None,\n title = None,\n dy = None,\n ylog_scale = False,\n xmin = None, xmax = None, ymin = None, ymax = None,\n refresh = True,\n show_legend= True, legend_loc='ur', legend_on= True,\n delay_draw = False,\n marker = 'None', markersize = None,\n autoscale=True,\n linewidth = 3, # default 2\n drawstyle = 'line', style = 'solid',\n grid = True,\n bgcolor= None, framecolor= None, gridcolor= None,\n labelfontsize= 10, # default 9\n legendfontsize= 12, # default 7\n fullbox=None, # 'box', 'open', 'bottom'\n axes_style=None,\n zorder=None,\n )\n self.first_time = False\n\n else:\n i = 0\n for ID,plt in self.plotIDs.iteritems():\n if plt:\n tmp = FellesBaseClass.FindInstance(ID)\n self.plot_panel.update_line(\n i,\n np.array(tmp.data['time']),\n np.array(tmp.data['data']),\n draw=True,\n )\n i += 1\n\n self.plot_panel.set_xylims(\\\n [\\\n floor( min( [ min( FellesBaseClass.FindInstance(ID).data['time'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n ceil( max( [ max( 
FellesBaseClass.FindInstance(ID).data['time'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n floor( min( [ min( FellesBaseClass.FindInstance(ID).data['data'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) ),\\\n ceil( max( [ max( FellesBaseClass.FindInstance(ID).data['data'] )\\\n for ID,plt in self.plotIDs.iteritems() if plt ] ) )\\\n ]\\\n )\n\n self.panel_sizer.Fit(self)", "def update_figure(self, coordinates, title, xlabel, ylabel, legend, location, xlim, ylim):\n #Not sure how to handle ticks yet...\n self.axes.clear()\n self.axes.grid()\n fsize = \"10\"\n\n for coordinate in coordinates:\n self.axes.plot(coordinate[0], coordinate[1])\n self.axes.set_title(title, fontsize=fsize)\n self.axes.set_xlabel(xlabel, fontsize=fsize)\n self.axes.set_ylabel(ylabel, fontsize=fsize)\n self.axes.legend(legend, loc=location, fontsize=fsize)\n self.axes.set_xlim(xlim)\n self.axes.set_ylim(ylim)\n\n self.draw()\n self.show()", "def update_plot(axes):\n axes.clear()\n\n i = C.i\n C.i += di # globale Zählvariable erhöhen\n if C.i >= len(tt):\n time.sleep(2)\n C.i = 0\n\n t = tt[i]\n q1 = qq1[i]\n q2 = qq2[i]\n q3 = qq3[i]\n CCframe(q1, q2, q3)\n\n # Ausgabe der aktuellen Zeit\n pl.text(0.06, 0.05, \"t = %3.2fs\" % t, transform = axes.transAxes)\n pl.axis([-3, 3, -3, 3])\n axes.figure.canvas.draw()", "def update_arrows(self, n=None):\n i = 0\n for arrow in self.arrows:\n arrow.update()\n i += 1\n if n and i>n: break", "def _get_frame(frame_index, plots):\n\n # TODO Using the indices of the self.frames, plot in correct location.\n # Okay right now there is a problem where it's unknown whether the set of coordinates\n # is a line or a dot -- that info got lost up there\n\n for amb_index in range(len(self.frames[frame_index])):\n xs = self.frames[frame_index][amb_index][0]\n ys = self.frames[frame_index][amb_index][1]\n\n # if len(xs) > 1:\n # if xs[0] == xs[1]:\n # plots[amb_index][1].set_data([xs[0]], [ys[0]])\n # if xs[-2] == xs[-1]:\n # plots[amb_index][1].set_data([xs[-1]], [ys[-1]])\n\n plots[amb_index][0].set_data(xs, ys)\n\n print(plots[len(self.ambulance_locations)])\n\n return plots,", "def set_frames(self, frames, index_start, ambulance_id, xs, ys, display=10):\n\n if not xs: return\n\n curr_index = index_start\n start_position = 0\n end_position = 1\n last_position = len(xs)\n\n # Enumerate the historically most recent coordinates.\n while start_position < end_position:\n\n frames[curr_index][ambulance_id][0] += xs[start_position: end_position]\n frames[curr_index][ambulance_id][1] += ys[start_position: end_position]\n\n if end_position < last_position:\n end_position += 1\n\n if end_position - start_position > display or end_position == last_position:\n start_position += 1\n\n curr_index += 1", "def update_figure(self):\n # if number of kinetics in model did not change\n # update just last lines\n if self.N_lines - 1 == len(self.model.spectra.keys()) * 2:\n self.dataplot.lines[-1].remove()\n self.dataplot.lines[-1].remove()\n self.draw_figure_first()\n # delete all and redraw\n else:\n n = int((self.N_lines - 1) / 2)\n for _ in range(n):\n self.dataplot.lines[-1].remove()\n self.dataplot.lines[-1].remove()\n self.draw_figure_total()\n\n self.dataplot.relim()\n\n self.dataplot.autoscale_view(True, True, True)\n\n self.draw()", "def grid_animation_quick(self, frames, iterations=10, fps=0.02, figsize=(6, 6)):\r\n color_map = matplotlib.colors.ListedColormap(['white', 'black'])\r\n\r\n fig, ax = plt.subplots(figsize=figsize)\r\n\r\n for r in np.arange(0, iterations):\r\n 
ax.cla()\r\n ax.axes.grid(False)\r\n ax.set_axis_off()\r\n im = ax.imshow(frames[0], cmap=color_map, animated=True)\r\n for image, step in zip(frames[1:], np.arange(1, len(frames[1:])+1)):\r\n time.sleep(fps)\r\n ax.title.set_text('Rule 942 | Step ' + str(step) + ' | Active ' + str(int(np.sum(image))))\r\n im.set_data(image)\r\n fig.canvas.draw()", "def animate(self,frame,im = None):\n # With matplotlib, it's much, much faster to just update the properties\n # of a display object than it is to create a new one, so we'll just update\n # the data and position of the same objects throughout this animation...\n\n # Since we're making an animation with matplotlib, we need \n # ion() instead of show()...\n fig = plt.gcf()\n ax = plt.axes([.25, .55, .6, .4], facecolor='y')\n plt.axis('off')\n\n # Make an image based on the first frame that we'll update later\n # (The first frame is never actually displayed)\n if im is None:\n plt.imshow(frame,cmap='brg')\n else:\n plt.imshow(im)\n plt.title('Image Space')\n\n # Make 4 rectangles that we can later move to the position of each paw\n rects = [Rectangle((0,0), 1,1, fc='none', ec='red') for i in range(4)]\n [ax.add_patch(rect) for rect in rects]\n\n\n # Process and display each frame\n\n paw_slices = self.find_paws(frame)\n\n # Hide any rectangles that might be visible\n [rect.set_visible(False) for rect in rects]\n\n # Set the position and size of a rectangle for each paw and display it\n for slice, rect in zip(paw_slices, rects):\n dy, dx = slice\n rect.set_xy((dx.start, dy.start))\n rect.set_width(dx.stop - dx.start + 1)\n rect.set_height(dy.stop - dy.start + 1)\n rect.set_visible(True)", "def reset(self):\n try:\n self.ax.cla()\n except Exception as e:\n print 'Exception BasePlot:', e\n raise e\n \n self._plotbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self._timestampbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self.ax.set_axis_bgcolor('black')\n self.ax.set_xticks([])\n self.ax.set_yticks([])", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()", "def animate(self, i):\n self.advance_animation(0.01)\n self.get_status()\n self.ax2.clear()\n self.ax3.clear()\n self.ax2.plot(np.ones(len(self.healthy))*self.total_beds, 'k--')\n samples = range(0, len(self.healthy))\n self.ax2.stackplot(samples, self.sick, self.healthy, self.recovered, self.dead, labels=['Sick: ' +str(self.sick[-1]),'Healthy: '+str(self.healthy[-1]),'Recovered: '+str(self.recovered[-1]), 'Dead: '+str(self.dead[-1])], colors=['orangered','forestgreen', 'deepskyblue', 'black'])\n #self.ax2.legend(bbox_to_anchor=(1.04,0), loc=\"lower left\", borderaxespad=0)\n self.ax2.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=2, fancybox=True, fontsize=6, shadow=True)\n self.ax2.xaxis.set_ticks([])\n self.ax2.set_ylim(0, self.n+30)\n self.ax3.plot(samples, self.healthy,'forestgreen', label = 'Healthy')\n self.ax3.plot(samples, self.sick, 'orangered', label = 'Sick')\n self.ax3.plot(samples, self.recovered, 'deepskyblue', label = 'Recovered')\n self.ax3.plot(samples, self.dead, 'black', label = 'Dead')\n self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05), ncol=2, fancybox=True, fontsize=6, shadow=True)\n self.ax3.set_ylim(0, self.n+30)\n self.ax3.xaxis.set_ticks([])\n 
self.ax2.yaxis.set_ticks([])\n self.ax2.set_xlabel('Change over time')\n self.ax3.set_xlabel('Change over time')\n self.ax.set_title('Social Distancing Followed By '+str(self.social_dist)+'% People.')\n self.ax2.set_title('Stacked area graph for each category', fontsize = 8)\n self.ax3.set_title('Percentage graph for each category', fontsize = 8)\n self.exit_animate()", "def update(i):\n print(\"{}/{}\".format(i, len(embs[query])))\n ims[0].set_data(unnorm(frames[query][i]))\n ims[1].set_data(unnorm(frames[candidate][nns[i]]))\n plt.tight_layout()", "def set_subplots(self, names: [str]) -> None:\n self._plots = list()\n self._logger.debug(\"running\")\n if len(names) < 1:\n return\n r = len(names)\n c = 1\n for i in range(0, r):\n self._plots.append((names[i], (r, c, i + 1), True))\n self._logger.debug(\"done\")", "def make_setplot1(d):\n setplot = open('setplot.py', 'w')\n setplot.write('\\n\"\"\" ')\n setplot.write(\"\"\"\nSet up the plot figures, axes, and items to be done for each frame.\n\nThis module is imported by the plotting routines and then the\nfunction setplot is called to set the plot parameters.\n \"\"\")\n setplot.write('\\n\"\"\" ')\n setplot.write(\"\"\"\n\n#--------------------------\ndef setplot(plotdata):\n#--------------------------\n \"\"\")\n setplot.write('\\n \"\"\" ')\n setplot.write(\"\"\"\n Specify what is to be plotted at each frame.\n Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.\n Output: a modified version of plotdata.\n \"\"\")\n setplot.write('\\n \"\"\" ')\n setplot.write(\"\"\"\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n\n \"\"\")\n\n # create a figure for each component of q:\n\n for iq in range(d.meqn):\n setplot.write(\"\"\"\n\n # Figure for q[%s]\n plotfigure = plotdata.new_plotfigure(name='q[%s]', figno=%s)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = 'auto'\n plotaxes.title = 'q[%s]'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d')\n plotitem.plot_var = %s\n plotitem.plotstyle = '-o'\n plotitem.color = 'b'\n plotitem.show = True # show on plot?\n \"\"\" % (iq,iq,iq,iq,iq))\n\n\n setplot.write(\"\"\"\n\n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via pyclaw.plotters.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # list of frames to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.html_homelink = '../README.html' # pointer for top of index\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n\n return plotdata\n\n \"\"\")\n setplot.close()\n print(\"=== Created setplot.py\")\n # end of make_setplot1", "def update_simulate_plot(self):\n a = self.plot_zoom.getViewBox().viewRange()\n self.plot_simulate.setXRange(a[0][0], a[0][1])\n self.plot_simulate.setYRange(a[1][0], a[1][1])", "def __add__(self, i):\n self.n += i\n plt.subplot(self.nx, self.ny, self.n)\n return True", "def animate(frames):\n plt.grid('on')\n ax = plt.gca()\n ax.set_xticks(np.arange(0.5, 10, 1))\n ax.set_yticks(np.arange(0.5, 10, 1))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n for i in range(len(env_list)):\n 
ax.imshow(env_list[i],cmap='binary')\n plt.pause(0.05)", "def updateArrayPlotData(self):\n self.arrayPlotData.set_data(\"channel0\",self.array0)\n self.arrayPlotData.set_data(\"channel1\",self.array1)\n self.arrayPlotData.set_data(\"channel2\",self.array2)\n self.arrayPlotData.set_data(\"channel3\",self.array3)\n self.arrayPlotData.set_data(\"channel4\",self.array4)\n self.arrayPlotData.set_data(\"channel5\",self.array5)\n self.arrayPlotData.set_data(\"channel6\",self.array6)\n self.arrayPlotData.set_data(\"channel7\",self.array7)\n self.arrayPlotData.set_data(\"cursorXS\",self.cursorXS)\n #self.arrayPlotData.set_data(\"cursorVertical\",self.cursorVertical)", "def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()", "def _nextAnimFrame(step=0):\n lfp_frame.set_data(timestamps[step:step+frame_size], lfp[step:step+frame_size])\n r_raw_frame.set_data(timestamps[step:step+frame_size], raw_ripple[step:step+frame_size])\n r_pow_frame.set_data(timestamps[step:step+frame_size], ripple_power[step:step+frame_size])\n lfp_measure.set_text(txt_template % timestamps[step])\n # Updating the limits is needed still so that the correct range of data\n # is displayed! It doesn't update the axis labels though - That's a\n # different ballgame!\n plot_axes.set_xlim(timestamps[step], timestamps[step+frame_size])\n return lfp_frame, r_raw_frame, r_pow_frame, lfp_measure", "def subplot_of_planes(self, plane_index: Optional[int] = None):\r\n\r\n plane_indexes = self.plane_indexes_from(plane_index=plane_index)\r\n\r\n for plane_index in plane_indexes:\r\n\r\n self.open_subplot_figure(number_subplots=4)\r\n\r\n self.figures_2d(data=True)\r\n\r\n self.figures_2d_of_planes(subtracted_image=True, plane_index=plane_index)\r\n self.figures_2d_of_planes(model_image=True, plane_index=plane_index)\r\n self.figures_2d_of_planes(plane_image=True, plane_index=plane_index)\r\n\r\n self.mat_plot_2d.output.subplot_to_figure(\r\n auto_filename=f\"subplot_of_plane_{plane_index}\"\r\n )\r\n self.close_subplot_figure()", "def setSlicesPerTimepoint(self, n):\n\t\tassert n > 0, \"Slices per timepoint needs to be greater than 0\"\n\t\tprint \"Setting slices per timepoint to \", n\n\t\tself.slicesPerTimepoint = n\n\t\tself.z = n\n\t\tself.readers = []", "def update_plot(self,ax):\n self.replot(ax)", "def update(self, i):\n\n self.current_position = self.mediaPlayer.position()\n \t\n \n\n \"\"\"\n \"Record mode\" and \"wide x-axis mode\" shouls not work together. Wide mode is only for reading data, not writing data. \n The user is not allowed to write data when 16 000 points are displayed (wide mode) on tha diagram. If he does so, the frequency of the graph points decreases with time. \n \"\"\"\n \n if self.checkbox.isChecked():\n self.wideRadio.setEnabled(False)\n if not self.checkbox.isChecked():\n self.wideRadio.setEnabled(True)\n if self.wideRadio.isChecked():\n self.checkbox.setEnabled(False)\n if not self.wideRadio.isChecked():\n self.checkbox.setEnabled(True)\n \n\n\n if self.checkbox.isChecked() and self.mediaPlayer.state() == QMediaPlayer.PlayingState:\n \n self.savedRecently = False\n\n\n self.current_position = self.mediaPlayer.position()\n\n \n if self.xValues == []:\n # \"If the list of xValues is empty\". 
This happens only in the start of the plotting process.\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n #self.position_index = self.xValues.index(self.current_position)\n \n\n if self.xValues != []:\n\n if self.current_position > max(self.xValues):\n # \"If the point is bigger than the last point\". I.e if the point will be plotted in the end of the current graph.\n\n self.xValues.append(self.current_position)\n self.yValues.append(self.mouseY)\n self.colors.append(self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n\n if self.current_position < max(self.xValues):\n # \"If the point is smaller than the last point\". I.e if the point will be plotted in the middle of the current graph.\n\n \n if self.mediaPlayer.position() < 100:\n # The program has a problem of removing a point if x=0. This if-statement solves the problem.\n self.xValues.pop(0)\n self.yValues.pop(0)\n self.colors.pop(0)\n \n\n\n # Clearing all the points that are 100 ms (or less) in front of the current position. \n for number in range(self.current_position, self.current_position + 100):\n if number in self.xValues:\n self.yValues.pop(self.xValues.index(number))\n self.colors.pop(self.xValues.index(number))\n self.xValues.remove(number)\n \n \n \n # Plot new points\n bisect.insort(self.xValues,self.current_position) # Through this method, the element is inserted in order.\n self.yValues.insert(self.xValues.index(self.current_position), self.mouseY)\n self.colors.insert(self.xValues.index(self.current_position), self.currentColor)\n\n self.position_index = self.xValues.index(self.current_position)\n \n\n\n # View modes: zoom or wide.\n\n if self.zoomRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n self.canvas.axes.set_xlim(self.current_position-5000, self.current_position+5000)\n\n self.update_tempLists()\n\n self.curve = self.canvas.axes.scatter(self.tempXList, self.tempYList, s=10 , c=self.tempCList)\n\n\n\n if self.wideRadio.isChecked():\n self.canvas.axes.set_ylim(0, 100)\n\n if self.mediaPlayer.duration() != 0:\n self.canvas.axes.set_xlim(0, self.mediaPlayer.duration())\n elif self.xValues != []:\n self.canvas.axes.set_xlim(0, max(self.xValues))\n\n self.curve = self.canvas.axes.scatter(self.xValues, self.yValues, s=10 , c=self.colors)\n\n \n\n # I remove the previous vertical and horizontal lines. 
If I do not remove them, the program gets slower and slower, and the frequency of the points decreases with time.\n self.hline.remove()\n self.vline.remove()\n \n # New vertical and horizontal lines are created and updated to the correct values.\n self.vline = self.canvas.axes.axvline(x=self.mediaPlayer.position(), color='gray',linestyle=\":\")\n self.hline = self.canvas.axes.axhline(y=self.mouseY, color='gray',linestyle=\":\")\n\n\n\n return [self.curve] + [self.vline] + [self.hline]", "def _plot_grid(frames: Figure, ncols: int = 3) -> Figure:\n for frame in frames:\n frame.plot_height = frame.plot_height // ncols\n frame.plot_width = frame.plot_width // ncols\n return gridplot(frames, ncols=ncols)", "def __call__(self, info, *fargs):\n frame = info[0] # Frame number\n update = info[1] # Update value\n grid_data = info[2] # Data to draw our grids\n mask = info[3] # Mask of data\n self._setup['update'].set_text(f'Update {update}')\n for ndx,data in enumerate(grid_data):\n self._setup['plots'][ndx].set_array(check_mask(data,mask[ndx]))\n for pp in self._setup['post_plot']:\n pp.blit_update(frame, update, ax_ndx=ndx)\n if self._setup._pbar:\n self._setup._pbar.update(frame)\n if frame == self._setup._num_frames - 1:\n self._setup._pbar.finish()\n return self._setup.get_drawables()", "def setNumberOfIntervals(self, n=500):\n self._simulator_.update(numberOfIntervals=n)\n return", "def update_display(self):\n self.lick_plot_0.setData(self.k+self.T,self.buffer[:,1]) \n self.lick_plot_1.setData(self.k+self.T,self.buffer[:,2]) \n self.breathing_plot.setData(self.k+self.T,self.buffer[:,0]) \n \n if self.settings.movie_on.value():\n self.camera_image.setImage(self.camera.read())\n if self.settings.save_movie.value():\n self.camera.write()\n \n #print(self.buffer_h5.size)", "def _refresh_render(self):\n current_frame = self.frame\n self.frame = int(1E6)\n self.frame = current_frame", "def set_update_frames(self, new_update_frames):\n self.update_frames = new_update_frames\n self.update_count_down = self.update_frames", "def plotPaths(self, simulationIndex, numberOfPaths):\n for k in range(numberOfPaths):\n path = self.getPath(simulationIndex + k);\n plt.plot(path)\n plt.xlabel('Time')\n plt.ylabel('Realizations of the process')\n plt.show()", "def _visibleChannels_changed(self):\n for i in range(0,8):\n if i in self.visibleChannels:\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=True\n else:\n print i\n self.masterContainer.plots[\"channel\"+str(i)][0].visible=False", "def show_obs_stack(obs_num):\n processed_observations = np.load('saved_observations.npy')\n assert obs_num < len(processed_observations) and obs_num >= 3, \"Observation index out of episode bounds.\"\n _, arr = plt.subplots(1,4)\n arr[0].set_title('t-3')\n arr[0].imshow(processed_observations[obs_num-3], cmap='gray')\n arr[1].set_title('t-2')\n arr[1].imshow(processed_observations[obs_num-2], cmap='gray')\n arr[2].set_title('t-1')\n arr[2].imshow(processed_observations[obs_num-1], cmap='gray')\n arr[3].set_title('t')\n arr[3].imshow(processed_observations[obs_num], cmap='gray')\n plt.show()", "def plot_frames(beads, sim, ti, tf, savebase, save_eps):\n \n ### normalize variables for plotting purposes\n \n lx = sim.lx/sim.bl\n ly = sim.ly/sim.bl\n \n ### set general plot properties\n\n savebase += 'eps_' + str(sim.eps) + '_fp_' + str(sim.fp) + '_areak_' + str(sim.areak) + '/'\n os.system(\"mkdir -p \" + savebase)\n quant_steps = 2056\n norm = mpl.colors.Normalize(vmin=0, vmax=sim.ncells) \n downlim = -2\n uplim = lx+2\n 
num_ticks = 5\n ax_len = 1.0 # Length of one subplot square box\n ax_b = 0.0 # Beginning/offset of the subplot in the box\n ax_sep = 0.0 # Separation length between two subplots\n total_subplots_in_x = 1 # Total number of subplots \n fig = plt.figure()\n subp = misc_tools.Subplots(fig, ax_len, ax_sep, ax_b, total_subplots_in_x) \n ax0 = subp.addSubplot()\n \n ### plot the frames\n \n for step in range(ti, tf):\n \n ### normalize variables for plotting purposes\n \n beads.xi[step, 0, :] /= sim.bl\n beads.xi[step, 1, :] /= sim.bl\n \n time = step*sim.dt\n print 'Step / Total : ', step, tf\n \n ### plot \n\n subp = misc_tools.Subplots(fig, ax_len, ax_sep, ax_b, total_subplots_in_x) \n ax0 = subp.addSubplot()\n \n line0 = ax0.scatter(beads.xi[step, 0, :], beads.xi[step, 1, :], s=1, c=beads.cid, \\\n cmap=plt.cm.get_cmap('jet',quant_steps), \\\n edgecolors='None', alpha=0.7, vmin=0, vmax=sim.ncells, \\\n norm=norm, rasterized=True)\n \n ax0.axis('scaled')\n \n ### title\n \n ax0.set_title(\"$t/\\\\tau_{D}$ = \" + \"{0:.2f}\".format(time/sim.tau_D) + \\\n \", $t/\\\\tau_{A}$ = \" + \"{0:.2f}\".format(time/sim.tau_A), fontsize=30)\n \n ### labels\n \n ax0.set_xlabel(\"$x/r_{0}$\", fontsize=40)\n ax0.set_ylabel(\"$y/r_{0}$\", fontsize=40)\n\n ### limits\n\n ax0.set_xlim((downlim, uplim))\n ax0.set_ylim((downlim, uplim))\n \n ### ticks\n \n ax0.xaxis.set_ticks(np.linspace(0, uplim, num_ticks, endpoint=True))\n ax0.yaxis.set_ticks(np.linspace(0, uplim, num_ticks, endpoint=True))\n ax0.tick_params(axis='both', which='major', labelsize=30)\n \n ### save\n\n savepath1 = savebase + \"frame-\" + \"{0:05d}\".format(int(step)) + \".png\"\n if save_eps:\n savepath2 = savebase + \"frame-\" + \"{0:05d}\".format(int(step)) + \".eps\"\n \n plt.savefig(savepath1, dpi=200, bbox_inches='tight', pad_inches=0.08)\n if save_eps:\n plt.savefig(savepath2, dpi=200, bbox_inches='tight', pad_inches=0.08) \n fig.clf() \n \n return", "def make_frame(nbinx, nbiny, title='',d_beg='',d_end='',ylow='',yup='',maxticks='',dates = False):\n\n aax = (1,) * nbinx\n ax = (aax,) * nbiny\n\n f, (ax) = plt.subplots(nbinx,nbiny, sharex=True, sharey=True)\n f.suptitle(title)\n \n f.subplots_adjust(hspace=0.05)\n f.subplots_adjust(wspace=0.05)\n f.subplots_adjust(top=0.95)\n f.subplots_adjust(right=0.95)\n f.subplots_adjust(left=0.05) \n if dates:\n f.autofmt_xdate(bottom=0.1, rotation=90, ha='right') \n\n # --- customize x and y range and number of ticks\n\n if nbinx==1 and nbiny==1:\n ax = [[ax]] # cast ax into an array to make the operations consistent\n if d_beg != '' and d_end != '':\n for axx in ax:\n\n dlim = [d_beg,d_end]\n for axxx in axx:\n axxx.set_autoscalex_on(False)\n axxx.set_xlim(dlim)\n\n if ylow != '' and yup != '':\n print(' setting up y scale ',ylow,yup, title)\n for axx in ax:\n\n ylim = [ylow,yup]\n for axxx in axx:\n axxx.set_ylim(ylim) \n\n if maxticks != '':\n for axx in ax:\n for axxx in axx: \n axxx.xaxis.set_major_locator(plt.MaxNLocator(maxticks))\n \n #for a single frame ax is a scalar. put it back.. 
\n if nbinx==1 and nbiny==1:\n ax = ax[0][0] \n return f,(ax)", "def set_up_plotter(self, n_levels: int, param_labels: List[str]):\n self.ax.set_ylim(0, n_levels)\n self.ax.set_zlim(0, 5)\n self.ax.set_xlim(0, 1)\n self.ax.invert_xaxis()\n\n self.ax.set_zlabel(param_labels[0], labelpad=5)\n self.ax.set_ylabel(\"Optimization level\", labelpad=10)\n self.ax.set_xlabel(param_labels[1], labelpad=10)\n\n self.fig.show()\n self.fig.canvas.draw()", "def setNSlices(self,n):\n assert(n> 0)\n self._c_param.lee_richards_n_slices = n", "def plot(self,\n plot=True, plot_stats=True,\n splot=True\n #labels=None, numbers=False, origin='upper',\n #numbers_alpha=None, xlabels_vertical=True,\n #numbers_kwargs={},\n #**kwargs\n ):\n externals.exists(\"pylab\", raiseException=True)\n import pylab as P\n\n self.compute()\n # total number of plots\n nplots = plot + splot\n\n # turn off automatic update if interactive\n if P.matplotlib.get_backend() == 'TkAgg':\n P.ioff()\n\n fig = P.gcf()\n P.clf()\n sps = [] # subplots\n\n nplot = 0\n if plot:\n nplot += 1\n sps.append(P.subplot(nplots, 1, nplot))\n xstart = 0\n lines = []\n for s in self.sets:\n nsamples = len(s[0])\n xend = xstart+nsamples\n xs = xrange(xstart, xend)\n lines += [P.plot(xs, s[0], 'b')]\n lines += [P.plot(xs, s[1], 'r')]\n # vertical line\n P.plot([xend, xend], [N.min(s[0]), N.max(s[0])], 'k--')\n xstart = xend\n if len(lines)>1:\n P.legend(lines[:2], ('Target', 'Prediction'))\n if plot_stats:\n P.title(self.asstring(short='very'))\n\n if splot:\n nplot += 1\n sps.append(P.subplot(nplots, 1, nplot))\n for s in self.sets:\n P.plot(s[0], s[1], 'o',\n markeredgewidth=0.2,\n markersize=2)\n P.gca().set_aspect('equal')\n\n if P.matplotlib.get_backend() == 'TkAgg':\n P.ion()\n P.draw()\n\n return fig, sps", "def plot(self, windowSize='800x600'):\n if not hasattr(self, 'compiled'):\n raise RuntimeError('The object has not compiled yet')\n # create a scrollable window\n _, fm, run = simple_scrollable_window(windowSize)\n count = 0\n img_ref = []\n for key, val in {**self.qubitDict, **self.readoutDict}.items():\n Label(\n fm, text=key + f':{val}', font='Consolas',\n relief='solid', borderwidth=1\n ).grid(row=count, column=0, ipadx=5, ipady=5, sticky='news')\n img_data = self.compiled[val].plot(\n allInOne=False, toByteStream=True, showSizeInfo=False,\n size=[20, 4]\n )\n render = ImageTk.PhotoImage(Image.open(img_data))\n img_ref += [render]\n img = Label(fm, image=render, borderwidth=1, relief='solid')\n img.grid(row=count, column=1, ipadx=5, ipady=5, sticky='news')\n img.image = render\n count += 1\n run()", "def set_list_index(self, n=0):\r\n return self._api.set_list_index(n)", "def set_frame(self, frame, values):\n self._buf[frame] = values", "def _on_frame_changed(self, change):\n self._set_coordinates(self.frame)" ]
[ "0.62682223", "0.576072", "0.5558097", "0.55435854", "0.5533847", "0.55313236", "0.54860616", "0.54809326", "0.547151", "0.5446553", "0.54084885", "0.5352229", "0.53280175", "0.53238046", "0.5293158", "0.52775246", "0.5230508", "0.52274555", "0.5225835", "0.5224569", "0.5220222", "0.5186063", "0.51843345", "0.51758695", "0.5168976", "0.516893", "0.5167638", "0.5142942", "0.51419634", "0.5139828", "0.5128175", "0.51243544", "0.5120009", "0.51098514", "0.5106628", "0.51044816", "0.51017445", "0.5064398", "0.5064398", "0.5063494", "0.5020692", "0.50161", "0.5005357", "0.50021595", "0.49969715", "0.4991504", "0.49877283", "0.49863195", "0.49647456", "0.49481398", "0.49362215", "0.49271685", "0.49271685", "0.49271685", "0.49271685", "0.49271685", "0.49202138", "0.4913419", "0.4906652", "0.49037826", "0.49008754", "0.48996115", "0.4893527", "0.48904744", "0.48799667", "0.4877941", "0.4872249", "0.48655382", "0.48636183", "0.4860455", "0.48576006", "0.48478958", "0.48448783", "0.4843316", "0.4803457", "0.4803176", "0.47923905", "0.47851887", "0.47837687", "0.47822124", "0.4775673", "0.47749647", "0.47743285", "0.4772444", "0.4770271", "0.47668463", "0.47600153", "0.47551322", "0.47532567", "0.47404984", "0.47390857", "0.47364703", "0.4731931", "0.47202283", "0.47172502", "0.4716195", "0.47144336", "0.47043577", "0.47025445", "0.46992126", "0.46933794" ]
0.0
-1
Given a HoloMap, compute the appropriate (mapwise or framewise) ranges needed to apply the Compositor collapse operations in display mode (data collapse should already have happened).
def _apply_compositor(self, holomap, ranges=None, keys=None, dimensions=None): # Compute framewise normalization defaultdim = holomap.ndims == 1 and holomap.kdims[0].name != 'Frame' if keys and ranges and dimensions and not defaultdim: dim_inds = [dimensions.index(d) for d in holomap.kdims] sliced_keys = [tuple(k[i] for i in dim_inds) for k in keys] frame_ranges = OrderedDict([(slckey, self.compute_ranges(holomap, key, ranges[key])) for key, slckey in zip(keys, sliced_keys) if slckey in holomap.data.keys()]) else: mapwise_ranges = self.compute_ranges(holomap, None, None) frame_ranges = OrderedDict([(key, self.compute_ranges(holomap, key, mapwise_ranges)) for key in holomap.keys()]) ranges = frame_ranges.values() return Compositor.collapse(holomap, (ranges, frame_ranges.keys()), mode='display')
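A simplified sketch of the mapwise-versus-framewise bookkeeping the method above performs, using a plain callable as a stand-in for the real range machinery (compute_ranges here is a hypothetical parameter, not a HoloViews API):

from collections import OrderedDict

def frame_ranges(keys, compute_ranges, framewise=True):
    # framewise: one freshly computed range dict per frame key;
    # mapwise: a single shared range dict reused for every key.
    if framewise:
        return OrderedDict((key, compute_ranges(key)) for key in keys)
    shared = compute_ranges(None)                   # None = whole map
    return OrderedDict((key, shared) for key in keys)

Either way, the result maps each frame key to the normalization ranges the collapse step should see, mirroring the frame_ranges dict built in the method above.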
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _modify_map_size(self, merged_map):\n pos_x_white, pos_y_white = np.where(merged_map == 255)\n pos_x_black, pos_y_black = np.where(merged_map == 0)\n\n pos_x_M = np.amax(np.hstack((pos_x_black, pos_x_white)))\n pos_x_m = np.amin(np.hstack((pos_x_black, pos_x_white)))\n pos_y_M = np.amax(np.hstack((pos_y_black, pos_y_white)))\n pos_y_m = np.amin(np.hstack((pos_y_black, pos_y_white)))\n\n reduced_map = merged_map[pos_x_m-5:pos_x_M+5, pos_y_m-5:pos_y_M+5]\n\n return reduced_map", "def disp_map(disp):\n map = np.array([\n [0,0,0,114],\n [0,0,1,185],\n [1,0,0,114],\n [1,0,1,174],\n [0,1,0,114],\n [0,1,1,185],\n [1,1,0,114],\n [1,1,1,0]\n ])\n # grab the last element of each column and convert into float type, e.g. 114 -> 114.0\n # the final result: [114.0, 185.0, 114.0, 174.0, 114.0, 185.0, 114.0]\n bins = map[0:map.shape[0]-1,map.shape[1] - 1].astype(float)\n\n # reshape the bins from [7] into [7,1]\n bins = bins.reshape((bins.shape[0], 1))\n\n # accumulate element in bins, and get [114.0, 299.0, 413.0, 587.0, 701.0, 886.0, 1000.0]\n cbins = np.cumsum(bins)\n\n # divide the last element in cbins, e.g. 1000.0\n bins = bins / cbins[cbins.shape[0] -1]\n\n # divide the last element of cbins, e.g. 1000.0, and reshape it, final shape [6,1]\n cbins = cbins[0:cbins.shape[0]-1] / cbins[cbins.shape[0] -1]\n cbins = cbins.reshape((cbins.shape[0], 1))\n\n # transpose disp array, and repeat disp 6 times in axis-0, 1 times in axis-1, final shape=[6, Height*Width]\n ind = np.tile(disp.T, (6,1))\n tmp = np.tile(cbins, (1, disp.size))\n\n # get the number of disp's elements bigger than each value in cbins, and sum up the 6 numbers\n b = (ind > tmp).astype(int)\n s = np.sum(b, axis=0)\n\n bins = 1 / bins\n\n # add an element 0 ahead of cbins, [0, cbins]\n t = cbins\n cbins = np.zeros((cbins.size+1,1))\n cbins[1:] = t\n\n # get the ratio and interpolate it\n disp = (disp - cbins[s]) * bins[s]\n disp = map[s,0:3] * np.tile(1 - disp,(1,3)) + map[s + 1,0:3] * np.tile(disp,(1,3))\n\n return disp", "def disp_map(disp):\n map = np.array([\n [0, 0, 0, 114],\n [0, 0, 1, 185],\n [1, 0, 0, 114],\n [1, 0, 1, 174],\n [0, 1, 0, 114],\n [0, 1, 1, 185],\n [1, 1, 0, 114],\n [1, 1, 1, 0]\n ])\n # grab the last element of each column and convert into float type, e.g. 114 -> 114.0\n # the final result: [114.0, 185.0, 114.0, 174.0, 114.0, 185.0, 114.0]\n bins = map[0:map.shape[0] - 1, map.shape[1] - 1].astype(float)\n\n # reshape the bins from [7] into [7,1]\n bins = bins.reshape((bins.shape[0], 1))\n\n # accumulate element in bins, and get [114.0, 299.0, 413.0, 587.0, 701.0, 886.0, 1000.0]\n cbins = np.cumsum(bins)\n\n # divide the last element in cbins, e.g. 1000.0\n bins = bins / cbins[cbins.shape[0] - 1]\n\n # divide the last element of cbins, e.g. 
1000.0, and reshape it, final shape [6,1]\n cbins = cbins[0:cbins.shape[0] - 1] / cbins[cbins.shape[0] - 1]\n cbins = cbins.reshape((cbins.shape[0], 1))\n\n # transpose disp array, and repeat disp 6 times in axis-0, 1 times in axis-1, final shape=[6, Height*Width]\n ind = np.tile(disp.T, (6, 1))\n tmp = np.tile(cbins, (1, disp.size))\n\n # get the number of disp's elements bigger than each value in cbins, and sum up the 6 numbers\n b = (ind > tmp).astype(int)\n s = np.sum(b, axis=0)\n\n bins = 1 / bins\n\n # add an element 0 ahead of cbins, [0, cbins]\n t = cbins\n cbins = np.zeros((cbins.size + 1, 1))\n cbins[1:] = t\n\n # get the ratio and interpolate it\n disp = (disp - cbins[s]) * bins[s]\n disp = map[s, 0:3] * np.tile(1 - disp, (1, 3)) + map[s + 1, 0:3] * np.tile(disp, (1, 3))\n\n return disp", "def test_change_min_max(self):\n\n datarange = self.colormap.range\n\n # Perform a dummy mapping.\n a = ArrayDataSource(array([0.0, 0.5, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n\n # Update the min_value.\n datarange.low = -1.0\n\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, 0.0, 1.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. Got %s\" % (expected, b[:,:1]))\n\n # Update the max_value.\n datarange.high = 0.0\n # Test that the map still works.\n a = ArrayDataSource(array([-1.0, -0.5, 0.0]))\n datarange.add(a)\n b = self.colormap.map_screen(a.get_data())\n datarange.remove(a)\n expected = array([0.0, 0.5, 1.0])\n\n close = allclose(ravel(b[:,:1]), expected, atol=0.02)\n self.assert_(close,\n \"Changing min value broke map. Expected %s. 
Got %s\" % (expected, b[:,:1]))\n\n\n return", "def project_ranges(cb, msg, attributes):\n if skip(cb, msg, attributes):\n return msg\n\n plot = get_cb_plot(cb)\n x0, x1 = msg.get('x_range', (0, 1000))\n y0, y1 = msg.get('y_range', (0, 1000))\n extents = x0, y0, x1, y1\n x0, y0, x1, y1 = project_extents(extents, plot.projection,\n plot.current_frame.crs)\n coords = {'x_range': (x0, x1), 'y_range': (y0, y1)}\n return {k: v for k, v in coords.items() if k in attributes}", "def __init__(self, colmaps, min_value, max_value):\n \n self.colmaps = colmaps\n self.anz_seg = len(self.colmaps)\n \n self.xmin = []\n self.xmax = []\n self.colmap = []\n \n # min_value being smaller than the smallest min value\n # of a segment is not allowed (same for max_value)\n if min_value < self.colmaps[0][0]:\n min_value = self.colmaps[0][0]\n \n if max_value > self.colmaps[self.anz_seg-1][1]:\n max_value = self.colmaps[self.anz_seg-1][1]\n \n # scale segment borders to interval [0,1]\n for i in xrange(self.anz_seg):\n x = colmaps[i][0]\n self.xmin.append((x-min_value)/(max_value-min_value))\n \n x = colmaps[i][1]\n self.xmax.append((x-min_value)/(max_value-min_value))\n \n self.colmap.append(colmaps[i][2])\n \n print self.xmin, self.xmax", "def draw_composite_map(date_obj, t850, u200, v200, u500, v500, mslp, gh500, u850, v850, pwat):\n \n #Get lat and lon arrays for this dataset:\n lat = t850.lat.values\n lon = t850.lon.values\n\n #========================================================================================================\n # Create a Basemap plotting figure and add geography\n #========================================================================================================\n\n #Create a Plate Carree projection object\n proj_ccrs = ccrs.Miller(central_longitude=0.0)\n\n #Create figure and axes for main plot and colorbars\n fig = plt.figure(figsize=(18,12),dpi=125)\n gs = gridspec.GridSpec(12, 36, figure=fig) #[ytop:ybot, xleft:xright]\n ax = plt.subplot(gs[:, :-1],projection=proj_ccrs) #main plot\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax2 = plt.subplot(gs[:4, -1]) #top plot\n ax2.set_xticklabels([])\n ax2.set_yticklabels([])\n ax3 = plt.subplot(gs[4:8, -1]) #bottom plot\n ax3.set_xticklabels([])\n ax3.set_yticklabels([])\n ax4 = plt.subplot(gs[8:, -1]) #bottom plot\n ax4.set_xticklabels([])\n ax4.set_yticklabels([])\n\n #Add political boundaries and coastlines\n ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidths=1.2)\n ax.add_feature(cfeature.BORDERS.with_scale('50m'), linewidths=1.2)\n ax.add_feature(cfeature.STATES.with_scale('50m'), linewidths=0.5)\n\n #Add land/lake/ocean masking\n land_mask = cfeature.NaturalEarthFeature('physical', 'land', '50m',\n edgecolor='face', facecolor='#e6e6e6')\n sea_mask = cfeature.NaturalEarthFeature('physical', 'ocean', '50m',\n edgecolor='face', facecolor='#ffffff')\n lake_mask = cfeature.NaturalEarthFeature('physical', 'lakes', '50m',\n edgecolor='face', facecolor='#ffffff')\n ax.add_feature(sea_mask,zorder=0)\n ax.add_feature(land_mask,zorder=0)\n ax.add_feature(lake_mask,zorder=0)\n\n #========================================================================================================\n # Fill contours\n #========================================================================================================\n\n #--------------------------------------------------------------------------------------------------------\n # 850-hPa temperature\n 
#--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(-40,40,1)\n cmap = plt.get_cmap('jet')\n extend = \"both\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,t850,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.1)\n\n #--------------------------------------------------------------------------------------------------------\n # PWAT\n #--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(20,71,0.5)\n\n #Define a color gradient for PWAT\n pwat_colors = gradient([[(255,255,255),0.0],[(255,255,255),20.0]],\n [[(205,255,205),20.0],[(0,255,0),34.0]],\n [[(0,255,0),34.0],[(0,115,0),67.0]])\n cmap = pwat_colors.get_cmap(clevs)\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,pwat,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.9)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax2,shrink=0.75,pad=0.01,ticks=[20,30,40,50,60,70])\n\n #--------------------------------------------------------------------------------------------------------\n # 200-hPa wind\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n wind = calc.wind_speed(u200, v200)\n\n #Specify contour settings\n clevs = [40,50,60,70,80,90,100,110]\n cmap = col.ListedColormap(['#99E3FB','#47B6FB','#0F77F7','#AC97F5','#A267F4','#9126F5','#E118F3','#E118F3'])\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,wind,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax3,shrink=0.75,pad=0.01,ticks=clevs)\n\n #--------------------------------------------------------------------------------------------------------\n # 500-hPa smoothed vorticity\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n dx,dy = calc.lat_lon_grid_deltas(lon,lat)\n vort = calc.vorticity(u500, v500, dx=dx, dy=dy)\n smooth_vort = smooth(vort, 5.0) * 10**5\n\n #Specify contour settings\n clevs = np.arange(2,20,1)\n cmap = plt.get_cmap('autumn_r')\n extend = \"max\"\n\n #Contour fill this variable\n norm = col.BoundaryNorm(clevs,cmap.N)\n cs = ax.contourf(lon,lat,smooth_vort,clevs,cmap=cmap,norm=norm,extend=extend,transform=proj_ccrs,alpha=0.3)\n\n #Add a color bar\n _ = plt.colorbar(cs,cax=ax4,shrink=0.75,pad=0.01,ticks=clevs[::2])\n \n #========================================================================================================\n # Contours\n #========================================================================================================\n\n #--------------------------------------------------------------------------------------------------------\n # MSLP\n #--------------------------------------------------------------------------------------------------------\n\n #Specify contour settings\n clevs = np.arange(960,1040+4,4)\n style = 'solid' #Plot solid lines\n color = 'red' #Plot lines in red\n width = 0.8 #Width of contours\n\n #Contour this variable\n cs = ax.contour(lon,lat,mslp,clevs,colors=color,linewidths=width,linestyles=style,transform=proj_ccrs,alpha=0.9)\n\n #Include value 
labels\n ax.clabel(cs, inline=1, fontsize=9, fmt='%d')\n\n #--------------------------------------------------------------------------------------------------------\n # Geopotential heights\n #--------------------------------------------------------------------------------------------------------\n\n #Get the data for this variable\n gh500 = gh500 / 10.0\n\n #Specify contour settings\n clevs = np.arange(480,612,4)\n style = 'solid' #Plot solid lines\n color = 'black' #Plot lines in black\n width = 2.0 #Width of contours\n\n #Contour this variable\n cs = ax.contour(lon,lat,gh500,clevs,colors=color,linewidths=width,linestyles=style,transform=proj_ccrs)\n\n #Include value labels\n ax.clabel(cs, inline=1, fontsize=12, fmt='%d')\n\n #--------------------------------------------------------------------------------------------------------\n # 850-hPa wind arrows\n #--------------------------------------------------------------------------------------------------------\n\n #Plot wind arrows\n _ = ax.quiver(lon, lat, u850.values, v850.values, transform=proj_ccrs, regrid_shape=(38,30), scale=820, alpha=0.5)\n\n #--------------------------------------------------------------------------------------------------------\n # Label highs & lows\n #--------------------------------------------------------------------------------------------------------\n\n #Label highs and lows\n add_mslp_label(ax, proj_ccrs, mslp, lat, lon)\n\n #========================================================================================================\n # Step 6. Add map boundary, legend and plot title, then return the figure\n #========================================================================================================\n\n #Add China province boundaries\n add_china_map_2cartopy(ax, name='province')\n\n #Add custom legend\n from matplotlib.lines import Line2D\n custom_lines = [Line2D([0], [0], color='#00A123', lw=5),\n Line2D([0], [0], color='#0F77F7', lw=5),\n Line2D([0], [0], color='#FFC000', lw=5),\n Line2D([0], [0], color='k', lw=2),\n Line2D([0], [0], color='k', lw=0.1, marker=r'$\\rightarrow$', ms=20),\n Line2D([0], [0], color='r', lw=0.8),]\n\n ax.legend(custom_lines, ['PWAT (mm)', '200-hPa Wind (m/s)', '500-hPa Vorticity', '500-hPa Height (dam)', '850-hPa Wind (m/s)', 'MSLP (hPa)'], loc=2, prop={'size':12})\n\n #Format plot title\n title = \"Synoptic Composite \\nValid: \" + dt.datetime.strftime(date_obj,'%Y-%m-%d %H%M UTC')\n st = plt.suptitle(title,fontweight='bold',fontsize=16)\n st.set_y(0.92)\n\n #Return figure\n return(fig)", "def _plot_one_value(\n data_matrix, grid_metadata_dict, colour_map_object, min_colour_value,\n max_colour_value, plot_cbar_min_arrow, plot_cbar_max_arrow,\n log_scale=False):\n\n figure_object, axes_object = pyplot.subplots(\n 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES)\n )\n\n basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres = (\n _get_basemap(grid_metadata_dict)\n )\n\n num_grid_rows = data_matrix.shape[0]\n num_grid_columns = data_matrix.shape[1]\n x_spacing_metres = (\n (basemap_x_matrix_metres[0, -1] - basemap_x_matrix_metres[0, 0]) /\n (num_grid_columns - 1)\n )\n y_spacing_metres = (\n (basemap_y_matrix_metres[-1, 0] - basemap_y_matrix_metres[0, 0]) /\n (num_grid_rows - 1)\n )\n\n data_matrix_at_edges, edge_x_coords_metres, edge_y_coords_metres = (\n grids.xy_field_grid_points_to_edges(\n field_matrix=data_matrix,\n x_min_metres=basemap_x_matrix_metres[0, 0],\n y_min_metres=basemap_y_matrix_metres[0, 0],\n x_spacing_metres=x_spacing_metres,\n 
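# NOTE: this helper resamples the field from grid centres onto cell corners; the unpacked edge coordinates feed the pcolormesh call below\n 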
y_spacing_metres=y_spacing_metres)\n )\n\n data_matrix_at_edges = numpy.ma.masked_where(\n numpy.isnan(data_matrix_at_edges), data_matrix_at_edges\n )\n\n # data_matrix_at_edges[numpy.isnan(data_matrix_at_edges)] = -1\n\n plotting_utils.plot_coastlines(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_countries(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_states_and_provinces(\n basemap_object=basemap_object, axes_object=axes_object,\n line_colour=BORDER_COLOUR)\n\n plotting_utils.plot_parallels(\n basemap_object=basemap_object, axes_object=axes_object,\n num_parallels=NUM_PARALLELS)\n\n plotting_utils.plot_meridians(\n basemap_object=basemap_object, axes_object=axes_object,\n num_meridians=NUM_MERIDIANS)\n\n basemap_object.pcolormesh(\n edge_x_coords_metres, edge_y_coords_metres,\n data_matrix_at_edges, cmap=colour_map_object,\n vmin=min_colour_value, vmax=max_colour_value, shading='flat',\n edgecolors='None', axes=axes_object, zorder=-1e12)\n\n colour_bar_object = plotting_utils.plot_linear_colour_bar(\n axes_object_or_matrix=axes_object, data_matrix=data_matrix,\n colour_map_object=colour_map_object, min_value=min_colour_value,\n max_value=max_colour_value, orientation_string='horizontal',\n extend_min=plot_cbar_min_arrow, extend_max=plot_cbar_max_arrow,\n padding=0.05)\n\n tick_values = colour_bar_object.get_ticks()\n\n if log_scale:\n tick_strings = [\n '{0:d}'.format(int(numpy.round(10 ** v))) for v in tick_values\n ]\n elif numpy.nanmax(data_matrix) >= 6:\n tick_strings = [\n '{0:d}'.format(int(numpy.round(v))) for v in tick_values\n ]\n else:\n tick_strings = ['{0:.2f}'.format(v) for v in tick_values]\n\n colour_bar_object.set_ticks(tick_values)\n colour_bar_object.set_ticklabels(tick_strings)\n\n return figure_object, axes_object", "def decode(self, heatmaps, offsets):\n posemap = self._offset_to_pose(offsets)\n inst_indexes, inst_scores = self._get_maximum_from_heatmap(heatmaps[:, :1])\n poses = posemap.view(posemap.size(1), -1)[..., inst_indexes]\n poses = poses.view(self.num_joints, 2, -1).permute(2, 0, 1).contiguous()\n inst_scores = inst_scores.unsqueeze(1).unsqueeze(2).expand(poses.size())\n poses = torch.cat((poses, inst_scores), dim=2)\n return poses.clone()", "def updateMap(self,map):\n if not self.opened:\n col = int( self.world_rect.left / map.header_data['tilewidth'])\n row = int( self.world_rect.top / map.header_data['tileheight'])\n layerIndex = len(map.layer_data)-1\n while(layerIndex > 0):\n layer = map.layer_data[layerIndex]\n if(layer[row][col] > 1):\n layer[row][col] = 0\n break\n layerIndex -= 1\n for g in self.groups():\n g.remove(self)", "def _splineloc(self, coa_map, win=5, upscale=10):\n\n # Get shape of 3-D coalescence map\n nx, ny, nz = coa_map.shape\n n = np.array([nx, ny, nz])\n\n # Find maximum coalescence location in grid\n mx, my, mz = np.unravel_index(np.nanargmax(coa_map), coa_map.shape)\n i = np.array([mx, my, mz])\n\n # Determining window about maximum value and trimming coa grid\n w2 = (win - 1)//2\n x1, y1, z1 = np.clip(i - w2, 0 * n, n)\n x2, y2, z2 = np.clip(i + w2 + 1, 0 * n, n)\n\n # If subgrid is not close to the edge\n if (x2 - x1) == (y2 - y1) == (z2 - z1):\n coa_map_trim = coa_map[x1:x2, y1:y2, z1:z2]\n\n # Defining the original interpolation function\n xo = np.linspace(0, coa_map_trim.shape[0] - 1,\n coa_map_trim.shape[0])\n yo = np.linspace(0, coa_map_trim.shape[1] - 1,\n coa_map_trim.shape[1])\n zo = 
np.linspace(0, coa_map_trim.shape[2] - 1,\n coa_map_trim.shape[2])\n xog, yog, zog = np.meshgrid(xo, yo, zo)\n interpgrid = Rbf(xog.flatten(), yog.flatten(), zog.flatten(),\n coa_map_trim.flatten(),\n function=\"cubic\")\n\n # Creating the new interpolated grid\n xx = np.linspace(0, coa_map_trim.shape[0] - 1,\n (coa_map_trim.shape[0] - 1) * upscale + 1)\n yy = np.linspace(0, coa_map_trim.shape[1] - 1,\n (coa_map_trim.shape[1] - 1) * upscale + 1)\n zz = np.linspace(0, coa_map_trim.shape[2] - 1,\n (coa_map_trim.shape[2] - 1) * upscale + 1)\n xxg, yyg, zzg = np.meshgrid(xx, yy, zz)\n\n # Interpolate spline function on new grid\n coa_map_int = interpgrid(xxg.flatten(), yyg.flatten(),\n zzg.flatten()).reshape(xxg.shape)\n\n # Calculate max coalescence location on interpolated grid\n mxi, myi, mzi = np.unravel_index(np.nanargmax(coa_map_int),\n coa_map_int.shape)\n mxi = mxi/upscale + x1\n myi = myi/upscale + y1\n mzi = mzi/upscale + z1\n self.output.log(\"\\t\\tGridded loc: {} {} {}\".format(mx, my, mz), self.log)\n self.output.log(\"\\t\\tSpline loc: {} {} {}\".format(mxi, myi, mzi), self.log)\n\n # Run check that spline location is within grid-cell\n if (abs(mx - mxi) > 1) or (abs(my - myi) > 1) or \\\n (abs(mz - mzi) > 1):\n msg = \"\\tSpline warning: spline location outside grid cell\"\n msg += \" with maximum coalescence value\"\n self.output.log(msg, self.log)\n\n xyz = self.lut.xyz2loc(np.array([[mxi, myi, mzi]]), inverse=True)\n loc_spline = self.lut.xyz2coord(xyz)[0]\n\n # Run check that spline location is within window\n if (abs(mx - mxi) > w2) or (abs(my - myi) > w2) or \\\n (abs(mz - mzi) > w2):\n msg = \"\\t !!!! Spline error: location outside interpolation \"\n msg += \"window !!!!\\n\\t\\t\\tGridded Location returned\"\n self.output.log(msg, self.log)\n\n xyz = self.lut.xyz2loc(np.array([[mx, my, mz]]), inverse=True)\n loc_spline = self.lut.xyz2coord(xyz)[0]\n\n else:\n msg = \"\\t !!!! 
Spline error: interpolation window crosses edge of \"\n msg += \"grid !!!!\\n\\t\\t\\tGridded Location returned\"\n self.output.log(msg, self.log)\n\n xyz = self.lut.xyz2loc(np.array([[mx, my, mz]]), inverse=True)\n loc_spline = self.lut.xyz2coord(xyz)[0]\n\n return loc_spline", "def uk_map(fig1, indata, clevs, datlons, datlats, mtitle, munits, maskswitch):\n\t\n\tfrom mpl_toolkits import basemap as bm\n\timport matplotlib.cm as cm\n\tfrom mpl_toolkits.basemap import shiftgrid \n\tfrom netCDF4 import Dataset\n\tfrom matplotlib.colors import LightSource\n\timport matplotlib.pyplot as plt\n\timport numpy as np\n\timport hillshade\n\timport set_shade\n\timport colour_map\n\t\n\tif maskswitch==1:\n\t\t# import missing data map for masking out of oceans \n\t\tmissdata = Dataset('/exports/work/geos_cxc/users/ahardin4/output/amibatch/afixa/miss.nc', 'r', format='NETCDF3_CLASSIC')\n\t\t\n\t# create the figure and axes instances.\n\tax = fig1.add_axes([0.1,0.1,0.8,0.8])\n\tm = bm.Basemap(llcrnrlon=-9.5,llcrnrlat=49.5,urcrnrlon=2.5,urcrnrlat=59,rsphere=(6378137.00,6356752.3142),\\\n \tresolution='f',area_thresh=1000.,projection='laea', lat_0=54.5,lon_0=-2.75,ax=ax)\n\tm.drawcoastlines()\n\t\n\t# read in etopo5 topography/bathymetry.\n\turl = 'http://ferret.pmel.noaa.gov/thredds/dodsC/data/PMEL/etopo5.nc'\n\tetopodata = Dataset(url)\n\ttopoin = etopodata.variables['ROSE'][:]\n\tlons = etopodata.variables['ETOPO05_X'][:]\n\tlats = etopodata.variables['ETOPO05_Y'][:]\n\t\n\t# shift data so lons go from -180 to 180 instead of 00 to 360.\n\ttopoin,lons = shiftgrid(180.,topoin,lons,start=False)\n\n\t# transform coordinates\n\tx,y=m(datlons[:,:],datlats[:,:])\n\t# transform to nx x ny regularly spaced 5km native projection grid\n\tnx = int((m.xmax-m.xmin)/5000.)+1; ny = int((m.ymax-m.ymin)/5000.)+1\n\ttopodat = m.transform_scalar(topoin,lons,lats,nx,ny)\n\t\n\t# create light source object for topography\n\tls = LightSource(azdeg = 0, altdeg = 2)\n\t# use set_shade function (also available)\n\trgb = set_shade(topodat)\n\n\t# plot image over map with imshow.\n\tim = m.imshow(rgb)\n\t\n\t# apply function to colormap pointers, can be any function at all, as long as\n\t# 0 remains 0, 1 remains 1, and values increase from one to the other.\n\t\n\t# x^4 is good for pseudo-log plots of rainfall:\n\t#log_jet=cmap_xmap(lambda x: (x*x*x*x), cm.hsv)\n\t\n\t#set to lambda x: x for no change:\n\tlog_jet=cmap_xmap(lambda x: (x), cm.jet)\n\t\n\t#apply function to colormap if desired to make whole scale 'hotter' or 'colder'\n\t#example makes colourmap significantly hotter by confining values to upper quarter:\t\n\t#log_jet=cmap_map(lambda x: x/4+0.75, cm.gist_rainbow)\n\t\n\t# mask out oceans, but not lakes. 
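(The mask is built from the land_map variable of miss.nc, grown by one grid point to allow for interpolation bleed.) 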
Useful when plotting or comparing against observed\n\tif maskswitch==1:\n\t\tmissmap=missdata.variables['land_map']\n\t\tmissmap2=missdata.variables['land_map']\n\t\t# cut from big mask to small mask if necessary\n\t\t#smallmap=missmap[0,6:46,0:34]\n\t\tsmallmap=missmap[0,:,:]\n\t\tsmallmap2=missmap2[0,:,:]\n\t\t# expand out by one to take into account interpolation\n\t\t\n\t\tfor i in range(1,39):\n\t\t\tfor j in range(1,33):\n\t\t\t\tif smallmap[i,j] == 0.0:\n\t\t\t\t\tsmallmap2[i-1,j]=0.0 \n\t\t\t\t\tsmallmap2[i,j-1]=0.0\n\t\t\t\t\tsmallmap2[i+1,j]=0.0 \n\t\t\t\t\tsmallmap2[i,j+1]=0.0\n\t\t\n\t\t# perform masking\n\t\tindata=np.ma.masked_array(indata,mask=(smallmap2<-0.5))\n\t\tprint(smallmap2[0,0], smallmap2[36,0], smallmap2[20,20])\n\t\t#indata[indata<=0.1]=np.nan\n\t# produce semi-transparent contour map\n\tcontourmap=m.contourf(x,y,indata,clevs,cmap=cm.get_cmap(log_jet,len(clevs)-1),extend='both',\n\t\talpha=0.5,origin='lower',rasterized=True)\n\t\t\n\t# produce simple block plot\n\t#contourmap=m.pcolor(x,y,indata,shading='interp',cmap=cm.get_cmap(log_jet,len(clevs)-1),\n\t#\talpha=0.5)\n\t\t\n\t# place colour bar on right\n\tcb = m.colorbar(contourmap,\"right\", size=\"5%\", pad='3%')\n\t# configure colour bar labeling\n\tcl = plt.getp(cb.ax, 'ymajorticklabels')\n\tcontourmap=plt.setp(cl, fontsize=14)\n\n\t# draw parallels and meridians so as not to clash with colour bar placement\n\t# labels = [left,right,top,bottom]\n\tm.drawparallels(np.arange(-70.,80,1.), labels=[1,0,0,1], fontsize=13)\n\tm.drawmeridians(np.arange(351.,362.,2.),labels=[1,0,0,1], fontsize=13)\n\t\n\t# configure title and units\n\tcb.ax.set_xlabel(munits, fontsize=12)\n\tcontourmap=plt.title(mtitle, fontsize=14)", "def calculate_min_max_tiles(self):\n        pass", "def is_map_obstacle_in_screen_range(self):\n        raise NotImplementedError", "def split_simcc_xy(self, heatmap: Union[np.ndarray, torch.Tensor]):\n size = heatmap.size()\n k = size[0] if size[0] <= 20 else 20\n maps = []\n for _ in range(k):\n xy_dict = {}\n single_heatmap = heatmap[_]\n xy_dict['x'], xy_dict['y'] = self.merge_maps(single_heatmap)\n maps.append(xy_dict)\n return maps, k", "def update_histo_frame():\n min_histo.text = str(MIN_RANGE_F) # Display the legend\n max_histo.text = str(MAX_RANGE_F)\n\n histogram = np.zeros(GRID_AXIS) # Clear histogram accumulation array\n # Collect camera data and calculate the histogram\n for _row in range(0, GRID_AXIS):\n for _col in range(0, GRID_AXIS):\n histo_index = int(map_range(GRID_DATA[_col, _row], 0, 1, 0, GRID_AXIS - 1))\n histogram[histo_index] = histogram[histo_index] + 1\n\n histo_scale = np.max(histogram) / (GRID_AXIS - 1)\n if histo_scale <= 0:\n histo_scale = 1\n\n # Display the histogram\n for _col in range(0, GRID_AXIS):\n for _row in range(0, GRID_AXIS):\n if histogram[_col] / histo_scale > GRID_AXIS - 1 - _row:\n image_group[((_row * GRID_AXIS) + _col)].fill = index_to_rgb(\n round((_col / GRID_AXIS), 3)\n )\n else:\n image_group[((_row * GRID_AXIS) + _col)].fill = BLACK", "def CC_2Dfilter(\n h5path_labels,\n map_propnames,\n criteria,\n h5path_int='',\n slicedim=0,\n usempi=False,\n outputfile='',\n protective=False,\n ):\n\n (min_area,\n max_area,\n max_intensity_mb,\n max_eccentricity,\n min_solidity,\n min_euler_number,\n min_extent) = criteria\n\n # prepare mpi\n mpi_info = utils.get_mpi_info(usempi)\n\n # TODO: check output path\n\n # open data for reading\n h5file_mm, ds_mm, _, _ = utils.h5_load(h5path_labels, comm=mpi_info['comm'])\n if h5path_int:\n h5file_mb, ds_mb, _, _ = 
utils.h5_load(h5path_int, comm=mpi_info['comm'])\n else:\n ds_mb = None\n # mask used as intensity image in mean_intensity criterium\n\n # get the maximum labelvalue in the input\n root = h5path_labels.split('.h5')[0]\n maxlabel = get_maxlabel(root, ds_mm)\n\n # prepare mpi\n n_slices = ds_mm.shape[slicedim]\n series = np.array(range(0, n_slices), dtype=int)\n if mpi_info['enabled']:\n series = utils.scatter_series(mpi_info, series)[0]\n if mpi_info['rank'] == 0:\n fws_reduced = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n else:\n fws_reduced = None\n\n fws = np.zeros((maxlabel + 1, len(map_propnames)),\n dtype='float')\n\n mapall = criteria.count(None) == len(criteria)\n\n # pick labels observing the constraints\n go2D = ((max_eccentricity is not None) or\n (min_solidity is not None) or\n (min_euler_number is not None) or\n mapall)\n if go2D:\n\n for i in series:\n slcMM = utils.get_slice(ds_mm, i, slicedim)\n if h5path_int:\n slcMB = utils.get_slice(ds_mb, i, slicedim) # , 'bool'\n else:\n slcMB = None\n fws = check_constraints(slcMM, fws, map_propnames,\n criteria, slcMB, mapall)\n if mpi_info['enabled']:\n mpi_info['comm'].Reduce(fws, fws_reduced, op=MPI.MAX, root=0)\n else:\n fws_reduced = fws\n\n else:\n\n if mpi_info['rank'] == 0:\n fws = check_constraints(ds_mm, fws, map_propnames,\n criteria, ds_mb, mapall)\n fws_reduced = fws\n\n # write the forward maps to a numpy vector\n if mpi_info['rank'] == 0:\n slc = int(n_slices/2)\n slcMM = ds_mm[slc, :, :]\n slcMB = ds_mb[slc, :, :] if h5path_int else None\n datatypes = get_prop_datatypes(slcMM, map_propnames, slcMB)\n for i, propname in enumerate(map_propnames):\n root = outputfile.split('.h5')[0]\n nppath = '{}_{}.npy'.format(root, propname)\n outarray = np.array(fws_reduced[:, i], dtype=datatypes[i])\n np.save(nppath, outarray)\n\n # close and return\n h5file_mm.close()\n if h5path_int:\n h5file_mb.close()\n\n if mpi_info['rank'] == 0:\n return outarray", "def set_colormap_range(self):\n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n region = self.plot.getHistogramWidget().region\n\n if(self.sender() == region):\n cmin.setText(str(region.getRegion()[0]))\n cmax.setText(str(region.getRegion()[1]))\n return\n\n # Sometimes the values in the lineEdits are\n # not proper floats so we get ValueErrors\n try:\n # If necessary swap min and max\n if(float(cmin.text()) > float(cmax.text())):\n _tmp = cmin.text()\n cmin.setText(cmax.text())\n cmax.setText(_tmp)\n\n region = [float(cmin.text()), float(cmax.text())]\n self.plot.getHistogramWidget().region.setRegion(region)\n except ValueError:\n return", "def mollview(map=None,fig=None,plot=False,filenme=None,\n\t\t\t rot=None,coord=None,unit='',\n\t\t\t xsize=800,title='Mollweide view',nest=False,\n\t\t\t min=None,max=None,flip='astro',\n\t\t\t remove_dip=False,remove_mono=False,\n\t\t\t gal_cut=0,\n\t\t\t format='%g',format2='%g',\n\t\t\t cbar=True,cmap=None, notext=False,\n\t\t\t norm=None,hold=False,margins=None,sub=None,\n\t\t\t return_projected_map=False):\n\ttry:\n\t\tfrom healpy import pixelfunc, projaxes as PA\n\texcept ImportError:\n\t\twarnings.warn(\n\t\t\t\"Could not load healpy package. 
If you want to use this feature, \"\n\t\t\t\"please install the healpy package from here: http://healpy.readthedocs.io/en/latest/ \"\n\t\t\t\"or via pip or conda.\", RuntimeWarning)\n\t\treturn\n\n\t# Create the figure\n\n\tif not (hold or sub):\n\t\tif fig is None:\n\t\t\tf=plt.figure(figsize=(8.5,5.4))\n\t\t\textent = (0.02,0.05,0.96,0.9)\n\t\telse:\n\t\t\tf=fig\n\t\t\textent = (0.02,0.05,0.96,0.9)\n\telif hold:\n\t\tf=plt.gcf()\n\t\tleft,bottom,right,top = np.array(f.gca().get_position()).ravel()\n\t\textent = (left,bottom,right-left,top-bottom)\n\t\tf.delaxes(f.gca())\n\telse: # using subplot syntax\n\t\tf=plt.gcf()\n\t\tif hasattr(sub,'__len__'):\n\t\t\tnrows, ncols, idx = sub\n\t\telse:\n\t\t\tnrows, ncols, idx = sub//100, (sub%100)//10, (sub%10)\n\t\tif idx < 1 or idx > ncols*nrows:\n\t\t\traise ValueError('Wrong values for sub: %d, %d, %d'%(nrows,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ncols,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t idx))\n\t\tc,r = (idx-1)%ncols,(idx-1)//ncols\n\t\tif not margins:\n\t\t\tmargins = (0.01,0.0,0.0,0.02)\n\t\textent = (c*1./ncols+margins[0],\n\t\t\t 1.-(r+1)*1./nrows+margins[1],\n\t\t\t 1./ncols-margins[2]-margins[0],\n\t\t\t 1./nrows-margins[3]-margins[1])\n\t\textent = (extent[0]+margins[0],\n\t\t\t extent[1]+margins[1],\n\t\t\t extent[2]-margins[2]-margins[0],\n\t\t\t extent[3]-margins[3]-margins[1])\n\n\t# Starting to draw : turn interactive off\n\twasinteractive = plt.isinteractive()\n\tplt.ioff()\n\ttry:\n\t\tif map is None:\n\t\t\tmap = np.zeros(12)+np.inf\n\t\t\tcbar=False\n\t\tmap = pixelfunc.ma_to_array(map)\n\t\tax=PA.HpxMollweideAxes(f,extent,coord=coord,rot=rot,\n\t\t\t\t\t\t format=format2,flipconv=flip)\n\t\tf.add_axes(ax)\n\t\tif remove_dip:\n\t\t\tmap=pixelfunc.remove_dipole(map,gal_cut=gal_cut,\n\t\t\t\t\t\t\t\t\tnest=nest,copy=True,\n\t\t\t\t\t\t\t\t\tverbose=True)\n\t\telif remove_mono:\n\t\t\tmap=pixelfunc.remove_monopole(map,gal_cut=gal_cut,nest=nest,\n\t\t\t\t\t\t\t\t\t copy=True,verbose=True)\n\t\timg = ax.projmap(map,nest=nest,xsize=xsize,coord=coord,vmin=min,vmax=max,\n\t\t\t cmap=cmap,norm=norm)\n\t\tif cbar:\n\t\t\tim = ax.get_images()[0]\n\t\t\tb = im.norm.inverse(np.linspace(0,1,im.cmap.N+1))\n\t\t\tv = np.linspace(im.norm.vmin,im.norm.vmax,im.cmap.N)\n\t\t\tif matplotlib.__version__ >= '0.91.0':\n\t\t\t\tcb=f.colorbar(im,ax=ax,\n\t\t\t\t\t\t orientation='horizontal',\n\t\t\t\t\t\t shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),\n\t\t\t\t\t\t pad=0.05,fraction=0.1,boundaries=b,values=v,\n\t\t\t\t\t\t format=format)\n\t\t\telse:\n\t\t\t\t# for older matplotlib versions, no ax kwarg\n\t\t\t\tcb=f.colorbar(im,orientation='horizontal',\n\t\t\t\t\t\t shrink=0.5,aspect=25,ticks=PA.BoundaryLocator(),\n\t\t\t\t\t\t pad=0.05,fraction=0.1,boundaries=b,values=v,\n\t\t\t\t\t\t format=format)\n\t\t\tcb.solids.set_rasterized(True)\n\t\tax.set_title(title)\n\t\tif not notext:\n\t\t\tax.text(0.86,0.05,ax.proj.coordsysstr,fontsize=14,\n\t\t\t\tfontweight='bold',transform=ax.transAxes)\n\t\tif cbar:\n\t\t\tcb.ax.text(0.5,-1.0,unit,fontsize=14,\n\t\t\t\t transform=cb.ax.transAxes,ha='center',va='center')\n\t\tf.sca(ax)\n\tfinally:\n\t\tif plot:\n\t\t\tplt.draw()\n\t\tif wasinteractive:\n\t\t\tplt.ion()\n\t\t\t#plt.show()\n\tif return_projected_map:\n\t\treturn img", "def drawMap(self):\n world_map = folium.Map(location=[25, 10], zoom_start=3)\n totals_column = 'total_' + self.map_type.lower()\n top10 = self.covid_df.sort_values(totals_column, axis=0, ascending=False)['location'][:10]\n scale, units = self.unitsDetector(self.covid_df[totals_column].max())\n 
\n color_scheme = {'Cases': 'YlOrRd', 'Deaths': 'PuRd'}[self.map_type]\n bins = list(np.linspace(0, np.ceil(self.covid_df[totals_column].max() / scale) * scale, 6))\n legend_name = 'Total Number of COVID-19 ' + self.map_type\n map_file_name = self.generateFileName()\n \n folium.Choropleth(geo_data=self.geo_data,\n data=self.covid_df,\n columns=['location', totals_column],\n key_on='feature.properties.ADMIN',\n fill_color=color_scheme,\n bins=bins,\n legend_name=legend_name,\n highlight=True\n ).add_to(world_map)\n \n for i in range(10):\n country = top10.iloc[i]\n cases = self.covid_df[self.covid_df['location'] == country][totals_column] / scale\n \n # Centroid coordinates for each country labelled by its ISO-2 code\n lat = self.countries_centroids.loc[self.name_iso2_mapping[country]]['latitude']\n long = self.countries_centroids.loc[self.name_iso2_mapping[country]]['longitude']\n popup = f\"{country}: {cases.values[0]:.2f}{units} total {self.map_type.lower()}\"\n \n folium.Marker(location=[lat, long],\n popup=folium.Popup(popup, \n max_width=1000)\n ).add_to(world_map)\n \n world_map.save(map_file_name)", "def undistort_rectify_map(self):\n return cv.initUndistortRectifyMap(self._k, self._dist, np.eye(3), self._k, self.frame_size[::-1], cv.CV_16SC2)", "def compute_ranges(self, obj, key, ranges):\n all_table = all(isinstance(el, Table) for el in obj.traverse(lambda x: x, [Element]))\n if obj is None or not self.normalize or all_table:\n return OrderedDict()\n # Get inherited ranges\n ranges = self.ranges if ranges is None else dict(ranges)\n\n # Get element identifiers from current object and resolve\n # with selected normalization options\n norm_opts = self._get_norm_opts(obj)\n\n # Traverse displayed object if normalization applies\n # at this level, and ranges for the group have not\n # been supplied from a composite plot\n return_fn = lambda x: x if isinstance(x, Element) else None\n for group, (axiswise, framewise) in norm_opts.items():\n elements = []\n # Skip if ranges are cached or already computed by a\n # higher-level container object.\n framewise = framewise or self.dynamic\n if group in ranges and (not framewise or ranges is not self.ranges):\n continue\n elif not framewise: # Traverse to get all elements\n elements = obj.traverse(return_fn, [group])\n elif key is not None: # Traverse to get elements for each frame\n frame = self._get_frame(key)\n elements = [] if frame is None else frame.traverse(return_fn, [group])\n if not axiswise or ((not framewise or len(elements) == 1)\n and isinstance(obj, HoloMap)): # Compute new ranges\n self._compute_group_range(group, elements, ranges)\n self.ranges.update(ranges)\n return ranges", "def make_pm_maps(input_file, input_pm_file, output_file, num_cones, num_bins=80, titles=None, mincount=0, maxcount=40, cut=None):\n # get titles for each subplot and dwarf proper motions\n titles, dwarf_pmra, dwarf_pmdec, = load_dwarf_info(input_file, titles)\n\n # load stellar pm values\n ra, dec, pmra, pmdec, parallax, parallax_error = load_gaia_search_info(input_pm_file)\n\n # from table 2 in\n # if titles is not None:\n # titles = fix_names(titles)\n # for i, title, dpmra, dpmdec in enumerate(zip(titles, dwarf_pmra, dwarf_pmdec)):\n # dwarf_pmra[i], dwarf_pmdec[i] = fix_pms(title, dpmra, dpmdec)\n # # dwarf_pmra[5] = 1.81\n # # dwarf_pmra[8] = -1.21\n # # dwarf_pmra[11] = 0.22\n # # dwarf_pmdec[5] = 0.14\n # # dwarf_pmdec[8] = -0.92\n # # dwarf_pmdec[11] = -1.41\n\n # set fig size and shape\n d = len(titles)\n rows = 3\n cols = int(np.ceil(d/rows))\n 
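# plot_setup is a project-local helper (assumed): it returns the figure plus a flat sequence of axes, one per dwarf, which the zip below iterates\n 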
fig, axs = plot_setup(rows, cols, d)\n max_count = [0, 0]\n\n # plot each dwarf in separate subplots\n for ax, title, dwarfpmra, dwarfpmdec, *data in zip(axs, titles, dwarf_pmra, dwarf_pmdec, ra, dec, pmra, pmdec, parallax, parallax_error):\n counts, xedges, yedges, im = pm_histogram(fig, ax, data, title, dwarf_pmra=dwarfpmra, dwarf_pmdec=dwarfpmdec, cut=cut)\n\n # make labels across all subplots\n universal_plot_labels(fig, r\"Proper motion, right ascension [mas/yr]\", r\"Proper motion, declination [mas/yr]\")\n\n # add a universal colorbar, change cmap in hist2d above\n # fig.colorbar(im, ax=axs.ravel().tolist())\n\n fig.savefig(output_file, bbox_inches='tight')", "def adjust_map(map_, n_codes):\n assert np.size(map_, 1) <= n_codes, \"Map does not fit in number of codes\"\n margin = n_codes - np.size(map_, 1)\n left = margin // 2\n right = margin - left\n return np.concatenate((map_[:, 0:1, :],)*left + (map_,) + (map_[:,-1:, :],)*right, axis=1)", "def example_SegmentedColorMapping(min_value, max_value):\n \n colmap1 = ColorMapper(\"red2\")\n colmap1.exponent = 0.7\n \n colmap2 = ColorMapper(\"green\")\n \n colmap3 = ColorMapper(\"green\")\n colmap3.invert = True\n \n colmap4 = ColorMapper(\"blue2\")\n colmap4.invert = True\n colmap4.exponent = 0.5\n \n colmap = SegmentedColorMapping([ (-4.0, -2.0, colmap1), (-2.0, 0.0, colmap2),\n (0.0, 2.0, colmap3), (2.0, 4.0, colmap4)],\n min_value, max_value)\n \n return colmap", "def set_colormap_full_range(self):\n if(self.plot.image is None):\n return\n \n cmin = self.settingsWidget.ui.colormap_min\n cmax = self.settingsWidget.ui.colormap_max\n data_min = numpy.min(self.plot.image)\n data_max = numpy.max(self.plot.image)\n cmin.setText(str(data_min))\n cmax.setText(str(data_max))\n self.set_colormap_range()", "def map_area( m ):\n \n \n m.drawcoastlines( linewidth = 1.5, linestyle = 'solid', color = [ 75./255., 75/255., 75/255. ] )\t\n # ------draw parallels----------------\n circles = np.arange( -90., 90. + 30, 30. ) #delat = 30.\n m.drawparallels( circles, labels = [ 1, 0, 0, 0 ] )\n \n # -------draw meridians---------------\n meridians = np.arange( 0., 360, 60. 
) #delon = 60.\n m.drawmeridians( meridians, labels = [ 0, 0, 0, 1 ] )", "def _fcn_minmax_roi(self):\n self.roi._update_cbar_minmax()\n self.cbqt.cbobjs._objs['roi']['clim'] = self.roi._clim\n kwargs = self.cbqt.cbobjs._objs['roi'].to_kwargs(True)\n self.roi.update_from_dict(kwargs)\n self.roi._update_cbar()", "def gate_out(self, *dim_ranges):\n relevant_data = self.get_points(*[r.dim for r in dim_ranges])\n mins = np.array([r.min for r in dim_ranges])\n maxes = np.array([r.max for r in dim_ranges])\n test1 = np.any(relevant_data < mins, axis=1)\n test2 = np.any(relevant_data > maxes, axis=1)\n final = np.logical_or(test1, test2) \n return DataTable(self.data[final], self.dims, self.legends, self.tags.copy())", "def make_map_slider(self,root, col,color_range, **kwargs):\n \n #Identify all the temporal options for the specified fill setting\n shpfieldslist = [item[0] for item in self.shps[0].fields]\n self.datefieldlist[col] = [item for item in shpfieldslist if item.startswith(color_range)]\n datestrlist = [item[-6:] for item in self.datefieldlist[col]]\n self.datenumlist[col] = [int(item) for item in datestrlist]\n \n #Set bounds of scale\n datemax = max(self.datenumlist[col])\n datemin = min(self.datenumlist[col])\n self.map_temporalflag[col] = 1\n \n #Generate the scale\n self.mapslider_list[col] = tk.Scale(root, \n from_=datemin, to=datemax, \n orient='horizontal',\n tickinterval = 0,\n length = self.screenwidth*0.25\n )\n \n #If a slideval was provided, set the scale to that value\n if 'slideval' in kwargs:\n slideval = kwargs.pop('slideval')\n else:\n slideval = datemax\n if not slideval:\n slideval = datemax\n self.mapslider_list[col].set(slideval)\n \n \n #Make label\n mapslider_label = tk.Label(root, text=self.translate('Map')+': ',\n bg=self.default_background)\n mapslider_label.grid(column=0, row=2)\n self.mapslider_label_list[col] = (mapslider_label)\n \n #Bind controls to scale and place on grid\n self.mapslider_list[col].bind(\"<ButtonRelease-1>\", lambda e: self.mapslide(col, factor=self.mapslider_list[col].get()))\n self.mapslider_list[col].grid(column=1, row=2, columnspan=3)\n \n #Store the field setting to be used for generating the map\n dateindex = self.datenumlist[col].index(slideval)\n datefield = self.datefieldlist[col][dateindex]\n self.date_setting_list[col] = datefield\n color_range = self.date_setting_list[col]\n \n return color_range", "def get_visible_asteroids(asteroid_map, x, y, width, height):\n\n regions = [\n [range(y, -1, -1), range(x, -1, -1)], # a\n [range(y, -1, -1), range(x, width)], # b\n [range(y, height), range(x, -1, -1)], # c\n [range(y, height), range(x, width)], # d\n ]\n\n for y_range, x_range in regions:\n for check_y in y_range:\n for check_x in x_range:\n if asteroid_map[check_y][check_x] == '#':\n mark_invisible(\n asteroid_map, x, y, width, height,\n check_x-x, check_y-y)", "def visibility_angle(minviselmap, azmap, rmap,\n rpol, azpol, elpol, DEM_res, DEM_xmin, DEM_ymin,\n rad_x, rad_y, beamwidth, pulsewidth, az_conv=0,\n raster_oversampling=1, verbose=True):\n ncols, nrows = minviselmap.shape\n\n pulselength = pulsewidth * 3.e8 / 2. 
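# = c * pulsewidth / 2, i.e. the length of one range gate on the ground 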
# [m]\n az_conv_offset = az_conv / 2.\n\n if raster_oversampling == 0:\n N = 1\n elif raster_oversampling == 1:\n N = int(np.ceil(2 * DEM_res / pulselength))\n else:\n N = raster_oversampling\n\n if N != 1:\n # New dimensions\n nc = N * ncols\n nr = N * nrows\n\n # repeat the values NxN, equivalent of rebin in IDL\n minvisvals = np.repeat(np.repeat(minviselmap, N, axis=0), N, axis=1)\n\n # New x- and y-vectors\n xvec = np.arange(nr) * DEM_res / N + DEM_xmin\n yvec = np.arange(nc) * DEM_res / N + DEM_ymin\n\n xdiff = (xvec - rad_x)\n ydiff = (yvec - rad_y)\n\n # New distance from radar map\n X, Y = np.meshgrid(xdiff, ydiff)\n rvals = np.sqrt(X ** 2 + Y ** 2)\n\n # New azimuth map\n azmap_rad = (np.arctan2(X, Y) + 2 * np.pi) % (2 * np.pi)\n azvals = azmap_rad * 180. / np.pi\n else:\n rvals = rmap\n azvals = azmap\n minvisvals = minviselmap\n\n \"\"\"\n Define the area around a point P(range, azimuth) where the cells\n have a contribution to the visibility. This area is defined with the\n range limits from range-dr/2 to range+dr/2 and the\n azimuth limits from azimuth-daz_offset to azimuth+daz_offset.\n\n For a Gaussian antenna, an azimuth offset of more than 2*HPBW does\n not have a remarkable contribution.\n With a rectangular pulse and a matched filter, cells farther away\n than the pulse length do not have a remarkable contribution.\n \"\"\"\n\n daz_offset = (2. * beamwidth) + az_conv_offset # [deg]\n del_offset = (2. * beamwidth) # [deg]\n\n delta_deg = 0.1 # [deg]\n ndaz = int(2 * daz_offset / delta_deg)\n ndel = int(2 * del_offset / delta_deg)\n\n daz_vec = np.arange(ndaz + 1) * delta_deg - daz_offset # [deg]\n del_vec = np.arange(ndel + 1) * delta_deg - del_offset # [deg]\n\n daz_area_antenna, del_area_antenna = np.meshgrid(daz_vec, del_vec)\n\n # Get the two-way weighting factor due to the azimuth and elevation offsets\n # to the main antenna direction (assuming a Gaussian antenna pattern).\n ant_weight = antenna_pattern_gauss(daz_area_antenna, del_area_antenna,\n beamwidth, az_conv=az_conv,\n units='deg')\n\n ant_weight_total = np.nansum(ant_weight)\n\n nazim = len(azpol)\n nrange = len(rpol)\n range_resolution = rpol[1] - rpol[0]\n\n # pyart storage format: 2D arrays (naz * nel, nranges)\n vispol = np.zeros((nazim * len(elpol), nrange))\n\n # Inside the loops over range (rr) and azimuth (az), the\n # coordinates (rr, az) describe the point P(rr, az) for which\n # the visibility is calculated. If more than one DEM cell is within\n # the area from az-daz/2 to az+daz/2 and from rr-dr/2 to\n # rr+dr/2, the calculated visibility value is set to all of these\n # cells (next neighbor).\n\n for iaz in range(nazim):\n logging.info(f'Computing azimuth {azpol[iaz]:2.1f}')\n # Get azimuth values to explore\n azmin = azpol[iaz] - daz_offset\n azmax = azpol[iaz] + daz_offset\n if azmin < 0:\n azmin = 360. 
+ azmin\n indaz = np.logical_or(\n np.logical_and(\n azvals >= 0, azvals < azmax), np.logical_and(\n azvals >= azmin, azvals <= 360.))\n\n elif azmax > 360:\n azmax = azmax - 360.\n indaz = np.logical_or(np.logical_and(azvals >= azmin,\n azvals <= 360),\n np.logical_and(azvals >= 0,\n azvals < azmax))\n else:\n indaz = np.logical_and(azvals >= azmin, azvals < azmax)\n\n if not np.any(indaz):\n logging.warning(\n f'Visibility for azim {azpol[iaz]:f} not known')\n continue\n\n indaz = np.where(indaz)\n\n # These declaration avoids reindexing at every loop turn\n # and save a lot of time\n rvals_indaz = rvals[indaz]\n\n for iel, el in enumerate(elpol):\n # range bins (center pulse) [m]rvec[ir]-dr/2\n rmin_ground = rpol * np.cos(el * np.pi / 180.)\n # range bins (center pulse) [m]rvec[ir]+dr/2\n rmax_ground = (rpol + range_resolution) * np.cos(el * np.pi / 180.)\n\n for ir in range(0, nrange):\n # Get range values to explore\n indr = np.logical_and(rvals_indaz >= rmin_ground[ir],\n rvals_indaz < rmax_ground[ir])\n\n if not np.any(indr):\n logging.warning(\n 'Visibility for az {:f} deg and range {:f} not known' .format(\n azpol[iaz], rpol[ir]))\n vispol[iaz + iel * nazim, ir] = 100.\n continue\n\n # DEM cells that contribute to the cells to set indset\n indcells = tuple([indaz[0][indr], indaz[1][indr]])\n\n ind = minvisvals[indcells] < el - del_offset\n if np.all(ind): # radar beam completely above min vis ang\n if ir > 0:\n if vispol[iaz + iel * nazim, ir - 1] == 100:\n vispol[iaz + iel * nazim, ir] = 100\n else:\n vispol[iaz + iel * nazim,\n ir] = vispol[iaz + iel * nazim, ir - 1]\n else:\n vispol[iaz + iel * nazim, ir] = 100.\n continue\n\n ind = minvisvals[indcells] > el + del_offset\n if np.all(ind): # radar beam completely below min vis ang\n continue\n\n # Calculate offsets in azimuth to the point P(rr,az)\n daz_area = azvals[indcells] - azpol[iaz]\n\n ind = daz_area > 180.\n daz_area[ind] = daz_area[ind] - 360.\n ind = daz_area < -180.\n daz_area[ind] = daz_area[ind] + 360.\n\n vis = 0.\n ind_rzero = rvals[indcells] <= 0.0\n\n if not np.any(ind_rzero):\n vis = vis_weighting(daz_vec, del_vec, daz_area, ant_weight,\n minvisvals[indcells], el)\n\n vis = (vis / ant_weight_total) * 100.\n\n # Set vis to all values inside the set area.\n if ir > 0:\n if vispol[iaz + iel * nazim, ir - 1] < vis:\n vispol[iaz + iel * nazim,\n ir] = vispol.data[iaz + iel * nazim,\n ir - 1]\n else:\n vispol[iaz + iel * nazim, ir] = vis\n else:\n vispol[iaz + iel * nazim, ir] = vis\n\n return vispol", "def merge_ranges():", "def transition(name, year, grids, control):\n ## SETUP -------------------------------------------------\n cc = control['cohorts'][name + '_Control']\n from_cohort_a0 = grids.area[name + '--0', year]\n from_cohort = grids.area[name, year]\n transitions_to = cc['transitions_to']\n to_cohort_a0 = grids.area[transitions_to + '--0', year]\n to_cohort = grids.area[transitions_to + '--0', year]\n ice_slope = \\\n grids.ice.get_ice_slope_grid( name )\\\n .reshape(grids.shape).astype(np.float32)\n ALD, PL = grids.ald['ALD', year], grids.ald[name ,year] \n AOI = grids.area.area_of_interest()\n POIn = grids.poi[name, year]\n POInm1 = grids.poi[name, year-1]\n drainage = grids.drainage.grid.reshape(grids.shape)\n above_idx = drainage == 'above'\n porosity = grids.ald.porosity[name]\n\n max_rot = cc['max_terrain_transition']\n\n if cc['POI_Function'] == 'Sigmoid2':\n params = np.array([\n cc['Parameters']['above']['sigmoid2_K'],\n cc['Parameters']['above']['sigmoid2_C'],\n 
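# (note) entries are consumed positionally by the chosen poi_func: the four above-drainage coefficients first, then the below-drainage four\n 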
cc['Parameters']['above']['sigmoid2_A'],\n cc['Parameters']['above']['sigmoid2_B'],\n cc['Parameters']['below']['sigmoid2_K'],\n cc['Parameters']['below']['sigmoid2_C'],\n cc['Parameters']['below']['sigmoid2_A'],\n cc['Parameters']['below']['sigmoid2_B'],\n ]).astype(np.float32)\n poi_func = calc_new_sig2_poi\n elif cc['POI_Function'] == 'Sigmoid':\n params = np.array([\n cc['Parameters']['above']['sigmoid_A1'],\n cc['Parameters']['above']['sigmoid_A2'],\n cc['Parameters']['above']['sigmoid_x0'],\n cc['Parameters']['above']['sigmoid_dx'],\n cc['Parameters']['below']['sigmoid_A1'],\n cc['Parameters']['below']['sigmoid_A2'],\n cc['Parameters']['below']['sigmoid_x0'],\n cc['Parameters']['below']['sigmoid_dx'],\n ]).astype(np.float32)\n poi_func = calc_new_sig_poi\n elif cc['POI_Function'] == 'Hill':\n params = np.array([\n cc['Parameters']['above']['hill_B'],\n cc['Parameters']['above']['hill_N'],\n cc['Parameters']['below']['hill_B'],\n cc['Parameters']['below']['hill_N'],\n ]).astype(np.float32)\n poi_func = calc_new_hill_poi\n elif cc['POI_Function'] == 'Linear':\n params = np.array([\n cc['Parameters']['above']['linear_a'],\n cc['Parameters']['above']['linear_b'],\n cc['Parameters']['below']['linear_a'],\n cc['Parameters']['below']['linear_b'],\n ]).astype(np.float32)\n poi_func = calc_linear_linear_poi\n else:\n raise KeyError(\"Not a valid function type\")\n\n present = from_cohort > 0\n pl_breach = ALD >= PL\n current_cell_mask = np.logical_and(np.logical_and(AOI, present), pl_breach)\n\n ## work ---------------\n blocks = (32, 32)\n threads = (\n int(np.ceil(ALD.shape[0] / blocks[0])),\n int(np.ceil(ALD.shape[1] / blocks[1]))\n )\n \n X = np.zeros(ALD.shape)\n calc_x[blocks, threads](X, ALD,PL)#.astype(np.float32)\n\n \n new_poi = np.zeros(X.shape)\n poi_func(new_poi, params, X, above_idx)\n\n\n update_poi[blocks, threads](POIn, POInm1, new_poi, current_cell_mask)\n \n\n # not cuda'd\n ALD[current_cell_mask] = \\\n ALD[current_cell_mask] + \\\n (ALD[current_cell_mask] - PL[ current_cell_mask ] ) * porosity\n\n rate_of_transition = np.zeros(POIn.shape) \n calc_rot[blocks, threads](rate_of_transition, POIn, ice_slope, max_rot)\n\n change = np.zeros(POIn.shape) \n calc_change[blocks, threads](\n change, rate_of_transition, from_cohort, present\n )\n \n # not cuda'd\n to_cohort_a0[present] = to_cohort[present] + change[present]\n from_cohort_a0[present] = from_cohort[present] - change[present]", "def show_colormaps():\n maps = sorted(cmlist)\n nmaps = len(maps) + 1\n\n a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103\n a = np.vstack((a, a))\n\n fig = plt.figure(figsize=(5, 10))\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n for i, name in enumerate(maps):\n ax = plt.subplot(nmaps, 1, i + 1)\n plt.axis(\"off\")\n plt.imshow(a, aspect='auto', cmap=get_cmap(name), origin='lower')\n pos = list(ax.get_position().bounds)\n fig.text(pos[0] - 0.01, pos[1], name, fontsize=10,\n horizontalalignment='right')\n\n plt.show()", "def show_umap_bokeh(data, metadata, color_field=None,\n n_neighbors=10, min_dist=0.001, metric='euclidean'):\n if color_field is None:\n dims = 3\n else:\n dims = 2\n um = umap.UMAP(n_neighbors=n_neighbors, n_components=dims,\n min_dist=min_dist, metric=metric)\n vis = um.fit_transform(data)\n\n if color_field is None:\n color = umap_color(vis[:, 2], None, 20)\n color_field = \"Third UMAP dimension\"\n else:\n color = umap_color(metadata, color_field, 20, dtype=int)\n\n scatter_data = pandas.DataFrame({\n 'umap_1': vis[:, 0],\n 'umap_2': 
vis[:, 1],\n 'color': color,\n 'htid': list(metadata.index),\n 'title': ['<br>'.join(textwrap.wrap(t))\n for t in metadata['title']],\n 'author': list(metadata['author']),\n 'pub_date': list(metadata['pub_date'])\n })\n\n plot_figure = figure(\n title=('UMAP Projection of Phasor vectors for ~1000 random '\n 'HathiTrust volumes (colored by {})'.format(color_field)),\n plot_width=800,\n plot_height=800,\n tools=('pan, wheel_zoom, tap, reset')\n )\n\n plot_figure.add_tools(HoverTool(\n tooltips=(\n \"<div><span style='font-size: 10px'>@htid{safe}</span></div>\"\n \"<div><span style='font-size: 10px'>@author{safe}</span></div>\"\n \"<div><span style='font-size: 10px'>@title{safe}</span></div>\"\n \"<div><span style='font-size: 10px'>@pub_date{safe}</span></div>\"\n )\n ))\n\n plot_figure.circle(\n 'umap_1',\n 'umap_2',\n color='color',\n source=scatter_data,\n )\n\n tap = plot_figure.select(type=TapTool)\n tap.callback = OpenURL(\n url='https://babel.hathitrust.org/cgi/pt?id=@htid{safe}'\n )\n show(plot_figure)", "def filter_plot(mode, country, continent, start_date, end_date, options):\n # Default is World mode\n chart_data = world_daywise_df\n map_data = countries_daywise_df\n print(country, continent)\n if mode == SelectionMode.Continents.value:\n #Continents mode\n if not isinstance(continent, list):\n continent = [continent]\n\n chart_data = continents_daywise_df[continents_daywise_df['WHO Region'].isin(continent)]\n map_data = map_data[map_data['WHO Region'].isin(continent)]\n elif mode == SelectionMode.Countries.value:\n # Countries mode\n if not isinstance(country, list):\n country = [country]\n\n chart_data = countries_daywise_df[countries_daywise_df['Country/Region'].isin(country)]\n map_data = chart_data\n\n chart_data = chart_data.query('Date >= @start_date & Date <= @end_date')\n map_data = map_data.query('Date >= @start_date & Date <= @end_date')\n\n # fix error when groupby geometry or put it in the aggregate column\n temp = map_data.drop(['geometry', 'country_code', 'Date'], axis=1).groupby(['Country/Region']).agg(metrics).reset_index()\n map_data = join_country_code_data(temp, country_code_data)\n\n if is_perCapita(options):\n for metric in ['Confirmed', 'Deaths', 'Recovered']:\n chart_data[metric + '_per_capita'] = chart_data[metric] / chart_data['Population']\n map_data[metric + '_per_capita'] = map_data[metric] / map_data['Population']\n \n if is_perCapita(options):\n return plot(chart_data, 'Confirmed_per_capita', 'Confirmed Cases Per Capita'), \\\n plot(chart_data, 'Deaths_per_capita', 'Confirmed Deaths Per Capita'), \\\n plot(chart_data, 'Recovered_per_capita', 'Confirmed Recoveries Per Capita'), \\\n generate_map(map_data)\n\n return plot(chart_data, 'Confirmed', 'Confirmed Cases'), \\\n plot(chart_data, 'Deaths', 'Confirmed Deaths'), \\\n plot(chart_data, 'Recovered', 'Confirmed Recoveries'), \\\n generate_map(map_data)", "def display_map(data_map, clear):\n # if clear:\n #clear_output()\n\n # Check which player have to play and define displaying constants.\n player = 'player' + str((data_map['main_turn'] % 2) + 1)\n ennemy = 'player' + str(2 - (data_map['main_turn'] % 2))\n ui_color = data_map[player + 'info'][0]\n\n data_cell = {'ui_color': ui_color}\n\n # Generate the units to be displayed.\n for i in range(1, data_map['map_size'] + 1):\n for j in range(1, data_map['map_size'] + 1):\n\n # Coloration black/white of the cells.\n background_cell = ''\n if (i + j) % 2 == 0:\n background_cell = Back.WHITE\n\n if (i, j) in data_map['player1']:\n data_cell['(' + str(i) + 
',' + str(j) + ')'] = data_map['player1'][(i, j)][1] + background_cell + ' ☻' + str(data_map['player1'][(i, j)][0]) + (str(data_map['player1'][(i, j)][2]) + ' ')[:2]\n elif (i, j) in data_map['player2']:\n data_cell['(' + str(i) + ',' + str(j) + ')'] = data_map['player2'][(i, j)][1] + background_cell + ' ☻' + str(data_map['player2'][(i, j)][0]) + (str(data_map['player2'][(i, j)][2]) + ' ')[:2]\n else:\n data_cell['(' + str(i) + ',' + str(j) + ')'] = background_cell + (' ' * 5)\n\n # Generate the statistics to be displayed.\n player1_cell = list(data_map[player].keys())\n cell1_counter = 0\n player2_cell = list(data_map[ennemy].keys())\n cell2_counter = 0\n unit_name = {'E': 'Elf', 'D': 'Dwarf'}\n\n for i in range(1, 5):\n for j in range(1, 3):\n if len(player1_cell) > cell1_counter:\n data_cell['stat' + str(i) + str(j)] = (('0' + str(player1_cell[cell1_counter][0]))[-2:] + '-' + ('0' + str(player1_cell[cell1_counter][1]))[-2:] + ' ' + unit_name[data_map[player][player1_cell[cell1_counter]][0]] + ' hp: ' + str(data_map[player][player1_cell[cell1_counter]][2]) + ' ' * 20)[:20]\n cell1_counter += 1\n else:\n data_cell['stat' + str(i) + str(j)] = ' ' * 20\n for j in range(3, 5):\n if len(player2_cell) > cell2_counter:\n data_cell['stat' + str(i) + str(j)] = (('0' + str(player2_cell[cell2_counter][0]))[-2:] + '-' + ('0' + str(player2_cell[cell2_counter][1]))[-2:] + ' ' + unit_name[data_map[ennemy][player2_cell[cell2_counter]][0]] + ' hp: ' + str(data_map[ennemy][player2_cell[cell2_counter]][2]) + ' ' * 20)[:20]\n cell2_counter += 1\n else:\n data_cell['stat' + str(i) + str(j)] = ' ' * 20\n\n # Generate the title of the map to be displayed.\n data_cell['turn'] = str(data_map['main_turn']//2 + 1)\n data_cell['playername'] = data_map[player + 'info'][1]\n data_cell['blank'] = ((data_map['map_size'] * 5) - 19 - len(data_cell['turn']) - len(data_cell['playername'])) * ' '\n\n # Print the top of the UI.\n for line in data_map['data_ui']:\n print(line % data_cell)", "def plotRegionFromMCOOLS(filepath:str,resolution:int,genome_coord1:str,genome_coord2=None,\n if_log=False,balance=False,title=\"Matrix\",plotType=\"static\",range_color=None,color_continuous_scale = None):\n import plotly.express as px \n from IPython.display import Image\n import re\n import cooler\n import numpy as np\n\n cool = filepath+\"::/resolutions/\"+str(resolution)\n\n if genome_coord2 is None:\n genome_coord2 = genome_coord1\n c = cooler.Cooler(cool)\n matrix = c.matrix(balance=balance).fetch(genome_coord1,genome_coord2).astype(\"double\")\n\n if if_log: \n matrix = np.log10(matrix+1)\n if color_continuous_scale is None:\n color_continuous_scale = px.colors.sequential.Viridis \n fig = px.imshow(matrix,color_continuous_scale=color_continuous_scale,range_color=range_color)\n fig = fig.update_layout(title=title)\n fig = fig.update_layout(template='simple_white').update_layout(width=650,height=600)\n #fig = fig.update_layout(xaxis_title=genome_coord2,yaxis_title=genome_coord1)\n\n #manually change axis\n posx = re.split(\"[:-]\",genome_coord2)\n xvals = np.percentile([np.round(i) for i in range(0,matrix.shape[1])],(0,25,50,75,100),interpolation='midpoint')\n xtexts = xvals*resolution + int(posx[1].replace(\",\",\"\")) + resolution/2\n xtexts = [genomecoord2human(i) for i in xtexts]\n\n posy = re.split(\"[:-]\",genome_coord1)\n yvals = np.percentile([np.round(i) for i in range(0,matrix.shape[0])],(0,25,50,75,100),interpolation='midpoint')\n ytexts = yvals*resolution + int(posy[1].replace(\",\",\"\")) + resolution/2\n ytexts = [genomecoord2human(i) 
for i in ytexts]\n\n fig = fig.update_xaxes(ticktext = xtexts,tickvals = xvals).update_yaxes(ticktext = ytexts,tickvals = yvals)\n\n # static plots have better performance in Jupyter\n if(plotType == \"interaction\"):\n return fig\n else: return Image(fig.to_image(format=\"png\", engine=\"kaleido\"))", "def subplot_contribution_map_list(\r\n self, contribution_map_list_list: List[aa.Array2D]\r\n ):\r\n contribution_maps = [\r\n contribution_map\r\n for contribution_map in contribution_map_list_list\r\n if contribution_map is not None\r\n ]\r\n\r\n number_subplots = len(contribution_maps)\r\n\r\n if number_subplots == 0:\r\n return\r\n\r\n self.open_subplot_figure(number_subplots=number_subplots)\r\n\r\n for contribution_map_array in contribution_maps:\r\n self.figure_contribution_map(contribution_map=contribution_map_array)\r\n\r\n self.mat_plot_2d.output.subplot_to_figure(\r\n auto_filename=\"subplot_contribution_map_list\"\r\n )\r\n\r\n self.close_subplot_figure()", "def create_map():\n pass\n # for line in range(0, shared.lines):\n # map_data[line][0] = (1, -1)\n # map_data[line][shared.columns - 1] = (1, -1)\n #\n # for column in range(0, shared.columns):\n # map_data[0, column] = (-1, 1)\n # # if column <= shared.left_space or column > shared.columns - shared.left_space:\n # map_data[shared.lines - 1, column] = (-1, 1)", "def merge(bound_map, min_symbol_map, eps, image):\n x, y, max_y, canvas, images, scale = 0, 0, 0, Image.new('RGB', (800, 1200), (255, 255, 255)), [], []\n for label in bound_map:\n img = crop_img(image, bound_map[label])\n images.append(img_resize(img, min_symbol_map[label]))\n scale.append(images[-1].width / img.width)\n eps = max(scale) * eps\n for image in images:\n if x + image.width + 4*eps > 800: # next row\n x = 0\n y = max_y\n max_y = 0\n canvas.paste(image, (x + 2 * eps, y + 2 * eps, x + 2 * eps + image.width, image.height + 2 * eps + y))\n if max_y < image.height + 2 * eps + y:\n max_y = image.height + 2 * eps + y\n x += 4 * eps + image.width\n\n # PIL's Image.crop takes a single 4-tuple box, not four separate coordinates\n return [canvas.crop((0, 0, 800, max(max_y, y))), eps]", "def _getColormapRange(self):\n item = self.item()\n if item is not None and self._colormap is not None:\n return self._colormap.getColormapRange(item)\n else:\n return 1, 100 # Fallback", "def overviewFigure(tile_change, output = False, show = True):\n \n import matplotlib.pyplot as plt\n \n # Load AGB, and update masks to exclude areas outside forest definition. 
Good for visualisation\n AGB_t1 = tile_change.tile_t1.getAGB()\n AGB_t2 = tile_change.tile_t2.getAGB()\n \n # Mask out areas < 10 tC/ha\n AGB_t1 = np.ma.array(AGB_t1, mask = np.logical_or(AGB_t1.mask, AGB_t1 < 10.))\n AGB_t2 = np.ma.array(AGB_t2, mask = np.logical_or(AGB_t2.mask, AGB_t1 < 10.))\n \n AGB_change = AGB_t2 - AGB_t1\n #AGB_pcChange = 100 * (AGB_change / AGB_t1) # %\n \n # Calculate change type\n change_type = tile_change.getChangeType()\n change_code = tile_change.ChangeCode\n \n # Set minor loss and minor gain to nodata\n change_code[np.logical_or(change_code == 3, change_code == 4)] = 0\n change_code.mask[change_type.data == 0] = True\n \n fig = plt.figure(figsize = (7, 6))\n \n # Plot a map of AGB at t1\n ax1 = fig.add_subplot(2, 2, 1)\n buildMap(fig, ax1, AGB_t1, tile_change.lat, tile_change.lon, title = 'AGB %s'%str(tile_change.year_t1), cbartitle = 'tC/ha', vmin = 10., vmax = 40., cmap = 'YlGn')\n \n # Plot a map of AGB at t2\n ax2 = fig.add_subplot(2, 2, 2, sharex = ax1, sharey = ax1)\n buildMap(fig, ax2, AGB_t2, tile_change.lat, tile_change.lon, title = 'AGB %s'%str(tile_change.year_t2), cbartitle = 'tC/ha', vmin = 10., vmax = 40., cmap = 'YlGn') \n \n # Plot a map of absolute AGB change \n ax3 = fig.add_subplot(2, 2, 3, sharex = ax1, sharey = ax1)\n buildMap(fig, ax3, AGB_change, tile_change.lat, tile_change.lon, title = 'AGB change (%s-%s)'%(str(tile_change.tile_t1.year),str(tile_change.year_t2)),\n cbartitle = 'tC/ha', vmin = -10., vmax = 10., cmap = 'RdBu') \n \n # Plot a map of % AGB change\n ax4 = fig.add_subplot(2, 2, 4, sharex = ax1, sharey = ax1)\n buildMap(fig, ax4, change_code, tile_change.lat, tile_change.lon, title = 'Change type (%s-%s)'%(str(tile_change.tile_t1.year),str(tile_change.year_t2)),\n vmin = 1., vmax = 6., cmap = 'Spectral')\n \n plt.tight_layout()\n \n # Output image to png\n if output:\n output_pattern = tile_change.output_pattern.replace('.tif','.png')\n \n output_path = os.path.abspath(os.path.expanduser('%s/%s'%(tile_change.output_dir, output_pattern%('OverviewFigure'))))\n \n plt.savefig(output_path, dpi = 150)\n \n # Display image on screen\n if show:\n plt.show()\n \n plt.close()", "def idft_map(input_visibilities, output_map, input_uv):\n m, n = output_map.shape\n size = m * n\n\n x = Visibility.generate_xy(m, 1)\n y = Visibility.generate_xy(n, 1)\n\n x, y = np.meshgrid(x, y)\n x = x.reshape(size)\n y = y.reshape(size)\n\n im = np.zeros(size)\n\n for i in range(size):\n im[i] = (1 / input_visibilities.size) * np.sum(\n input_visibilities * np.exp(\n 2j * np.pi * (input_uv[0, :] * x[i] + input_uv[1, :] * y[i])))\n\n return im.reshape(m, n)", "def summarize_ranges(self, ranges):\n if len(ranges) == 0: return []\n min_ = 'min'\n max_ = 'max'\n for r in ranges:\n if r[0][0] == \"min\":\n r[0][0] = min_\n else:\n min_ = r[0][0]\n if r[-1][1] == \"max\":\n r[-1][1] = max_\n else:\n max_ = r[-1][1]\n return ranges[-1]", "def plot_map(priors):\n sns.set_style(\"white\")\n\n cmap=sns.cubehelix_palette(8, start=.5, rot=-.75,as_cmap=True)\n hdulists=list(map(lambda prior:postmaps.make_fits_image(prior,prior.sim), priors))\n fig = plt.figure(figsize=(10*len(priors),10))\n figs=[]\n for i in range(0,len(priors)):\n figs.append(aplpy.FITSFigure(hdulists[i][1],figure=fig,subplot=(1,len(priors),i+1)))\n\n for i in range(0,len(priors)):\n vmin=np.min(priors[i].sim)\n vmax=np.max(priors[i].sim)\n figs[i].show_colorscale(vmin=vmin,vmax=vmax,cmap=cmap)\n figs[i].show_markers(priors[i].sra, priors[i].sdec, edgecolor='black', facecolor='black',\n 
marker='o', s=20, alpha=0.5)\n        figs[i].tick_labels.set_xformat('dd.dd')\n        figs[i].tick_labels.set_yformat('dd.dd')\n        figs[i].add_colorbar()\n        figs[i].colorbar.set_location('top')\n    return figs,fig", "def part1(problem_input: Iterable[str]) -> int:\n    height_map = [[int(n) for n in s.strip()] for s in problem_input]\n    visible: set[tuple[int, int]] = set()\n\n    # From the West\n    for y in range(0, len(height_map)):\n        visible.add((0, y))\n        for x in range(1, len(height_map[0])):\n            if height_map[y][x] > max(height_map[y][:x]):\n                visible.add((x, y))\n\n    # From the East\n    for y in range(0, len(height_map)):\n        visible.add((len(height_map[0]) - 1, y))\n        for x in range(len(height_map[0]) - 2, -1, -1):\n            if height_map[y][x] > max(height_map[y][x + 1 :]):\n                visible.add((x, y))\n\n    # From the North\n    for x in range(0, len(height_map[0])):\n        visible.add((x, 0))\n        for y in range(1, len(height_map)):\n            if height_map[y][x] > max(c[x] for c in height_map[:y]):\n                visible.add((x, y))\n\n    # From the South\n    for x in range(0, len(height_map[0])):\n        visible.add((x, len(height_map) - 1))\n        for y in range(len(height_map) - 2, -1, -1):\n            if height_map[y][x] > max(c[x] for c in height_map[y + 1 :]):\n                visible.add((x, y))\n\n    return len(visible)", "def _process_out_for_gui(prediction, argmax):\n    out = torch.zeros_like(prediction)\n    # Create a curbs mask\n    mask = argmax == 1\n    # Fill seg 1 (curbs) with those in the mask\n    out[1] = torch.where(mask, prediction[1], out[1])\n\n    # Create a curb cuts mask\n    mask = argmax == 2\n    # Fill seg 2 (curb cuts) with those in the mask\n    out[2] = torch.where(mask, prediction[2], out[2])\n\n    # Memory saving\n    del mask\n\n    return out", "def _get_display_range(image): # pragma: no cover\n    ip = _get_image_properties(image)\n    immin, immax = np.min(image), np.max(image)\n    if ip.signed:\n        magnitude = max(abs(immin), abs(immax))\n        lo, hi = -magnitude, magnitude\n        cmap = _diverging_colormap\n    elif any(ip):\n        _raise_warnings(ip)\n        lo, hi = immin, immax\n        cmap = _nonstandard_colormap\n    else:\n        lo = 0\n        imtype = image.dtype.type\n        hi = dtype_range[imtype][1]\n        cmap = _default_colormap\n    return lo, hi, cmap", "def countmap(band,skypos,tranges,skyrange,width=False,height=False,\n\t\t\t verbose=0,tscale=1000.,memlight=False,hdu=False,retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\tcount = np.zeros(imsz)\n\tfor trange in tranges:\n\t\t# If memlight is requested, break the integration into\n\t\t# smaller chunks.\n\t\tstep = memlight if memlight else trange[1]-trange[0]\n\t\tfor i in np.arange(trange[0],trange[1],step):\n\t\t\tt0,t1=i,i+step\n\t\t\tif verbose:\n\t\t\t\tprint_inline('Coadding '+str(t0)+' to '+str(t1))\n\t\t\tevents = gQuery.getArray(gQuery.rect(band,skypos[0],skypos[1],t0,t1,\n\t\t\t\t\t\t\t\t\t\t\t\t skyrange[0],skyrange[1]),\n\t\t\t\t\t\t\t\t\t verbose=verbose,retries=retries)\n\n\t\t\t# Check that there is actually data here.\n\t\t\tif not events:\n\t\t\t\tif verbose>1:\n\t\t\t\t\tprint(\"No data in \"+str([t0,t1]))\n\t\t\t\tcontinue\n\n\t\t\ttimes = np.array(events,dtype='float64')[:,0 ]/tscale\n\t\t\tcoo =\tnp.array(events,dtype='float64')[:,1:]\n\n\t\t\t# If there's no data, return a blank image.\n\t\t\tif len(coo)==0:\n\t\t\t\tif verbose:\n\t\t\t\t\tprint('No data in this frame: '+str([t0,t1]))\n\t\t\t\tcontinue\n\n\t\t\t# Define World Coordinate System (WCS)\n\t\t\twcs = define_wcs(skypos,skyrange,width=False,height=False)\n\n\t\t\t# Map the sky coordinates onto the focal plane\n\t\t\tfoc = wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)\n\n\t\t\t# Bin the events into actual image pixels\n\t\t\tH,xedges,yedges=np.histogram2d(foc[:,1]-0.5,foc[:,0]-0.5,\n\t\t\t\t\t\t\t\tbins=imsz,range=([ [0,imsz[0]],[0,imsz[1]] ]))\n\t\t\tcount += H\n\n\treturn count", "def get_data(f, zoom_level, start_pos_1, end_pos_1, start_pos_2, end_pos_2):\n    \n    c = cooler.Cooler(f[str(zoom_level)])\n    \n    (chroms, chrom_sizes, chrom_cum_lengths) = get_chromosome_names_cumul_lengths(c)\n    \n    i0 = abs_coord_2_bin(c, start_pos_1, chroms, chrom_cum_lengths, chrom_sizes)\n    i1 = abs_coord_2_bin(c, end_pos_1, chroms, chrom_cum_lengths, chrom_sizes)\n    j0 = abs_coord_2_bin(c, start_pos_2, chroms, chrom_cum_lengths, chrom_sizes)\n    j1 = abs_coord_2_bin(c, end_pos_2, chroms, chrom_cum_lengths, chrom_sizes)\n    \n    pixels = c.matrix(as_pixels=True, balance=False, max_chunk=np.inf)[i0:i1+1, j0:j1+1]\n    \n    if not len(pixels):\n        return pd.DataFrame(columns=['genome_start1', 'genome_start2', 'balanced'])\n    \n    bins = c.bins()[['chrom', 'start', 'end', 'weight']]\n    pixels = annotate(pixels, bins)\n\n    pixels['genome_start1'] = chrom_cum_lengths[pixels['chrom1']] + pixels['start1']\n    pixels['genome_start2'] = chrom_cum_lengths[pixels['chrom2']] + pixels['start2']\n    pixels['balanced'] = (\n        pixels['count'] * pixels['weight1'] * pixels['weight2']\n    )\n    \n    return pixels[['genome_start1', 'genome_start2', 'balanced']]", "def __set_mask_regions(self):\n        self.bottom_clip = np.int32(np.int32([[[60,0], [1179,0], [1179,650], [60,650]]]))\n        self.roi_clip = np.int32(np.int32([[[640, 425], [1179,550], [979,719],\n                              [299,719], [100, 550], [640, 425]]]))", "def _mask(self, map_):\n        return None", "def __init__(self, costmap):\n        # Copy the map metadata\n        self.resolution = costmap.info.resolution\n        self.min_x = costmap.info.origin.position.x\n        self.min_y = costmap.info.origin.position.y\n        self.y_width = costmap.info.height\n        self.x_width = costmap.info.width\n        self.max_x = self.min_x + self.x_width *self.resolution\n        self.max_y = self.min_y + self.y_width *self.resolution\n        print(self.min_x, self.min_y)\n        print(self.max_x, self.max_y)\n        print(\"Resolution: \", self.resolution)\n        print(self.x_width, self.y_width)\n        \n\n        self.motion = self.get_motion_model()\n        \n        # Copy the actual map data from the map\n        x = 0\n        y = 0\n        ox = list()\n        oy = list()\n        # obstacle map generation\n        self.obstacle_map = [[False for _ in range(self.y_width)]\n                             for _ in range(self.x_width)]\n        obstacles = 0\n        for value in costmap.data:\n            if value >95:\n                obstacles += 1\n                self.obstacle_map[x][y] = True\n                ox.append(float(x)*self.resolution +self.min_x)\n                oy.append(float(y)*self.resolution +self.min_y)\n            # Update the iterators\n            x += 1\n            if x == self.x_width:\n                x = 0\n                y += 1\n        print(\"Loaded %d obstacles\"%(obstacles))\n        if show_animation: # pragma: no cover\n            plt.plot(ox, oy, \".k\")\n            plt.grid(True)\n        \n        # plt.axis(\"equal\")", "def generate_callbacks(self, app, cache) -> None:\n        super().generate_callbacks(app, cache)\n\n        @cache.memoize(timeout=60 * 60)\n        def get_plot_data(smile: str) -> pd.DataFrame:\n            client = get_contribs_client()\n            contributions = client.query_contributions(\n                query={\n                    \"project\": \"open_catalyst_project\",\n                    \"data__adsorbateSmiles__exact\": smile,\n                },\n                fields=[\"identifier\", \"data.bulkFormula\", \"data.adsorptionEnergy\"],\n                paginate=True,\n            )\n            records = [\n                {\n                    \"formula\": resp[\"data\"][\"bulkFormula\"],\n                    \"identifier\": resp[\"identifier\"],\n                    \"energy\": resp[\"data\"][\"adsorptionEnergy\"][\"value\"],\n                }\n                for resp in contributions[\"data\"]\n            ]\n            return pd.DataFrame(records)\n\n        @app.callback(\n            Output(self.id(\"heat_map\"),
\"figure\"),\n Input(self.get_kwarg_id(\"smiles\"), \"value\"),\n Input(self.get_kwarg_id(\"targetE\"), \"value\"),\n Input(self.get_kwarg_id(\"range_E\"), \"value\"),\n Input(self.id(\"tabs\"), \"value\"),\n )\n @cache.memoize(timeout=60 * 60 * 24)\n def update_figure(smile, mid_E, range_E, active_tab):\n # guard statement to ensure callback is not triggered unless viewing visualization\n if active_tab != \"visualization\":\n raise PreventUpdate\n\n smile = smile[0]\n df = get_plot_data(smile)\n df_full, df_min_E = self.modify_df(df)\n return self.get_plot(df_full, df_min_E, mid_E, range_E, 1)\n\n @app.callback(\n Output(self.id(\"display_table\"), \"children\"),\n Input(self.id(\"heat_map\"), \"clickData\"),\n )\n def display_click_data(clickData):\n if clickData is None:\n table = ctl.get_data_list(\n {\n \"Elements\": \"None\",\n \"Number of Calculations\": \"None\",\n \"Calculations\": \"None\",\n }\n )\n else:\n el1 = str(clickData[\"points\"][0][\"x\"])\n el2 = str(clickData[\"points\"][0][\"y\"])\n el_combo = el1 if el1 == el2 else el1 + \", \" + el2\n randids = clickData[\"points\"][0][\"text\"].split(\"-\")\n num_calcs = str(len(randids) - 1)\n table = ctl.get_data_list(\n {\n \"Elements\": el_combo,\n \"Number of Calculations\": num_calcs,\n \"Calculations\": [\n html.Div(\n dcc.Link(rand_id_now, href=\"/catalysis/\" + rand_id_now)\n )\n for rand_id_now in randids\n ],\n }\n )\n return table", "def flux_map(cobra_model,\n excluded_metabolites=None, excluded_reactions=None,\n excluded_compartments=None, display_name_format=True,\n overwrite_reversibility=True, **kwargs):\n\n # Initialize empty map_info field in object notes\n for obj in itertools.chain([cobra_model], cobra_model.metabolites,\n cobra_model.reactions):\n if 'map_info' not in obj.notes:\n obj.notes['map_info'] = {}\n\n # build cofactor metabolites from strings\n cobra_metabolites = []\n if excluded_metabolites:\n compartments = set((m.compartment for m in cobra_model.metabolites))\n metabolite_list = [\n cf + '_' + co for cf, co in itertools.product(\n excluded_metabolites, compartments)] + excluded_metabolites\n for cofactor in metabolite_list:\n try:\n cobra_metabolites += [\n cobra_model.metabolites.get_by_id(cofactor)]\n except KeyError:\n pass\n # what if its already a cobra metabolite?\n\n cobra_rxns = []\n if excluded_reactions:\n for rxnid in excluded_reactions:\n try:\n cobra_rxns += [\n cobra_model.reactions.get_by_id(rxnid)]\n except KeyError:\n pass\n\n # Exclude metabolites and reactions in the given comparment\n excluded_metabolites = set(cobra_metabolites)\n excluded_reactions = set(cobra_rxns)\n\n if excluded_compartments:\n met_compartments = set((m for m in cobra_model.metabolites.query(\n lambda x: set(x.compartment).intersection(\n set(excluded_compartments)), None)))\n excluded_metabolites |= met_compartments\n\n # Do I want to redo this not to include excluded metabolites?\n # rxn_compartments = set((r for r in cobra_model.reactions.query(\n # lambda x: set(x.compartments).intersection(\n # set(excluded_compartments)), None)))\n # excluded_reactions |= rxn_compartments\n\n # for reaction in excluded_reactions:\n # reaction.notes['map_info'] = {'hidden': True}\n\n for metabolite in excluded_metabolites:\n metabolite.notes['map_info'] = {'hidden': True}\n\n def is_hidden(obj):\n try:\n return bool(obj.notes['map_info']['hidden'])\n except KeyError:\n return False\n\n for reaction in cobra_model.reactions:\n\n if overwrite_reversibility:\n reaction.notes['map_info']['reversibility'] = \\\n 
bool(reaction.reversibility)\n\n # Unless 'hidden' specifically set to False, hide the reaction if all\n # the reactants or products are hidden (excluding cofactors)\n if ('hidden' not in reaction.notes['map_info']) & ~is_hidden(reaction):\n\n # Hide reactions if all of their products or reactants are hidden.\n # Don't include cofactor metabolites in this calculation.\n if 'cofactors' in reaction.notes['map_info']:\n cofactors = reaction.notes['map_info']['cofactors'].keys()\n else:\n cofactors = {}\n\n if (all([is_hidden(met) for met in reaction.reactants\n if met.id not in cofactors]) or\n all([is_hidden(met) for met in reaction.products\n if met.id not in cofactors])):\n\n reaction.notes['map_info']['hidden'] = True\n\n # Add diplay names to the cobra metabolites accoring to the\n # display_name_format function\n if display_name_format:\n\n # Handle the case for a default display name formatter. This is\n # optimized for models using the typical bigg_id naming convention,\n # ending with 'ID_c' compartment identifier.\n if display_name_format is True:\n display_name_format = (\n lambda met: re.sub('__[D,L]', '', met.id[:-2].upper()))\n\n for met in cobra_model.metabolites:\n\n # Don't overwrite existing display names\n if 'display_name' not in met.notes['map_info']:\n met.notes['map_info']['display_name'] = (\n display_name_format(met))\n\n # Append model's map_info kwargs\n render_kwargs = dict(cobra_model.notes['map_info'])\n render_kwargs.update(kwargs)\n\n return render_model(cobra_model, **render_kwargs)", "def split_tiles(module_data):\n raise NotImplementedError", "def mapRange(num, min1, max1, min2, max2, clamp=True):\n if(clamp and num < min1):\n return min2\n if(clamp and num > max1):\n return max2\n\n num1 = (num - min1) / (max1 - min1)\n num2 = (num1 * (max2 - min2)) + min2\n return num2", "def plot_HDres_histos_vs_z(\n df,\n nameout,\n threshold_var=\"class0\",\n threshold_list=[0.5, 0.7, 0.9],\n threshold_sign=\">\",\n):\n\n P = df[df[\"class0\"] > 0.5]\n Ias = df[df[\"target\"] == 0]\n\n TP = P[P[\"target\"] == 0]\n FP = P[P[\"target\"] != 0]\n\n sel_TP_dic = {}\n sel_FP_dic = {}\n for t in threshold_list:\n if threshold_sign == \">\":\n sel_TP_dic[t] = TP[TP[threshold_var] > t]\n sel_FP_dic[t] = FP[FP[threshold_var] > t]\n else:\n sel_TP_dic[t] = TP[TP[threshold_var] < t]\n sel_FP_dic[t] = FP[FP[threshold_var] < t]\n\n plt.clf()\n cm = CMAP\n fig = plt.figure(figsize=(14, 14))\n # gs = gridspec.GridSpec(4, 2, width_ratios=[3, 1], height_ratios=[2, 2, 1, 1])\n # gs.update(wspace=0.1, hspace=0.3)\n\n # # gridspec init\n # ax00 = plt.subplot(gs[0, 0]) # Hres Ia\n # ax10 = plt.subplot(gs[1, 0], sharex=ax00) # Hres CC\n # ax20 = plt.subplot(gs[2:, 0], sharex=ax00) # efficiency\n # ax01 = plt.subplot(gs[0, 1], sharey=ax00) # histo Ia\n # ax11 = plt.subplot(gs[1, 1], sharey=ax10) # histo CC\n # ax21 = plt.subplot(gs[2, 1]) # histo x1\n # ax31 = plt.subplot(gs[3, 1]) # histo c\n gs = gridspec.GridSpec(3, 3, height_ratios=[2, 2, 1])\n # gs.update(wspace=0.2, hspace=0.1)\n\n # gridspec init\n ax00 = plt.subplot(gs[0, 0:2]) # Hres Ia\n ax10 = plt.subplot(gs[1, 0:2], sharex=ax00) # Hres CC\n ax20 = plt.subplot(gs[2, 0]) # redshift dist\n ax01 = plt.subplot(gs[0, 2], sharey=ax00) # histo Ia\n ax11 = plt.subplot(gs[1, 2], sharey=ax10) # histo CC\n ax21 = plt.subplot(gs[2, 1]) # histo x1\n ax31 = plt.subplot(gs[2, 2]) # histo c\n\n # lines\n ax00.plot([0, 1.2], np.zeros(len([0, 1.2])), \"k:\")\n ax10.plot([0, 1.2], np.zeros(len([0, 1.2])), \"k:\")\n\n mubins = np.arange(-2, 2 + 0.1, 
0.1)\n\n # Hres w. histogram\n def HRwhisto(\n df, sel_dic, ax_left, ax_right, threshold_sign, ylabel=\"TP\", visible=False\n ):\n if ylabel == \"TP\":\n sntyp = \"Ia\"\n else:\n sntyp = \"CC\"\n ax_left.scatter(\n df[\"SIM_REDSHIFT_CMB\"],\n df[\"delmu\"],\n c=df[\"class0\"],\n cmap=CMAP,\n vmin=0.5,\n vmax=1,\n s=8,\n )\n ax_left.errorbar(\n df[\"SIM_REDSHIFT_CMB\"],\n df[\"delmu\"],\n yerr=df[\"delmu_err\"],\n color=\"gray\",\n zorder=0,\n fmt=\"none\",\n marker=\"none\",\n )\n\n ax_left.set_ylim(-2, 2)\n ax_left.set_xlim(0, 1.2)\n ax_left.set_ylabel(f\"{ylabel} residual\", fontsize=18)\n ax_left.tick_params(labelsize=14)\n plt.setp(ax_left.get_xticklabels(), visible=visible)\n if visible is True:\n ax_left.set_xlabel(\"simulated redshift\", fontsize=18)\n for t in threshold_list:\n sel = sel_dic[t]\n n_SNe = len(sel)\n ax_right.hist(\n sel[\"delmu\"],\n orientation=\"horizontal\",\n histtype=\"step\",\n color=cm(t),\n bins=mubins,\n density=True,\n label=f\"{n_SNe} {sntyp} {threshold_sign} {t}\",\n lw=2,\n )\n ax_right.legend(loc=\"lower center\", prop={\"size\": 13})\n plt.setp(ax_right.get_yticklabels(), visible=False)\n plt.setp(ax_right.get_xticklabels(), visible=False)\n ax_right.plot(\n [ax_right.get_xlim()[0], ax_right.get_xlim()[1]],\n np.zeros(len([ax_right.get_xlim()[0], ax_right.get_xlim()[1]])),\n \"k:\",\n )\n\n HRwhisto(TP, sel_TP_dic, ax00, ax01, threshold_sign, ylabel=\"TP\", visible=False)\n HRwhisto(FP, sel_FP_dic, ax10, ax11, threshold_sign, ylabel=\"FP\", visible=True)\n\n # z histos\n n, bins_to_use, tmp = ax20.hist(\n Ias[\"SIM_REDSHIFT_CMB\"], histtype=\"step\", color=\"black\", bins=15, lw=3\n )\n\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n sel_FP = sel_FP_dic[t]\n ax20.hist(\n sel_TP[\"SIM_REDSHIFT_CMB\"], histtype=\"step\", color=cm(t), bins=bins_to_use\n )\n ax20.hist(\n sel_FP[\"SIM_REDSHIFT_CMB\"],\n histtype=\"step\",\n color=cm(t),\n linestyle=\"--\",\n bins=bins_to_use,\n )\n ax20.set_xlim(0, 1.2)\n ax20.tick_params(labelsize=14)\n ax20.set_xlabel(\"simulated redshift\", fontsize=18)\n\n # hist stretch\n n, bins_to_use, tmp = ax21.hist(Ias[\"x1\"], color=\"black\", histtype=\"step\", lw=3)\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n ax21.hist(\n sel_TP[\"x1\"],\n orientation=\"vertical\",\n histtype=\"step\",\n color=cm(t),\n bins=bins_to_use,\n lw=2,\n )\n ax21.set_xlabel(\"x1\", fontsize=18)\n ax21.yaxis.set_label_position(\"right\")\n ax21.set_xlim(-3, 3)\n ax21.tick_params(labelsize=14)\n # color histo\n n, bins_to_use, tmp = ax31.hist(Ias[\"c\"], color=\"black\", histtype=\"step\", lw=3)\n for t in threshold_list:\n sel_TP = sel_TP_dic[t]\n ax31.hist(\n sel_TP[\"c\"],\n orientation=\"vertical\",\n histtype=\"step\",\n color=cm(t),\n bins=bins_to_use,\n lw=2,\n )\n ax31.set_xlabel(\"c\", fontsize=18)\n ax31.set_xlim(-1, 1)\n ax31.tick_params(labelsize=14)\n ax31.yaxis.set_label_position(\"right\")\n\n gs.tight_layout(fig)\n plt.savefig(nameout)\n plt.close()\n del fig", "def define_areas(\n pixel_filtered_map: np.ndarray, district_heating_zone_threshold: float\n):\n structure = np.ones((3, 3)).astype(int)\n expanded_map = binary_dilation(input=pixel_filtered_map, structure=structure)\n eroded_map = binary_erosion(input=expanded_map, structure=structure)\n labels_array, n_label = measurements.label(\n input=eroded_map,\n structure=structure,\n )\n\n # labels start from 1, therefore the array size is 'num_labels_array + 1'\n areas_potential = np.zeros((n_label + 1)).astype(float)\n if n_label > 0:\n end, start, sorted_array = 
get_browsing_indexes(\n labels_array=labels_array,\n pixel_filtered_map=pixel_filtered_map,\n n_label=n_label,\n )\n\n for i, (start_index, end_index) in enumerate(zip(start, end)):\n area = sorted_array[start_index:end_index, 3]\n area_potential = np.sum(area)\n if area_potential >= district_heating_zone_threshold:\n # i+1 because labeling starts from 1 and not from 0\n # factor 0.001 for conversion from MWh/ha to GWh/ha\n areas_potential[i + 1] = np.around(np.sum(area_potential) / 1000, 2)\n\n areas = areas_potential[labels_array]\n filtered_map = pixel_filtered_map * (areas > 0).astype(int)\n total_potential = np.sum(areas_potential)\n return areas, filtered_map, total_potential, areas_potential[1:]", "def test_export_assesments_map_control(self, with_map):\n with factories.single_commit():\n audit = factories.AuditFactory()\n assessment = factories.AssessmentFactory(audit=audit)\n factories.RelationshipFactory(source=audit, destination=assessment)\n control = factories.ControlFactory()\n revision = models.Revision.query.filter(\n models.Revision.resource_id == control.id,\n models.Revision.resource_type == control.__class__.__name__\n ).order_by(\n models.Revision.id.desc()\n ).first()\n with factories.single_commit():\n snapshot = factories.SnapshotFactory(\n parent=audit,\n child_id=control.id,\n child_type=control.__class__.__name__,\n revision_id=revision.id\n )\n if with_map:\n factories.RelationshipFactory(source=snapshot, destination=assessment)\n if with_map:\n val = control.slug\n else:\n val = \"\"\n self.assertColumnExportedValue(val, assessment,\n \"map:control versions\")", "def figure_contribution_map(self, contribution_map: aa.Array2D):\r\n self.mat_plot_2d.plot_array(\r\n array=contribution_map,\r\n visuals_2d=self.get_visuals_2d(),\r\n auto_labels=aplt.AutoLabels(\r\n title=\"Contribution Map\", filename=\"contribution_map_2d\"\r\n ),\r\n )", "def _create_observation_mask(self):\n\n\n if self.BLUE_PARTIAL:\n centers, radii = [], []\n for agent in self._team_blue:\n if not agent.isAlive: continue\n centers.append(agent.get_loc())\n radii.append(agent.range)\n self._blue_mask = self._create_vision_mask(centers, radii)\n if self.TEAM_MEMORY == \"fog\":\n self.blue_memory = np.logical_and(self.blue_memory, self._blue_mask)\n else:\n self._blue_mask = np.zeros_like(self._static_map, dtype=bool)\n\n if self.RED_PARTIAL:\n centers, radii = [], []\n for agent in self._team_red:\n if not agent.isAlive: continue\n centers.append(agent.get_loc())\n radii.append(agent.range)\n self._red_mask = self._create_vision_mask(centers, radii)\n if self.TEAM_MEMORY == \"fog\":\n self.red_memory = np.logical_and(self.red_memory, self._red_mask)\n else:\n self._red_mask = np.zeros_like(self._static_map, dtype=bool)", "def display_map(grid):\n fig, ax = plt.subplots(figsize=(7, 7))\n\n major_ticks_x = np.arange(0, LENGTH_case + 1, 5)\n minor_ticks_x = np.arange(0, LENGTH_case + 1, 1)\n major_ticks_y = np.arange(0, WIDTH_case + 1, 5)\n minor_ticks_y = np.arange(0, WIDTH_case + 1, 1)\n ax.set_xticks(major_ticks_x)\n ax.set_xticks(minor_ticks_x, minor=True)\n ax.set_yticks(major_ticks_y)\n ax.set_yticks(minor_ticks_y, minor=True)\n ax.grid(which='minor', alpha=0.2)\n ax.grid(which='major', alpha=0.5)\n ax.set_ylim([0, WIDTH_case])\n ax.set_xlim([0, LENGTH_case])\n ax.grid(True)\n\n # Select the colors with which to display obstacles and free cells\n cmap = colors.ListedColormap(['white', 'red'])\n\n # Displaying the map\n ax.imshow(grid, cmap=cmap)\n plt.title(\"Map : free cells in white, 
occupied cells in red\");\n\n return fig, ax", "def resize_ranges(self, bp: int, chrom_sizes: Optional[Mapping[str, Numeric]] = None):\n table = self.data\n limits = {\"lower\": 0}\n if chrom_sizes:\n limits[\"upper\"] = self.chromosome.replace(chrom_sizes)\n table = table.assign(\n start=(table[\"start\"] - bp).clip(**limits),\n end=(table[\"end\"] + bp).clip(**limits),\n )\n if bp < 0:\n # Drop any bins that now have zero or negative size\n ok_size = table[\"end\"] - table[\"start\"] > 0\n logging.debug(\"Dropping %d bins with size <= 0\", (~ok_size).sum())\n table = table[ok_size]\n # Don't modify the original\n return self.as_dataframe(table.copy())", "def get_price_range_map(data_map):\n res_map = defaultdict(lambda: deepcopy(static_constants.UNIT_PRICE_DEFAULT))\n for key, list_of_price in data_map.items():\n list_of_price.sort()\n lower_price = np.percentile(list_of_price, 40)\n higher_price = np.percentile(list_of_price, 70)\n median_price = np.percentile(list_of_price, 50)\n res_map[key] = {'lower_price': lower_price, 'median_price': median_price, 'higher_price': higher_price}\n return res_map", "def get_heightmap(robot,\n heightmap_resolution,\n workspace_limits):\n color_img_set, depth_img_set = robot.get_camera_data()\n depth_img_set = depth_img_set * robot.cam_depth_scale # Apply depth scale from calibration\n color_heightmap, depth_heightmap = utils.get_heightmap(color_img_set, depth_img_set,\n robot.cam_intrinsics,\n robot.cam_pose, workspace_limits,\n heightmap_resolution)\n depth_heightmap[np.isnan(depth_heightmap)] = 0\n kernel = np.ones([3, 3])\n color_heightmap = cv2.dilate(color_heightmap, kernel, iterations=2)\n color_heightmap = cv2.erode(color_heightmap, kernel, iterations=2)\n valid_depth_heightmap = cv2.dilate(depth_heightmap, kernel, iterations=2)\n valid_depth_heightmap = cv2.erode(valid_depth_heightmap, kernel, iterations=2)\n return color_heightmap, valid_depth_heightmap", "def make_animation_subset_levels(X, fixed_axes, fixed_value_1, fixed_value_2,\n filtration_size):\n # Create the array indexes\n obj = [slice(None, None, None)] * 4\n obj[fixed_axes[0]] = fixed_value_1\n obj[fixed_axes[1]] = fixed_value_2\n # print obj\n\n # Create sequence of threshold values\n thresholds = np.linspace(start=np.amin(X[obj]), stop=np.amax(X[obj]), num=filtration_size)\n # print thresholds\n # TEST PLOT\n # fig, ax = plt.subplots()\n # # interp = kwargs.get('interpolation', 'none')\n # # colors = kwargs.get('colormap', 'seismic')\n # img0 = ax.imshow(X[obj], cmap='Blues', interpolation='none')\n # fig.colorbar(img0, ax=ax, fraction=0.022, pad=0.01)\n # ax.invert_yaxis()\n # # tx = ax.set_title('%s = %d' % (X.dimensions[sweep_axis], i))\n # fig.tight_layout()\n # fig.show()\n\n # def get_middle(xx):\n # return 1 - (float(np.amax(xx)) / (np.amax(xx) + abs(np.amin(xx))))\n\n def init():\n global fig, ax, im, tx\n fig = plt.figure()\n ax = plt.axes()\n # idx = list(obj)\n # idx[sweep_axis] = slice(None, None, None)\n # middle = get_middle(X[idx])\n # print obj\n im = ax.imshow(X[obj] < thresholds[2], cmap='Blues',#cmap=shiftedColorMap(cm.seismic, midpoint=middle),\n interpolation='none', aspect='auto')\n # vmin=np.amin(X[idx]), vmax=np.amax(X[idx]))\n ax.invert_yaxis()\n # cb = fig.colorbar(im)\n # tx = ax.set_title('%s = %d' % (X.dimensions[sweep_axis], i))\n return\n\n def animate(n):\n # update indexes\n # obj[sweep_axis] = n\n # vmax = np.max(X[obj])\n # vmin = np.min(X[obj])\n im.set_data(X[obj] < thresholds[n])\n # im.set_clim(vmin, vmax)\n # tx.set_text('%s = %d' % 
(X.dimensions[sweep_axis], n))\n return\n\n init()\n anim = animation.FuncAnimation(fig, animate, frames=np.arange(filtration_size), interval=100, blit=False)\n return anim", "def mi_canolty(lo, hi, f_lo, f_hi, fs=1000, w_lo=3, w_hi=3,\n filterfn=None, filter_kwargs=None, n_surr=100):\n\n lo, hi = pa_series(lo, hi, f_lo, f_hi, fs=fs, w_lo=w_lo, w_hi=w_hi,\n filterfn=filterfn, filter_kwargs=filter_kwargs)\n\n # Calculate modulation index\n pac = np.abs(np.mean(hi * np.exp(1j * lo)))\n\n # Calculate surrogate MIs\n pacS = np.zeros(n_surr)\n \n loj = np.exp(1j * lo)\n for s in range(n_surr):\n loS = np.roll(loj, np.random.randint(len(lo)))\n pacS[s] = np.abs(np.mean(hi * loS))\n\n # Return z-score of observed PAC compared to null distribution\n return (pac - np.mean(pacS)) / np.std(pacS)", "def conclusion_summary_map(self):\n pass", "def _multiple_values_max(self, maps, threshold):\r\n max_val = np.zeros((maps.shape[0], maps.shape[1]), dtype=np.float)\r\n for i in range(maps.shape[1]):\r\n cmin = np.min(maps[:,i])\r\n cmax = np.max(maps[:,i])\r\n limit = cmax - (cmax - cmin) * threshold[i]\r\n min_mask = maps[:,i] <= limit\r\n max_mask = maps[:,i] > limit\r\n # for an abundance map the delta is around [-1..1],\r\n # but it can be outside this interval, it's something\r\n # to test\r\n # a guard with a -10 value maybe ok.\r\n rmin = min_mask * -10\r\n max_val[:,i] = max_mask * maps[:,i] + rmin\r\n max_vec = np.max(max_val, axis=1)\r\n max_mask = max_vec > -10\r\n argmax = np.argmax(max_val, axis=1)\r\n return (argmax + 1) * max_mask", "def trans_30490_to_42840(self, df, cols):\n\n series_map_lst = []\n for group_id in self.group_ids:\n if type(group_id) == str:\n group_id = [group_id]\n\n tr = df.groupby(group_id)[cols].sum()\n if len(group_id) == 2:\n tr.index = pd.Series(tr.index.values).apply(lambda x: \"--\".join(x))\n\n series_map_lst.append(tr)\n \n res = pd.concat(series_map_lst)\n\n return res", "def contour_map(self, map, affine, **kwargs):\n self._map_show(map, affine, type='contour', **kwargs)", "def show_channels(chmaps, n_cols=8, normalize=None, ofpath=None):\n n_rows = (chmaps.shape[0] - 1) // n_cols + 1\n\n if n_rows == 1:\n n_cols = chmaps.shape[0]\n\n if normalize is None:\n vmin, vmax = None, None\n else:\n vmin, vmax = normalize\n\n fig = plt.figure()\n\n grid = AxesGrid(fig, 111,\n nrows_ncols=(n_rows, n_cols),\n axes_pad=0.0,\n share_all=True)\n\n for i, chmap in enumerate(chmaps):\n grid[i].imshow(chmap, vmin=vmin, vmax=vmax)\n\n grid.axes_llc.get_xaxis().set_ticks([])\n grid.axes_llc.get_yaxis().set_ticks([])\n\n if ofpath is None:\n plt.get_current_fig_manager().window.showMaximized()\n plt.show()\n else:\n fig.savefig(ofpath)\n plt.close(fig)", "def test_degrade_widemask_or(self):\n\n nside_coverage = 32\n nside_map = 256\n nside_map2 = 64\n sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,\n WIDE_MASK, wide_mask_maxbits=7)\n sparse_map_or = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map2,\n WIDE_MASK, wide_mask_maxbits=7)\n # Fill some pixels in the \"high-resolution\" map\n pixel = np.arange(4000, 8000)\n sparse_map.set_bits_pix(pixel, [4])\n\n # Check which pixels will be full in the \"low-resolution\" map and fill them\n pixel2 = np.unique(np.right_shift(pixel, healsparse.utils._compute_bitshift(nside_map2, nside_map)))\n sparse_map_or.set_bits_pix(pixel2, [4])\n\n # Degrade with or\n sparse_map_test = sparse_map.degrade(nside_map2, reduction='or')\n\n # Check the results\n 
testing.assert_almost_equal(sparse_map_or._sparse_map, sparse_map_test._sparse_map)\n\n # Repeat for maxbits > 8\n sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,\n WIDE_MASK, wide_mask_maxbits=16)\n sparse_map_or = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map2,\n WIDE_MASK, wide_mask_maxbits=16)\n # Fill some pixels in the \"high-resolution\" map\n pixel = np.arange(0, 1024)\n pixel = np.concatenate([pixel[:512], pixel[512::3]]).ravel()\n sparse_map.set_bits_pix(pixel, [4, 12])\n sparse_map.clear_bits_pix(pixel[:16], [4]) # set low value in the first pixel\n\n # Check which pixels will be full in the \"low-resolution\" map and fill them\n # Note that we are filling more than the ones that are going to be True\n # since we want to preserve the coverage_map\n pixel2_all = np.unique(np.right_shift(pixel,\n healsparse.utils._compute_bitshift(nside_map2, nside_map)))\n sparse_map_or.set_bits_pix(pixel2_all, [4, 12])\n\n # Get the pixel number of the bad pixels\n pixel2_bad = np.array([0])\n sparse_map_or.clear_bits_pix(pixel2_bad, [4]) # set low value in the first pixel\n\n # Degrade with or\n sparse_map_test = sparse_map.degrade(nside_map2, reduction='or')\n\n # Check the results\n testing.assert_almost_equal(sparse_map_test._sparse_map, sparse_map_or._sparse_map)\n\n # Test degrade-on-read\n self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')\n\n fname = os.path.join(self.test_dir, 'test_wide_degrade.hs')\n sparse_map.write(fname)\n\n sparse_map_test2 = healsparse.HealSparseMap.read(fname, degrade_nside=nside_map2, reduction='or')\n testing.assert_almost_equal(sparse_map_test2._sparse_map, sparse_map_or._sparse_map)", "def process_map_img(self, map, height, width, dtype=np.int8):\n img_in = np.array(map, dtype=np.uint8).reshape(height, width)\n img = img_in.copy()\n\n # NOTE(CH3): We might not need to do this conversion...\n img[(img_in == self.free_ros)] = self.free_img\n img[(img_in == self.unknown_ros)] = self.unknown_img\n img[(img_in == self.obstacle_ros)] = self.obstacle_img\n\n # Isolate layers\n free_mask = cv2.inRange(img, self.free_img, self.free_img)\n unknown_mask = cv2.inRange(img, self.unknown_img, self.unknown_img)\n obstacle_mask = cv2.inRange(img, self.obstacle_img, self.obstacle_img)\n\n # Process free layer\n if self.process_free:\n free_mask = cv2.erode(free_mask, self.kernel, iterations=self.iters)\n free_mask = cv2.dilate(free_mask, self.kernel, iterations=self.iters)\n\n # if True: # erode_free\n # free_mask = cv2.erode(free_mask, self.kernel, iterations=3)\n\n # Construct new map img\n new_img = make_img(img.shape, (self.unknown_ros), dtype=dtype)\n new_img[(free_mask != 0)] = self.free_ros\n new_img[(obstacle_mask != 0)] = self.obstacle_ros\n\n return new_img", "def _convert_to_not_cut(\n cls,\n source_indexes_not_cut_map: List[int],\n target_indexes_not_cut_map: List[int],\n source_to_targets_map: DIMENSION_MAP,\n ) -> DIMENSION_MAP:\n source_to_targets_map_not_cut = {}\n for source_index, target_indexes in source_to_targets_map.items():\n source_index_not_cut = source_indexes_not_cut_map[source_index]\n target_indexes_not_cut = list(map(lambda x: target_indexes_not_cut_map[x], target_indexes))\n source_to_targets_map_not_cut[source_index_not_cut] = target_indexes_not_cut\n return source_to_targets_map_not_cut", "def merge_maps(self, map_2d):\n x = map_2d.data.max(0, keepdim=True)[0]\n y = map_2d.data.max(1, keepdim=True)[0]\n return x, y", "def scaleRanges(ranges, dims=(0,1,2)):\n max_pos_span = 
np.max([ranges[dim][1] - ranges[dim][0] for dim in\n                            dims])\n    for k in ranges:\n        ranges[k] = list(ranges[k])\n\n    for dim in dims:\n        midpoint = 0.5 * (ranges[dim][1] + ranges[dim][0])\n        # import pdb; pdb.set_trace()\n        ranges[dim][1] = midpoint + 0.5 * max_pos_span\n        ranges[dim][0] = midpoint - 0.5 * max_pos_span", "def eval_map(self, mode='segm'):\n        if mode not in ['bbox', 'segm']:\n            raise NotImplementedError(\"Mode [{}] hasn't been implemented, choose from [bbox, segm]!\".format(mode))\n\n        # eval map\n        Gt = COCO(self.gt_file)\n        Dt = Gt.loadRes(self.dt_file)\n\n        evalObj = COCOeval(Gt, Dt, mode)\n        imgIds = sorted(Gt.getImgIds())\n        evalObj.params.imgIds = imgIds\n        evalObj.evaluate()\n        evalObj.accumulate()\n        evalObj.summarize()", "def _merge_boundaries(self):\n        \n        optical_seg = self._amalgamated_optical_segments\n        if bool(optical_seg):\n            optical_seg[\"catagory\"] = OPTICAL * tf.ones_like(\n                optical_seg[\"x_start\"],\n                dtype=tf.int64\n            )\n            self._optical_seg_count = tf.shape(\n                optical_seg[\"x_start\"],\n                out_type=tf.int64\n            )[0]\n        else:\n            self._optical_seg_count = 0\n        \n        stop_seg = self._amalgamated_stop_segments\n        if bool(stop_seg):\n            stop_seg[\"catagory\"] = STOP * tf.ones_like(\n                stop_seg[\"x_start\"],\n                dtype=tf.int64\n            )\n            self._stop_seg_count = tf.shape(\n                stop_seg[\"x_start\"],\n                out_type=tf.int64\n            )[0]\n        else:\n            self._stop_seg_count = 0\n        \n        target_seg = self._amalgamated_target_segments\n        if bool(target_seg):\n            target_seg[\"catagory\"] = TARGET * tf.ones_like(\n                target_seg[\"x_start\"],\n                dtype=tf.int64\n            )\n            self._target_seg_count = tf.shape(\n                target_seg[\"x_start\"],\n                out_type=tf.int64\n            )[0]\n        else:\n            self._target_seg_count = 0\n        \n        self._merged_segments = amalgamate(\n            [optical_seg, stop_seg, target_seg], \n            SEGMENT_GEO_SIG | {\"catagory\"}\n        )\n        \n        optical_arc = self._amalgamated_optical_arcs\n        if bool(optical_arc):\n            optical_arc[\"catagory\"] = OPTICAL * tf.ones_like(\n                optical_arc[\"x_center\"],\n                dtype=tf.int64\n            )\n            self._optical_arc_count = tf.shape(\n                optical_arc[\"x_center\"],\n                out_type=tf.int64\n            )[0]\n        else:\n            self._optical_arc_count = 0\n        \n        stop_arc = self._amalgamated_stop_arcs\n        if bool(stop_arc):\n            stop_arc[\"catagory\"] = STOP * tf.ones_like(\n                stop_arc[\"x_center\"],\n                dtype=tf.int64\n            )\n            self._stop_arc_count = tf.shape(\n                stop_arc[\"x_center\"],\n                out_type=tf.int64\n            )[0]\n        else:\n            self._stop_arc_count = 0\n        \n        target_arc = self._amalgamated_target_arcs\n        if bool(target_arc):\n            target_arc[\"catagory\"] = TARGET * tf.ones_like(\n                target_arc[\"x_center\"],\n                dtype=tf.int64\n            )\n            self._target_arc_count = tf.shape(\n                target_arc[\"x_center\"],\n                out_type=tf.int64\n            )[0]\n        else:\n            self._target_arc_count = 0\n        \n        self._merged_arcs = amalgamate(\n            [optical_arc, stop_arc, target_arc], \n            ARC_GEO_SIG | {\"catagory\"}\n        )", "def _get_colorbar_limits(self):\n        if self.boundaries is not None:\n            C = self.boundaries\n            if self.extend in [\"min\", \"both\"]:\n                C = C[1:]\n\n            if self.extend in [\"max\", \"both\"]:\n                C = C[:-1]\n            return min(C), max(C)\n        else:\n            return self.get_clim()", "def plot_figure11():\n    height_ceilings = [200., 300., 400.]\n    height_ceiling_ids = [list(height_range_ceilings).index(height_ceiling) for height_ceiling in height_ceilings]\n\n    baseline_height_ceiling = 500.\n    baseline_height_ceiling_id = list(height_range_ceilings).index(baseline_height_ceiling)\n\n    plot_item00 = {\n        'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[0], :, :],\n        'contour_fill_levels': np.linspace(50, 100, 21),\n        'contour_line_levels': [70., 80., 90., 95.],\n        
'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(50, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n plot_item01 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[1], :, :],\n 'contour_fill_levels': np.linspace(70, 100, 21),\n 'contour_line_levels': [70., 80., 90., 95.],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(70, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n plot_item02 = {\n 'data': 100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[2], :, :],\n 'contour_fill_levels': np.linspace(80, 100, 21),\n 'contour_line_levels': [70., 80., 90., 95.],\n 'contour_line_label_fmt': '%.0f',\n 'colorbar_ticks': np.linspace(80, 100, 21)[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability [%]',\n 'extend': 'min',\n }\n\n column_titles = [\"200 m\", \"300 m\", \"400 m\"]\n plot_items = [plot_item00, plot_item01, plot_item02]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)\n\n linspace10 = np.linspace(0., 11., 21)\n plot_item10 = {\n 'data': -(100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[0], :, :])+\n (100.-nc.variables[\"p_ceiling_rank40\"][baseline_height_ceiling_id, :, :]),\n 'contour_fill_levels': linspace10,\n 'contour_line_levels': sorted([1.1]+list(linspace10[::4])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': linspace10[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability decrease [%]',\n }\n linspace11 = np.linspace(0., 23., 21)\n plot_item11 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[1], :, :])-\n (100.-nc.variables[\"p_ceiling_rank40\"][baseline_height_ceiling_id, :, :]),\n 'contour_fill_levels': linspace11,\n 'contour_line_levels': sorted([2.3]+list(linspace11[::4])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': linspace11[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n linspace12 = np.linspace(0., 38., 21)\n plot_item12 = {\n 'data': (100.-nc.variables[\"p_ceiling_rank40\"][height_ceiling_ids[2], :, :])-\n (100.-nc.variables[\"p_ceiling_rank40\"][baseline_height_ceiling_id, :, :]),\n 'contour_fill_levels': linspace12,\n 'contour_line_levels': sorted([3.8]+list(linspace12[::4])),\n 'contour_line_label_fmt': '%.1f',\n 'colorbar_ticks': linspace12[::4],\n 'colorbar_tick_fmt': '{:.0f}',\n 'colorbar_label': 'Availability increase [%]',\n }\n\n column_titles = None\n plot_items = [plot_item10, plot_item11, plot_item12]\n\n eval_contour_fill_levels(plot_items)\n plot_panel_1x3_seperate_colorbar(plot_items, column_titles)", "def cuts(self) -> list[list[int]]:\n if self._cuts is not None:\n return self._cuts\n width = self.width\n height = self.height\n screen_region = Region(0, 0, width, height)\n cuts_sets = [{0, width} for _ in range(height)]\n\n if self.map is not None:\n for region, order, clip in self.map.values():\n region = region.intersection(clip)\n if region and (region in screen_region):\n region_cuts = region.x_extents\n for y in region.y_range:\n cuts_sets[y].update(region_cuts)\n\n # Sort the cuts for each line\n self._cuts = [sorted(cut_set) for cut_set in cuts_sets]\n return self._cuts", "def clipper(data, data_def=None):\r\n\r\n data = data.copy()\r\n cols = data_def.loc[np.logical_or(~pd.isnull(data_def.min_high), ~pd.isnull(data_def.max_low))].index\r\n for c in cols:\r\n mn 
= data_def.max_low[c]\r\n mx = data_def.min_high[c]\r\n data[c] = np.clip(data[c].values, mn, mx)\r\n return data", "def build_area_containment_mapping(max_area = (4, 5)):\n contained_by = { max_area : [] }\n if max_area == (2, 2):\n return contained_by\n sub_area_1 = tuple(sorted([max(max_area[0] - 1, 2), max_area[1]]))\n sub_area_2 = tuple(sorted([max_area[0], max(max_area[1] - 1, 2)]))\n for sub_area in [sub_area_1, sub_area_2]:\n if sub_area not in contained_by:\n sub_map = build_area_containment_mapping(sub_area)\n sub_map[sub_area] += [max_area]\n for area in sub_map:\n contained_by[area] = contained_by.get(area, [])\n for sup_area_ in sub_map[area]:\n if sup_area_ not in contained_by[area]:\n contained_by[area].append(sup_area_)\n return contained_by", "def callback_UpdateMap(cm, mod_units, window):\n for mod in mod_units:\n # Boost is not implemented yet, just a potential way to link different\n # bioprocesses together via sideFlows.\n # Ex) swtichgrass as a source of extra cellulose\n\n if mod[0:5] != 'boost':\n # Updates a mod of the window . . .\n window[mod].update(cm[mod]['name'])\n\n return None", "def save_maps(self,map_options=None,save=None,show=True,match_cmap_limits=True,schedule_diagram=True) :\n\n if map_options is not None :\n self.map_options.update(map_options)\n\n if save is not None and isinstance(save,bool) :\n self.map_options['save'] = save\n\n if match_cmap_limits :\n self.map_options['cmap_limits'] = [np.nanmin(self.maps),np.nanmax(self.maps)]\n if self.map_options['cmap_limits'][0] < 0.1 * self.map_options['cmap_limits'][1] :\n self.map_options['cmap_limits'][0] = 0\n\n for i in range(self.num_maps) :\n opts = self.map_options\n opts['title'] = self.titles[i]\n if self.filenames is not None :\n opts['img_filename'] = self.filenames[i]\n if schedule_diagram :\n opts['schedule'] = self.hist_specs[self.map_specs['hist'][i]]['exposure_schedule']\n render_map(\n self.maps[i,:,:],\n lat=self.lat,\n lon=self.lon,\n cbar_label=self.hist_specs[self.map_specs['hist'][i]]['units'],\n show=show,\n **opts)", "def rcs(azmap, rmap, elmap, areaeffmap, sigma0map, vismap, rpol, azpol,\n elpol, DEM_res, DEM_xmin, DEM_ymin, rad_x, rad_y, beamwidth,\n pulsewidth, range_weighting=True, az_conv=0,\n raster_oversampling=1, verbose=True):\n\n nrows, ncols = azmap.shape\n area_unweighted = areaeffmap * sigma0map\n\n pulselength = pulsewidth * 3.e8 / 2. # [m]\n if az_conv is not None:\n az_conv_offset = az_conv / 2.\n else:\n az_conv_offset = 0\n\n beamwidth_rad = beamwidth * np.pi / 180.\n\n if not range_weighting:\n range_weight = 1 # unity\n\n if raster_oversampling == 0:\n N = 1\n elif raster_oversampling == 1:\n N = int(np.ceil(2 * DEM_res / pulselength))\n else:\n N = raster_oversampling\n\n if N != 1:\n # New dimensions\n nc = N * ncols\n nr = N * nrows\n\n # repeat the values NxN, equivalent of rebin in IDL\n elvals = np.repeat(np.repeat(elmap, N, axis=0), N, axis=1)\n areavals = np.repeat(np.repeat(area_unweighted / N ** 2,\n N, axis=0), N, axis=1)\n visvals = np.repeat(np.repeat(vismap, N, axis=0), N, axis=1)\n\n # New x- and y-vectors\n xvec = np.arange(nr) * DEM_res / N + DEM_xmin\n yvec = np.arange(nc) * DEM_res / N + DEM_ymin\n\n xdiff = (xvec - rad_x)\n ydiff = (yvec - rad_y)\n\n # New distance from radar map\n X, Y = np.meshgrid(xdiff, ydiff)\n rvals = np.sqrt(X ** 2 + Y ** 2)\n\n # New azimuth map\n azmap_rad = (np.arctan2(X, Y) + 2 * np.pi) % (2 * np.pi)\n azvals = azmap_rad * 180. 
/ np.pi\n else:\n rvals = rmap\n azvals = azmap\n azmap_rad = azvals * np.pi / 180.\n elvals = elmap\n areavals = area_unweighted\n visvals = vismap\n\n elmap_rad = elvals * np.pi / 180.\n elevations_rad = np.array(elpol) * np.pi / 180.\n\n # Define the area around a point P(range, azimuth) where the cells\n # have a contribution to the RCS. This area is defined with the\n # range limits from range-dr_offset to range+dr_offset and the\n # azimuth limits from azimuth-daz_offset to azimuth+daz_offset.\n #\n # For a Gaussian antenna, azimuth offset more than 2*HPBW does not a\n # have remarkable contribution.\n # With a rectangular pulse and a matched filter cells farer away\n # than pulse length does not a have remarkable contribution.\n\n daz_offset = (2. * beamwidth) + az_conv_offset # [deg]\n dr_offset = pulselength # [m]\n\n nazim = len(azpol)\n nrange = len(rpol)\n # pyart storage format: 2D arrays (naz * nel, nranges)\n rcspolarmap = np.zeros((nazim * len(elpol), nrange)) + np.nan\n\n for rind in range(nrange):\n if verbose:\n logging.info(f'Computing range bin {rpol[rind]:2.1f}')\n rr = rpol[rind]\n\n indr = np.logical_and(np.logical_and(rvals >= rr - dr_offset,\n rvals < rr + dr_offset),\n visvals > 0)\n\n if not np.any(indr):\n continue\n\n indr = np.where(indr)\n\n for azind in range(nazim):\n az = azpol[azind]\n # Inside the loops over range (rr) and azimuth (az), the\n # coordinates (rr, az) describe the point P(rr, az) for which\n # the RCS is calculated. If more than one DEM cell is within\n # the area from az-daz/2 to az+daz/2 and from rr-dr/2 to\n # rr+dr/2, the calculated RCS value is set to all of these\n # cells (next neighbor).\n\n # Get area around rr and az\n azmin = az - daz_offset\n azmax = az + daz_offset\n if azmin < 0:\n azmin = 360. + azmin\n indaz = np.logical_or(np.logical_and(azvals[indr] >= 0,\n azvals[indr] < azmax),\n np.logical_and(azvals[indr] >= azmin,\n azvals[indr] <= 360.))\n elif azmax > 360:\n azmax = azmax - 360.\n indaz = np.logical_or(np.logical_and(azvals[indr] >= azmin,\n azvals[indr] <= 360),\n np.logical_and(azvals[indr] >= 0,\n azvals[indr] < azmax))\n else:\n indaz = np.logical_and(azvals[indr] >= azmin,\n azvals[indr] < azmax)\n\n # Cells that contribute to the cells to set indset\n inda = tuple([indr[0][indaz], indr[1][indaz]])\n\n # Calculate offsets in azimuth and elevation to the\n # point P(rr,az) and the elevation angle of the antenna.\n\n daz_area = azmap_rad[inda] - (az * np.pi / 180.)\n\n indaz = daz_area > np.pi\n daz_area[indaz] = daz_area[indaz] - 2. * np.pi\n\n indaz = daz_area < -np.pi\n daz_area[indaz] = daz_area[indaz] + 2. 
* np.pi\n\n if range_weighting:\n # Get the weighting factor due to the range offset.\n range_weight = range_weights(rvals[inda], rr,\n pulselength)\n\n ind_rzero = rvals[inda] <= 0.0\n if np.any(ind_rzero):\n continue\n\n for iel, el in enumerate(elevations_rad):\n del_area = elmap_rad[inda] - el\n\n # Get the two-way weighting factor due to the azimuth offset\n # to the main antenna direction (assuming a Gaussian antenna\n # pattern).\n ant_weight = antenna_pattern_gauss(\n daz_area,\n del_area,\n beamwidth_rad,\n twoway=True,\n az_conv=az_conv *\n np.pi /\n 180.,\n units='rad')\n\n # RCS = SUM_j sigma_j\n # = SUM_j sigma0_j * A_eff_j * fa(dphi_j,dteta_j)^2 * fr(drange)\n # where\n # sigma_j : Backscattering cross section of each cell [m^2]\n # sigma0_j : Sigma naught of each cell [1]\n # A_eff_j : Effective area of each cell [m^2]\n # fa : One-way weighting function due to the azimuth\n # and elevation offsets.\n # fr : Range weighting function due to the range offset\n\n # RCS contribution of each cell inside the contribution\n # area.\n rcs_area = ant_weight * range_weight * areavals[inda]\n # Sum up all the contributions\n rcs = np.nansum(rcs_area)\n\n if rcs < RCS_MIN:\n rcs = np.nan\n\n # Set rcs to all values inside the set area.\n\n rcspolarmap[azind + iel * nazim, rind] = rcs\n return rcspolarmap", "def get_cropped_heightmap(state,heightmap):\r\n map_x, map_y = state[:2]\r\n th = state[2]\r\n windowsize = 100\r\n\r\n #OK, to properly get the heightmap, we need to 1. translate to vehicle origin. 2. Rotate by theta 3. translate by -windowsize/2 to center\r\n HTM_trans = np.array([[1., 0., map_x], [0., 1., map_y], [0., 0., 1.]])\r\n HTM_rot = np.array([[cos(th), -sin(th), 0.], [sin(th), cos(th), 0.], [0., 0., 1.]])\r\n HTM_center = np.array([[1., 0., -windowsize//2], [0., 1., -windowsize//2], [0., 0., 1.]])\r\n HTM = np.matmul(HTM_trans, np.matmul(HTM_rot, HTM_center))\r\n heightmap_tr = skimage.transform.warp(heightmap, ProjectiveTransform(matrix=HTM))\r\n heightmap_out = heightmap_tr[:windowsize, :windowsize]\r\n\r\n return heightmap_out", "def _map_columns_step(self, op: data_algebra.data_ops_types.OperatorPlatform, *, data_map: Dict[str, Any]):\n if op.node_name != \"MapColumnsNode\":\n raise TypeError(\n \"op was supposed to be a data_algebra.data_ops.MapColumnsNode\"\n )\n res = self._compose_polars_ops(op.sources[0], data_map=data_map)\n if isinstance(res, pl.LazyFrame):\n # work around https://github.com/pola-rs/polars/issues/5882#issue-1507040380\n res = res.collect()\n res = res.rename(op.column_remapping)\n res = res.select(op.columns_produced())\n if self.use_lazy_eval and isinstance(res, pl.DataFrame):\n res = res.lazy()\n return res", "def populate_df_with_macro_cats(df, level_maps):\n for level_map in level_maps:\n for summary_att, atts in level_map.items():\n df[summary_att] = df[atts].sum(axis=1).apply(lambda e: 1 if e >= 1 else 0)\n\n return df", "def _draw_center_region_maps(self, top_line: ndarray, bot_line: ndarray,\n center_line: ndarray,\n center_region_mask: ndarray,\n top_height_map: ndarray,\n bot_height_map: ndarray, sin_map: ndarray,\n cos_map: ndarray,\n region_shrink_ratio: float) -> None:\n\n assert top_line.shape == bot_line.shape == center_line.shape\n assert (center_region_mask.shape == top_height_map.shape ==\n bot_height_map.shape == sin_map.shape == cos_map.shape)\n assert isinstance(region_shrink_ratio, float)\n\n h, w = center_region_mask.shape\n for i in range(0, len(center_line) - 1):\n\n top_mid_point = (top_line[i] + top_line[i + 1]) / 2\n 
bot_mid_point = (bot_line[i] + bot_line[i + 1]) / 2\n\n sin_theta = self.vector_sin(top_mid_point - bot_mid_point)\n cos_theta = self.vector_cos(top_mid_point - bot_mid_point)\n\n tl = center_line[i] + (top_line[i] -\n center_line[i]) * region_shrink_ratio\n tr = center_line[i + 1] + (\n top_line[i + 1] - center_line[i + 1]) * region_shrink_ratio\n br = center_line[i + 1] + (\n bot_line[i + 1] - center_line[i + 1]) * region_shrink_ratio\n bl = center_line[i] + (bot_line[i] -\n center_line[i]) * region_shrink_ratio\n current_center_box = np.vstack([tl, tr, br, bl]).astype(np.int32)\n\n cv2.fillPoly(center_region_mask, [current_center_box], color=1)\n cv2.fillPoly(sin_map, [current_center_box], color=sin_theta)\n cv2.fillPoly(cos_map, [current_center_box], color=cos_theta)\n\n current_center_box[:, 0] = np.clip(current_center_box[:, 0], 0,\n w - 1)\n current_center_box[:, 1] = np.clip(current_center_box[:, 1], 0,\n h - 1)\n min_coord = np.min(current_center_box, axis=0).astype(np.int32)\n max_coord = np.max(current_center_box, axis=0).astype(np.int32)\n current_center_box = current_center_box - min_coord\n box_sz = (max_coord - min_coord + 1)\n\n center_box_mask = np.zeros((box_sz[1], box_sz[0]), dtype=np.uint8)\n cv2.fillPoly(center_box_mask, [current_center_box], color=1)\n\n inds = np.argwhere(center_box_mask > 0)\n inds = inds + (min_coord[1], min_coord[0])\n inds_xy = np.fliplr(inds)\n top_height_map[(inds[:, 0], inds[:, 1])] = self._dist_point2line(\n inds_xy, (top_line[i], top_line[i + 1]))\n bot_height_map[(inds[:, 0], inds[:, 1])] = self._dist_point2line(\n inds_xy, (bot_line[i], bot_line[i + 1]))", "def coverPlots(self, plotX, plotY, iCiv):\n\t\t\n\t\tfor x in range(plotX-1, plotX+2):\n\t\t\tfor y in range(plotY-1, plotY+2):\n\t\t\t\tgc.getMap().plot(x, y).setRevealed(iCiv, False, True, -1);", "def render_map(map,\nlat=None,\nlon=None,\ntitle=None,\nsave=True,\nshow=True,\nschedule=None,\nschedule_bbox=(-0.03,0,1,0.91),\nimg_filename=None,\nimg_dir=\"\",\nimg_size=[20,15],\nimg_dpi=300,\nimg_filetype=\"png\",\nbrdr_nation=True,\nbrdr_nation_rgba=[0,0,0,1],\nbrdr_state=True,\nbrdr_state_rgba=[0,0,0,0.75],\ncmap=\"gist_ncar\",\ncmap_limits=None,\ncbar=True,\ncbar_limits=None,\ncbar_label=None,\ncountry_focus=\"CHE\",\ngridlines=True,\ngridlines_dms=False,\nmch_logo=True) :\n\n # TODO: Add custom sizing and resolution specifications\n fig = plt.figure(figsize=(img_size[0]/2.54,img_size[1]/2.54))\n\n # TODO: Accept custom projections\n # proj = ccrs.Mercator()\n proj = ccrs.Orthographic(central_longitude=(lon[0]+lon[-1])/2, central_latitude=(lat[0]+lat[-1])/2)\n\n # TODO: Add support for multiple plots per figure (too complex? 
consider use cases)\n ax = fig.add_subplot(1,1,1,projection = proj)\n\n # TODO: Increase flexibility of borders consideration\n if brdr_state :\n state_brdrs = cfeat.NaturalEarthFeature(\n category='cultural',\n name='admin_1_states_provinces_lines',\n scale='10m',\n facecolor='none')\n ax.add_feature(state_brdrs,linestyle=\"--\",edgecolor=tuple(brdr_state_rgba),linewidth=0.5)\n if brdr_nation :\n ax.add_feature(cfeat.BORDERS,edgecolor=tuple(brdr_nation_rgba))\n\n if country_focus is not None :\n shpfilename = shapereader.natural_earth(resolution='10m',\n category='cultural',name='admin_0_countries')\n reader = shapereader.Reader(shpfilename)\n countries = reader.records() \n # this is a very janky search for Switzerland, but it's ultimately simpler than\n # making geopandas a requirement for the library\n for country in countries :\n if country.attributes['ADM0_A3'] == country_focus :\n break\n assert country.attributes['ADM0_A3'] == country_focus, \"country_focus input not recognised\"\n poly = country.geometry\n\n msk_proj = proj.project_geometry (poly, ccrs.Geodetic()) # project geometry to the projection used by stamen\n\n # plot the mask using semi-transparency (alpha=0.65) on the masked-out portion\n ax.add_geometries( msk_proj, proj, facecolor='white', edgecolor='none', alpha=0.8)\n\n # TODO: Consider first-last versus min-max - how can we avoid accidentally flipping images\n extents=[lon[0],lon[-1],lat[0],lat[-1]]\n ax.set_extent(extents,crs=ccrs.Geodetic())\n\n # this code correctly translate the lat/lon limits into the projected coordinates\n extents_proj = proj.transform_points(ccrs.Geodetic(),np.array(extents[:2]),np.array(extents[2:]))\n extents_proj = extents_proj[:,:2].flatten(order='F')\n\n if gridlines :\n ax.gridlines(draw_labels=True, dms=gridlines_dms, x_inline=False, y_inline=False,linewidth=0.25,\n ylocs=[46,46.5,47,47.5])\n\n # TODO: Custom colormaps, interpolation, cropping\n\n # Upscale matrix for better reprojection\n # f = interp2d(lon, lat, map, kind='linear')\n # latnew = np.linspace(lat[0], lat[-1], (len(lat)-1)*3+1)\n # lonnew = np.linspace(lon[0], lon[-1], (len(lon)-1)*3+1)\n # mapnew = f(lonnew, latnew)\n\n # Upscale matrix for better reprojection\n mapnew = zoom(map,3)\n\n # show map with given cmap and set cmap limits\n im = ax.imshow(mapnew,extent=extents,transform=ccrs.PlateCarree(),\n origin='lower',cmap=cmap)\n if cmap_limits is not None :\n im.set_clim(cmap_limits[0],cmap_limits[1])\n\n # colorbar\n # TODO: Add support for horizontal vertical option\n if cbar :\n cb = plt.colorbar(im, ax=ax, orientation='horizontal',pad=0.05,fraction=0.05)\n cb.ax.set_xlabel(cbar_label)\n\n # show schedule diagram\n if schedule is not None :\n ax2 = inset_axes(ax, width=\"25%\", height=\"25%\", loc=2,\n axes_class = get_projection_class('polar'),\n bbox_to_anchor=tuple(schedule_bbox),\n bbox_transform=ax.transAxes)\n schedule_clock(ax2,schedule,title=\"Exposure schedule\")\n\n # TODO: Add more advanced title interpretation (i.e. 
smart date placeholder)\n if title is not None :\n ax.set_title(title)\n\n if mch_logo :\n ex = ax.get_extent()\n mch_logo_img = plt.imread('python_tamer/mch_logo.png')\n mch_logo_width = 0.15\n mch_logo_pad = 0\n # some maths to work out position, note image aspect ratio 5:1\n mch_extents = [ex[1]-(ex[1]-ex[0])*mch_logo_width-(ex[1]-ex[0])*mch_logo_pad,\n ex[1]-(ex[1]-ex[0])*mch_logo_pad,\n ex[2]+(ex[3]-ex[2])*mch_logo_pad,\n ex[2]+0.2*(ex[1]-ex[0])*mch_logo_width+(ex[3]-ex[2])*mch_logo_pad]\n # zorder puts image on top (behind mask otherwise for some reason)\n ax.imshow(mch_logo_img,extent=mch_extents,zorder=12)\n\n # TODO: Add plot title, small textbox description, copyright from dataset, ticks and gridlines\n if save :\n # Generate timestamp filename if relying on default\n if img_filename is None :\n if title is not None :\n img_filename = format_filename(title)\n else :\n img_filename=dt.datetime.now().strftime('%Y%m%d_%H%M%S_%f')\n elif img_filename == \"timestamp\" :\n img_filename=dt.datetime.now().strftime('%Y%m%d_%H%M%S_%f')\n\n plt.savefig(img_dir+img_filename+\".\"+img_filetype,\n bbox_inches=\"tight\",dpi=img_dpi)\n\n if show :\n plt.show()", "def map(stop_id,base,future,colmn_per,colmn_per_str,colmn_diff,df,col_func,rad_func,lat,lon):\n\n\n #sets the map zoomed into san fran with a scale bar\n mapa = folium.Map([37.765, -122.45],\n zoom_start=13,\n tiles='cartodbpositron',\n control_scale = True)\n \n #sets the layers up so that marks can be added to it (NEED TO CHANGE WHEN THE DATA IM MAPPING CHANGES!!!)\n missing09_group = folium.FeatureGroup(name = 'Stops Missing in 2009')\n missing16_group = folium.FeatureGroup(name = 'Stops Missing in 2016')\n missing_both_group = folium.FeatureGroup(name = 'Stops Missing in Both Years')\n good_group = folium.FeatureGroup(name = 'Model Accuracy Difference (2016 - 2009)')\n \n for name, row in df.iterrows():\n #make all of the stops missing in both years purple with a radius of 20 \n if np.isnan(row[base]) & np.isnan(row[future]):\n \n html= \"\"\" <h2> STOP \"\"\" + str(row[stop_id]) + \"\"\" </h2> \n <p> \n 2009 Name: Missing <br>\n 2016 Name: Missing </p> \n <p> \n Percent Difference: N/A <br> \n Difference: N/A </p> \n <p> 2009 Value: Missing <br> \n 2016 Value: Missing </p> \"\"\"\n \n iframe = folium.IFrame(html=html, width=300, height=150)\n pop_up = folium.Popup(iframe, max_width=2650)\n \n folium.CircleMarker([row[lat], row[lon]], \n color='Purple',\n fill_color='Purple', \n radius= 5,\n fill_opacity = 0.3, popup=pop_up).add_to(missing_both_group)\n\n\n\n\n # make all of the bus stops missing in 2009 sea green \n elif np.isnan(row[base]) == True: \n \n html= \"\"\" <h2> STOP \"\"\" + str(row[stop_id]) + \"\"\" </h2> \n <p> \n 2009 Name: Missing <br>\n 2016 Name: \"\"\" + str(row['STOP NAME']) + \"\"\" </p> \n <p> \n Percent Difference: N/A <br> \n Difference: N/A </p> \n <p> 2009 Value: Missing <br> \n 2016 Value: \"\"\" + str(round(row[future])) + \"\"\" </p> \"\"\"\n \n iframe = folium.IFrame(html=html, width=300, height=150)\n pop_up = folium.Popup(iframe, max_width=2650)\n \n folium.CircleMarker([row[lat], row[lon]], \n color='#3CB371',\n fill_color='#3CB371', \n radius=rad_func(row[future]),\n fill_opacity = 0.3, popup=pop_up).add_to(missing09_group)\n \n \n # make all of the bus stops missing in 2016 maroon \n elif np.isnan(row[future]) == True: \n\n \n #if pd.isnull(row['STOP_NAME_09']):\n # row['STOP_NAME_09'] = 'Missing '\n html=\"\"\"\n <h2> STOP \"\"\" + str(row[stop_id]) + \"\"\" </h2>\n <p> \n 2009 Name: \"\"\" + 
str(row['STOP']) + \"\"\" <br>\n 2016 Name: Missing </p> \n <p> \n Percent Difference: N/A <br>\n Difference: N/A </p>\n <p> \n 2009 Value: \"\"\" + str(row[base]) + \"\"\" <br>\n 2016 Value: Missing </p>\"\"\"\n \n iframe = folium.IFrame(html=html, width=300, height=150)\n pop_up = folium.Popup(iframe, max_width=2650)\n\n folium.CircleMarker([row[lat], row[lon]], \n color='#800000',\n fill_color='#800000', \n radius=rad_func(row[base]),\n fill_opacity = 0.3, popup=pop_up).add_to(missing16_group)\n \n \n \n#when both stops have a value of 0 then the percent difference is calculated as a nan and causes issues with the color and radius function\n#since the change is 0 (0 to 0) we set the color and radius equal to what it would have been set by the radius and color function (Dark Grey and a radius of 3 map units) \n elif row[future] == 0 and row[base] == 0:\n # if pd.isnull(row['STOP_NAME_09']):\n #row['STOP_NAME_09'] = 'Missing '\n #elif pd.isnull(row['STOP_NAME_16']):\n #row['STOP_NAME_16'] = 'Missing '\n\n html=\"\"\"\n <h2> STOP \"\"\" + str(row[stop_id]) + \"\"\" </h2>\n <p> \n 2009 Name: \"\"\" + row['STOP'] + \"\"\" <br>\n <br>\n 2016 Name: \"\"\" + row['STOP NAME'] + \"\"\" </p> \n <p> \n Percent Difference: 0% <br>\n Difference: \"\"\" + str(round(row[colmn_diff])) + \"\"\" </p>\n <p> 2009 Value: \"\"\" + str(round(row[base])) + \"\"\" <br>\n 2016 Value: \"\"\" + str(round(row[future])) + \"\"\" </p> \"\"\"\n \n iframe = folium.IFrame(html=html, width=300, height=150)\n pop_up = folium.Popup(iframe, max_width=2650)\n \n folium.CircleMarker([row[\"LAT\"], row[\"LON\"]], \n color='DarkGray',\n fill_color='DarkGray', \n radius= 5,\n fill_opacity = 0.3, popup=pop_up).add_to(good_group) \n\n \n #based on percent difference map the bus stop ranging from dark green (high % gain) to light green (medium % gain) to grey (low % gain/loss) to light red (low % loss) to dark red (high % loss) \n else:\n #takes care of a bug when there is a stop name in one year but not the other and a bug of having an infinite percent difference when the base year is zero \n if row[base] == 0:\n row[base] = 0.00001 \n row[colmn_per] = ((row[future] - row[base])/row[base])*100\n \n #if pd.isnull(row['STOP_NAME_09']):\n #row['STOP_NAME_09'] = 'Missing '\n #elif pd.isnull(row['STOP_NAME_16']):\n #row['STOP_NAME_16'] = 'Missing '\n \n\n html=\"\"\"\n <h2> STOP: \"\"\" + str(row[stop_id]) + \"\"\" </h2>\n <p> \n 2009 Name: \"\"\" + row['STOP'] + \"\"\" <br>\n <br>\n 2016 Name: \"\"\" + row['STOP NAME'] + \"\"\" </p> \n <p> \n Percent Difference: \"\"\" + str(round(row[colmn_per])) + \"\"\"%\n <br>\n Difference: \"\"\" + str(round(row[colmn_diff])) + \"\"\"\n </p>\n <p>\n 2009 Value: \"\"\" + str(round(row[base])) + \"\"\"\n <br>\n 2016 Value: \"\"\" + str(round(row[future])) + \"\"\"\n </p>\"\"\"\n \n iframe = folium.IFrame(html=html, width=300, height=150)\n pop_up = folium.Popup(iframe, max_width=2650)\n \n folium.CircleMarker([row[lat], row[lon]], \n color=col_func(row[colmn_per]), \n fill_color=col_func(row[colmn_per]), \n radius=rad_func(row[colmn_diff]),\n fill_opacity = 0.3, popup=pop_up).add_to(good_group)\n \n \n missing09_group.add_to(mapa)\n missing16_group.add_to(mapa)\n missing_both_group.add_to(mapa)\n good_group.add_to(mapa)\n folium.LayerControl().add_to(mapa)\n\n return mapa", "def borra_overlaps(self):\r\n nomTabla=self.nomTabla.split(\".\")[1]\r\n dicCondWhere={}\r\n dicCondWhere[\"id_trabajo\"]=self.oUtiles.id_trabajo\r\n if nomTabla == \"ed_fincas\":\r\n nomTablaOverlaps=\"ed_src\" + 
str(self.oUtiles.src_trabajo) + \".\" + \"ed_overlaps_fincas\"\r\n nomTablaGaps=\"ed_src\" + str(self.oUtiles.src_trabajo) + \".\" + \"ed_gaps_fincas\"\r\n else:\r\n nomTablaOverlaps=\"src\" + str(self.oUtiles.src_trabajo) + \".\" + \"overlaps_fincas\"\r\n nomTablaGaps=\"src\" + str(self.oUtiles.src_trabajo) + \".\" + \"gaps_fincas\"\r\n self.oUtiles.oConsultasPg.deleteDatos(nombreTabla=nomTablaOverlaps,dicCondWhere=dicCondWhere)\r\n self.oUtiles.oConsultasPg.deleteDatos(nombreTabla=nomTablaGaps,dicCondWhere=dicCondWhere)", "def plotNetwork(module_dict, arch):\n # Not a great way of doing it but it'll do for now\n min_val = 0\n max_val = 0\n for name, module in module_dict.items():\n if np.amin(module) < min_val:\n min_val = np.amin(module)\n if np.amax(module) > max_val:\n max_val = np.amax(module)\n\n print(min_val)\n print(max_val)\n list_keys = list(module_dict)\n num_layers = len(module_dict)\n num_cols = math.ceil(math.sqrt(num_layers))\n num_rows = math.ceil(num_layers/num_cols)\n fig, axes = plt.subplots(num_cols, num_rows, figsize=(num_cols*10, num_rows*10))\n\n for i, ax in zip(range(num_cols*num_rows), axes.flat):\n if i < num_layers:\n sub = sns.heatmap(module_dict[list_keys[i]], cmap=sns.diverging_palette(240, 10, s=100, as_cmap=True), \n center=0.00, cbar_kws={\"shrink\": 0.85}, xticklabels=False, yticklabels=False, square=True, ax=ax)\n ax.set_title(list_keys[i], fontsize=20)\n # make frame visible\n for _, spine in sub.spines.items():\n spine.set_visible(True)\n spine.set_linewidth(2) \n else:\n fig.delaxes(ax)\n\n\n if not os.path.exists('plots'):\n os.makedirs('plots')\n\n fig.savefig('plots/{architecture}full_network.png'.format(architecture=arch), transparent=True)", "def translate(value, from_min, from_max, to_min, to_max):\n # Figure out how 'wide' each range is\n left_span = from_max - from_min\n right_span = to_max - to_min\n\n # Convert the left range into a 0-1 range (float)\n value_scaled = float(value - from_min) / float(left_span)\n\n # Convert the 0-1 range into a value in the right range.\n return to_min + (value_scaled * right_span)" ]
[ "0.53061557", "0.5123476", "0.50569475", "0.5041872", "0.4931829", "0.49258262", "0.4896154", "0.4868902", "0.4862584", "0.48051956", "0.48001003", "0.47694784", "0.47593305", "0.47555342", "0.4747587", "0.47454724", "0.47438157", "0.47292355", "0.47042343", "0.46995413", "0.46734232", "0.46683073", "0.46559906", "0.4649211", "0.46427724", "0.46302462", "0.46177948", "0.4608795", "0.4604277", "0.46033227", "0.46020344", "0.4577049", "0.45766228", "0.45682374", "0.45664078", "0.45662212", "0.45601267", "0.45539796", "0.45497677", "0.45496884", "0.4549541", "0.4541684", "0.4530505", "0.4520135", "0.451919", "0.4509491", "0.44928536", "0.4490026", "0.44844934", "0.44674915", "0.44642806", "0.44533324", "0.4435123", "0.44338098", "0.44321153", "0.4428326", "0.44246277", "0.44245443", "0.4422402", "0.44211072", "0.44165355", "0.44159058", "0.44138414", "0.4412996", "0.4412037", "0.4405864", "0.4401074", "0.44000575", "0.4399741", "0.43972448", "0.4393874", "0.43925932", "0.438952", "0.43879795", "0.43848947", "0.4379572", "0.4378683", "0.43774498", "0.43751118", "0.43730426", "0.43719506", "0.43715408", "0.43708485", "0.4370008", "0.43696287", "0.43675095", "0.4366618", "0.43639007", "0.43637642", "0.4360796", "0.43537116", "0.43535203", "0.43488705", "0.43391228", "0.43378136", "0.4332595", "0.43297935", "0.432897", "0.4328915", "0.43257582" ]
0.77395713
0
Creates a clone of the Layout with the nth-frame for each Element.
def _get_frame(self, key): layout_frame = self.layout.clone(shared_data=False) keyisint = isinstance(key, int) if not isinstance(key, tuple): key = (key,) nthkey_fn = lambda x: zip(tuple(x.name for x in x.kdims), list(x.data.keys())[min([key[0], len(x)-1])]) if key == self.current_key: return self.current_frame else: self.current_key = key for path, item in self.layout.items(): if self.dynamic == 'open': if keyisint: counts = item.traverse(lambda x: x.counter, (DynamicMap,)) if key[0] >= counts[0]: item.traverse(lambda x: next(x), (DynamicMap,)) dim_keys = item.traverse(nthkey_fn, (DynamicMap,))[0] else: dim_keys = zip([d.name for d in self.dimensions if d in item.dimensions('key')], key) self.current_key = tuple(k[1] for k in dim_keys) elif item.traverse(lambda x: x, [DynamicMap]): with dimensionless_cache(item, not self._force or not self.drawn): key, frame = util.get_dynamic_item(item, self.dimensions, key) layout_frame[path] = frame continue elif self.uniform: dim_keys = zip([d.name for d in self.dimensions if d in item.dimensions('key')], key) else: dim_keys = item.traverse(nthkey_fn, (HoloMap,))[0] if dim_keys: obj = item.select((HoloMap,), **dict(dim_keys)) if isinstance(obj, HoloMap) and len(obj) == 0: continue else: layout_frame[path] = obj else: layout_frame[path] = item traverse_setter(self, '_force', False) self.current_frame = layout_frame return layout_frame
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_random_layout(h)\n self.set_sliding_window_layout(h)\n self.set_global_layout_itc(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout", "def clone(self):\n return _libsbml.Layout_clone(self)", "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_random_layout(h)\n self.set_local_layout(h)\n self.set_global_layout(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout", "def clone(self):\n return _libsbml.LayoutExtension_clone(self)", "def create_layout( self ):", "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_sliding_window_layout(h)\n self.set_global_layout(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout", "def render(self, n: int):\n if int(n) == 1:\n self.layout.children = [self.figures[0]]\n elif int(n) == 2:\n self.layout.children = [self.figures[0], self.figures[1]]\n elif int(n) == 3:\n self.layout.children = [\n self.figures[0],\n self.figures[1],\n self.figures[2],\n ]", "def clone(self):\n return _libsbml.ListOfLayouts_clone(self)", "def make_layout(self):\n self.layout[:, :, :] = 1\n return self.layout", "def make_layout(self):\n\n for h in range(0, self.num_layout_heads):\n self.set_local_layout(h)\n self.set_global_layout(h)\n\n self.check_and_propagate_first_head_layout()\n return self.layout", "def buildmainframe(self):\n self.mainframewidgets=[]\n for x in range(3):\n thislabel = Label(self.mainframe, text=str(x))\n thislabel.grid()\n self.mainframewidgets.append(thislabel)", "def build_frames(self):\n self.cntrl_frame = tk.PanedWindow(self.root)\n self.cntrl_frame.pack(side = tk.TOP, padx = 1, pady = 1, fill = tk.Y)\n self.info_frame_1 = tk.PanedWindow(self.root)\n self.info_frame_1.pack(side = tk.TOP, padx = 1, pady = 2, fill = tk.Y)", "def _create_left_summary_frame(self):\n self.frames.append(tk.Frame(self.master))\n self.frames[6].grid(column=0, row=2, sticky=\"ew\")\n self.getall_button = (tk.Button(self.frames[6],\n text=\"All Indiv stats\"))\n self.getall_button.grid(column=0, row=0, sticky=\"nesw\")\n self.go_button = (tk.Button(self.frames[6], text=\"Team Stats\"))\n self.go_button.grid(column=1, row=0, sticky=\"nesw\")\n self.frames[6].columnconfigure(0, weight=1)\n self.frames[6].columnconfigure(1, weight=1)", "def createModFrame(self, number,name):\n frame = self.data.modFrames[number]\n display = frame(self.container, self)\n display.grid(row=0, column=0, sticky=\"nsew\")\n if(name not in self.data.activeMod):self.data.activeMod[name]= module()\n self.data.activeMod[name].modFramesNext.append(display)", "def __createLayout(self):\r\n self.__createCanvas()\r\n self.__createButton()\r\n self.__createInputFunction()\r\n self.__createLimits()\r\n self.__styleLayout()", "def create_flowbox(self, flowbox, frame_list):\n\n for num_frame in frame_list:\n grid = Gtk.Grid()\n btn = self.new_thumbnail_button(num_frame)\n\n widget_cls_label = Gtk.Label()\n widget_cls_label.set_text(\"?\")\n widget_cls_label.set_size_request(20, 20)\n widget_cls_label.connect(\"draw\", self.area_on_draw, {'frame': num_frame, 'widget_label': widget_cls_label})\n # Add drawing area\n grid.add(btn)\n grid.attach_next_to(widget_cls_label, btn, Gtk.PositionType.BOTTOM, 1, 2)\n\n flowbox.add(grid)\n self.flowbox_layout = flowbox", "def create_board(self):\n for field in self.fields_start_locs:\n loc = self.loc_to_view(field[0], field[1])\n new_field = Field(loc, self.piece_size)\n 
self.sprite_group.add(new_field)\n self.click_sprites.add(new_field)\n self.fields.append(new_field)", "def gui_layout(self) -> List[List[sg.Element]]:\n tabs = []\n if self.label != \"new\":\n tabs.append(sg.Tab(\"View\", self.gui_layout_view(), key=self.key_gen(\"view\")))\n\n tabs.append(sg.Tab(\"Edit\", self.gui_layout_edit(), key=self.key_gen(\"edit\")))\n\n return [[sg.TabGroup([tabs], key=\"controller_%s_tabs\" % self.label)]]", "def clone(self):\n return _libsbml.LayoutModelPlugin_clone(self)", "def _create_mid_summary_frame(self):\n self.frames.append(tk.Frame(self.master))\n self.frames[7].grid(column=1, row=2, sticky=\"ew\")\n # Create left empty frame within\n self.left_summary = tk.Frame(self.frames[7])\n self.left_summary.grid(column=0, row=0, sticky=\"ew\")\n # Create mid empty frame within\n self.mid_summary = tk.Frame(self.frames[7])\n self.mid_summary.grid(column=1, row=0, sticky=\"ew\")\n # Create right frame with labels within\n self.right_summary = tk.Frame(self.frames[7])\n self.right_summary.grid(column=2, row=0, sticky=\"ns\")\n self.labels.append([])\n self.labels[3].append(tk.Label(self.right_summary,\n text=\"Eq. Weight Ave\", relief=\"ridge\"))\n self.labels[3][0].grid(column=0, row=0, sticky=\"ns\", padx=5, pady=5)\n self.labels[3].append(tk.Label(self.right_summary, text=\"Average\",\n relief=\"ridge\"))\n self.labels[3][1].grid(column=0, row=1, sticky=\"ns\", padx=5, pady=5)\n #self.frames[7].columnconfigure(0, weight=1)\n #self.frames[7].rowconfigure(0, minsize=60)\n #self.frames[7].columnconfigure(1, weight=1)\n #self.frames[7].columnconfigure(2, weight=1)", "def _layout(self):\n top = 2.0\n y = 0.0\n x = 0.0\n maxend = 0.0\n for track in self._tracks:\n track.set_view(self.view.species, self.view.seqname, \n self.view.start, self.view.end)\n tracksize = track.get_size()\n y -= tracksize[1]\n track.set_pos(track.pos_offset[0] + x, track.pos_offset[1] + y)\n self.size = [self.view.end - self.view.start + 1, 0 - y]", "def create_panel(self):\n # Main Frame creation\n frame1 = Frame(self.window)\n frame1.pack(fill=\"both\")\n tablayout = Notebook(frame1)\n \n ##### TRACKER #####\n tab = Frame(tablayout) # creating 1st nested frame\n tab.pack(fill=\"both\")\n table = Frame(tab)\n table.pack(fill=\"both\")\n self.show_table(self.t.timeline[\"week\" + str(self.week)], table) # Grids the week with data\n self.add_buttons(tab, table)\n tablayout.add(tab, text=\"Current Week\") \n \n \n ##### STATS #####\n tab = Frame(tablayout) # creating 2nd nested frame\n tab.pack(fill=\"both\")\n self.stats.create_canvas(tab)\n\n\n # once its packed you can add it to the window object under a title\n tablayout.add(tab, text=\"Statistics\") \n tablayout.pack(fill=\"both\") # once everything is done now you pack the tablayout", "def createFrame(self):\n \n tkinterListBoxDialog.createFrame(self)\n self.addFrameButtons()", "def make_sub_views(root, loi, func):\n item_frame = Frame(root)\n item_frame.grid()\n for i, item in enumerate(loi):\n func(item_frame, item).pack(side=\"left\", fill=\"y\")", "def create_panel(self):\n panel = QWidget()\n panel.setContentsMargins(0, 0, 0, 0)\n\n layout = QGridLayout(panel)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setSpacing(0)\n\n # layout.setRowMinimumHeight(2, 1)\n\n return layout, panel", "def _create_right_info_frame(self, headers):\n\n self.frames.append(tk.Frame(self.master))\n self.labels.append([])\n\n for column, name in enumerate(headers):\n self.frames[4].columnconfigure(column, weight=1, minsize=60)\n self.header_values[name] 
= []\n for row in range(5):\n if column == 9:\n self.header_values[name].append(tk.StringVar())\n else:\n self.header_values[name].append(tk.DoubleVar())\n self.header_values[name][row].set(self.default_values\n ['ri']\n [(column*5)+row])\n self.labels[2].append(tk.Label(self.frames[4],\n textvariable=self.\n header_values[name]\n [row], relief=\"ridge\"))\n self.labels[2][(column*5)+row].grid(column=column, row=row,\n sticky=\"nsew\", padx=8, pady=10)\n self.frames[4].rowconfigure(row, weight=1, minsize=50)\n self.frames[4].grid(column=2, row=1, sticky=\"ew\", padx=10, pady=10)", "def updatePlotLayoutGrid(self):\n\n print('updatePlotLayoutGrid()')\n plotLayoutType = self.plotLayoutType # 1x, 1x2, 2x1, 2x2\n if plotLayoutType == '1x':\n numPlots = 1\n elif plotLayoutType == '1x2':\n numPlots = 2\n elif plotLayoutType == '2x1':\n numPlots = 2\n elif plotLayoutType == '2x2':\n numPlots = 4\n\n # remove all widgets from self.plotLayout\n n = self.plotLayout.count()\n for i in range(n):\n item = self.plotLayout.itemAt(i)\n if item is None:\n print(' warning: updatePlotLayoutGrid() got None item at step', i)\n continue\n widget = item.widget()\n print(' updatePlotLayoutGrid() removing i:', i, 'item:', type(item))\n self.plotLayout.removeWidget(widget)\n #self.plotLayout.removeItem(item)\n\n state = self.getState()\n for i in range(numPlots):\n if i==0:\n row = 0\n col = 0\n elif i==1:\n if plotLayoutType == '1x2':\n row = 0\n col = 1\n elif plotLayoutType == '2x1':\n row = 1\n col = 0\n elif plotLayoutType == '2x2':\n row = 0\n col = 1\n elif i==2:\n row = 1\n col = 0\n elif i==3:\n row = 1\n col = 1\n #\n oneCanvas = myMplCanvas(plotNumber=i)\n oneCanvas.myUpdate(state) # initial plot\n oneCanvas.signalSelectFromPlot.connect(self.slotSelectFromPlot)\n self.signalCancelSelection.connect(oneCanvas.slotCancelSelection)\n self.myPlotCanvasList[i] = oneCanvas\n\n #\n self.plotLayout.addWidget(oneCanvas, row, col)\n\n # connect each canvas to all other canvas\n for i in range(numPlots):\n iCanvas = self.myPlotCanvasList[i]\n iCanvas.signalSelectSquare.connect(self.slotSelectSquare)\n # feb 2023, I was connecting this twice and was getting 2x calls for each click\n #iCanvas.signalSelectFromPlot.connect(self.slotSelectFromPlot)\n for j in range(numPlots):\n #if i==j:\n # continue\n jCanvas = self.myPlotCanvasList[j]\n iCanvas.signalSelectFromPlot.connect(jCanvas.slotSelectInd)\n iCanvas.signalSelectSquare.connect(jCanvas.slotSelectSquare)\n\n #\n # select the firsr plot\n self.myPlotCanvasList[0].signalSelectSquare.emit(0, None) #slotSelectSquare(0)", "def add_questions(self):\r\n for question_id in self.question_ids:\r\n question = self.questions[question_id]\r\n self.frame_layout.addWidget(question.create_frame(), alignment=QtCore.Qt.AlignLeft)", "def gui_layout_view(self) -> List[List[sg.Element]]:\n return []", "def _generate_layout(self):\n\n pass", "def _clone_layout_placeholders(self, slidelayout):\n latent_ph_types = (PH_TYPE_DT, PH_TYPE_SLDNUM, PH_TYPE_FTR)\n for sp in slidelayout.shapes:\n if not sp.is_placeholder:\n continue\n ph = Placeholder(sp)\n if ph.type in latent_ph_types:\n continue\n self.__clone_layout_placeholder(ph)", "def create_layout( self ):\n\n # highlight all of our widgets so we can debug layouts.\n # XXX: debugging support.\n self.setStyleSheet( \"border: 1px solid black\" )\n\n editing_layout = QGridLayout()\n editing_layout.addWidget( QLabel( \"Art Record ID:\" ),\n 0, 0 )\n editing_layout.addWidget( QLabel( \"{:d}\".format( self.record[\"id\"] ) ),\n 0, 1 )\n\n # 
1st\n editing_layout.addWidget( self.artTypeComboLabel,\n 1, 0 )\n editing_layout.addWidget( self.artTypeComboBox,\n 1, 1 )\n\n # 2nd\n editing_layout.addWidget( self.artSizeComboLabel,\n 2, 0 )\n editing_layout.addWidget( self.artSizeComboBox,\n 2, 1 )\n\n # 3rd\n editing_layout.addWidget( self.artQualityComboLabel,\n 3, 0 )\n editing_layout.addWidget( self.artQualityComboBox,\n 3, 1 )\n\n # 4th\n editing_layout.addWidget( self.artProcessingStateComboLabel,\n 4, 0 )\n editing_layout.addWidget( self.artProcessingStateComboBox,\n 4, 1 )\n\n # 5th\n editing_layout.addWidget( self.artArtistsListLabel,\n 0, 3 )\n editing_layout.addWidget( self.artArtistsListView,\n 1, 3,\n 4, 1 )\n\n # 6th\n editing_layout.addWidget( self.artAssociatesListLabel,\n 0, 5 )\n editing_layout.addWidget( self.artAssociatesListView,\n 1, 5,\n 4, 1 )\n\n # 7th\n editing_layout.addWidget( self.artVandalsListLabel,\n 0, 7 )\n editing_layout.addWidget( self.artVandalsListView,\n 1, 7,\n 4, 1 )\n\n # 8th\n editing_layout.addWidget( self.artTagsLabel,\n 6, 0 )\n editing_layout.addWidget( self.artTagsLineEdit,\n 6, 1,\n 1, 7 )\n\n # 9th\n editing_layout.addWidget( self.artDateLabel,\n 5, 0 )\n editing_layout.addWidget( self.artDateLineEdit,\n 5, 1 )\n\n # vertical layout of the photo preview and everything else.\n main_layout = QVBoxLayout()\n main_layout.setContentsMargins( 0, 0, 0, 0 )\n main_layout.setSpacing( 0 )\n\n main_layout.addWidget( self.previewArea )\n main_layout.addLayout( editing_layout )\n main_layout.setStretchFactor( self.previewArea, 1 )\n\n self.centralWidget = QGroupBox()\n self.centralWidget.setLayout( main_layout )\n\n self.setCentralWidget( self.centralWidget )", "def define_panel_structure(self):\n w, h = self.parent.GetSize()\n self.vbox = wx.BoxSizer(wx.VERTICAL)\n self.sizer1 = wx.BoxSizer(wx.VERTICAL)\n self.sizer1.SetMinSize(wx.Size(w/13, h*2/5))\n\n self.sizer2 = wx.BoxSizer(wx.VERTICAL)\n self.sizer3 = wx.FlexGridSizer(9, 2, 4, 1)\n self.sizer4 = wx.BoxSizer(wx.VERTICAL)\n self.sizer5 = wx.BoxSizer(wx.VERTICAL)\n\n self.vbox.Add(self.sizer5, 0, wx.EXPAND | wx.ALL, 1)\n self.vbox.Add(self.sizer1, 1, wx.EXPAND | wx.ALL, 0)\n self.vbox.Add(self.sizer2, 0, wx.EXPAND | wx.ALL, 1)\n self.vbox.Add(self.sizer3, 0, wx.EXPAND | wx.ALL, 10)\n # self.vbox.Add(self.sizer4, 0, wx.EXPAND|wx.ALL,5)\n\n self.SetSizer(self.vbox)", "def _create_right_name_frame(self, headers):\n\n self.frames.append(tk.LabelFrame(self.master))\n self.labels.append([])\n\n for i, name in enumerate(headers):\n self.labels[1].append(tk.Label(self.frames[1], text=name,\n relief=\"sunken\"))\n self.labels[1][i].grid(column=i, row=0, sticky=\"ew\")\n self.frames[1].columnconfigure(i, weight=1, minsize=60)\n self.frames[1].grid(column=2, row=0, sticky=\"ew\", padx=10, pady=10)", "def __init__(self, *args):\n this = _libsbml.new_LayoutExtension(*args)\n try: self.this.append(this)\n except: self.this = this", "def setup_64_bit_display_1(self):\n main_frame = Gtk.Frame(label=\"Main Frame 0\")\n main_frame.set_label_align(0.1,0.5)\n main_frame.get_style_context().add_class(\"frame_main\")\n \n self.main_frame_list.append(main_frame)\n \n # Increment placement on window grid based on length of mainframe\n position = len(self.main_frame_list)\n self.grid.attach(self.main_frame_list[-1], 0,position,1,1) \n \n # Grid_frame. Grid for the 16 x nibble frames\n grid_frame = Gtk.Grid()\n self.main_frame_list[-1].add(grid_frame) \n \n # Temp lists. These get wiped out. 
Only good for the current pass \n frame_nibble_list = []\n grid_nibble_list = []\n frame_bit_list = [] \n button_bit_list = []\n s = \"\"\n \n for i in range(2): # rows 2**5 = 32\n # i is reserved for producing two rows\n for j in range(8): # nibbles per row 2**2 = 4\n # Create the 16 nibble frames and place in frame_nibble_list\n frame_nibble = Gtk.Frame(label=str(i*8+j).zfill(2))\n frame_nibble.get_style_context().add_class(\"frame_nibble\")\n frame_nibble.set_label_align(0.5,0.5)\n frame_nibble_list.append(frame_nibble)\n\n # Place a grid in each of the 16 nibble frame\n grid_nibble = Gtk.Grid()\n grid_nibble_list.append(grid_nibble)\n frame_nibble_list[i*8+j].add(grid_nibble_list[i*8+j])\n\n # Attach the nibble frames (and their nibble grids) into the grid frame.\n # Two rows, bottom 0 to 7 and top 8 to 15 \n if i*8+j < 8:\n grid_frame.attach(frame_nibble_list[i*8+j], 8-j,1,1,1)\n else:\n grid_frame.attach(frame_nibble_list[i*8+j], 8-j,0,1,1) \n \n for k in range(4): # bit per frame i*32+j*4+k\n #Create the 4 bit frames to insert into grid_nibbles in each frame_nibble\n frame_bit = Gtk.Frame(label=str(i*32+j*4+k).zfill(2))\n #print(i*32+j*4+k)\n frame_bit.get_style_context().add_class(\"frame_bit\")\n frame_bit.set_label_align(0.5,0.5)\n \n # Add the IEEE 754 colouring for sign, exponent and fraction.\n if i*32+j*4+k <= 51:\n frame_bit.get_style_context().add_class(\"colour_0\")\n elif i*32+j*4+k >= 52 and i*32+j*4+k <= 62:\n frame_bit.get_style_context().add_class(\"colour_1\") \n else:\n frame_bit.get_style_context().add_class(\"colour_2\")\n \n # Button bits. Add into each frame bit. Label set to 0.\n button_bit = Gtk.Button(label=\"0\")\n button_bit.get_style_context().add_class(\"button_bit\")\n button_bit.connect(\"clicked\", self.cb_button_bit, i*32+j*4+k) \n button_bit_list.append(button_bit)\n \n # Add the button bits to their frames\n frame_bit.add(button_bit_list[i*32+j*4+k]) \n frame_bit_list.append(frame_bit) \n \n # Bottom row 0 to 31 and top row 32 to 63\n if not i: \n grid_nibble_list[i*8+j].attach(frame_bit_list[i*32+j*4+k], \n 32-i*32+j*4-k,1,1,1)\n else:\n grid_nibble_list[i*8+j].attach(frame_bit_list[i*32+j*4+k], \n 64-i*32+j*4-k,0,1,1) \n\n # Permanent lists. 
Plus, self.main_frame_list\n self.main_button_bit_list.append(button_bit_list) \n self.main_frame_nibble_list.append(frame_nibble_list)\n self.main_frame_bit_list.append(frame_bit_list)", "def create_layout(layout_list):\n qt_button_layout = QGridLayout()\n\n for row in range(len(layout_list)):\n for column in range(len(layout_list[row])):\n user_input_object = layout_list[row][column]\n qt_button_layout.addWidget(user_input_object, row, column, 1, 1)\n\n return qt_button_layout", "def _initialize_widgets(self):\n self.outer_board = [[Frame(self.root, bd = self.FRAME_BORDER_WIDTH, \n relief = self.FRAME_RELIEF) \n for _ in range(self.BOARD_DIM)] \n for _ in range(self.BOARD_DIM)]\n self.inner_boards = [[self._generate_inner_board(r, c) \n for c in range(self.BOARD_DIM)]\n for r in range(self.BOARD_DIM)]", "def __init__(self,\n num_heads=8,\n seq_len=1024,\n block=16,\n different_layout_per_head=False):\n super().__init__(num_heads, seq_len, block, different_layout_per_head)\n self.make_layout()", "def create_containers(self):\r\n self.container_widgets.update({\r\n \"main_frame\": tk.Frame(master=self)})\r\n self.container_widgets.update({\r\n \"panel_frame\": tk.Frame(master=self.container_widgets[\"main_frame\"]),\r\n \"order_frame\": tk.Frame(master=self.container_widgets[\"main_frame\"],\r\n width=const.ORDER_FRAME_SIZE[\"width\"],\r\n height=const.ORDER_FRAME_SIZE[\"height\"])})\r\n self.container_widgets.update({\r\n \"order_canvas\": tk.Canvas(master=self.container_widgets[\"order_frame\"])})\r\n self.container_widgets.update({\r\n \"orders_container\": tk.Frame(master=self.container_widgets[\"order_canvas\"]),\r\n \"orders_scrollbar\": tk.Scrollbar(master=self.container_widgets[\"order_frame\"],\r\n orient=\"vertical\",\r\n command=self.container_widgets[\"order_canvas\"].yview)\r\n })", "def create_grid(self, main_frame: tk.LabelFrame) -> None:\n for square_row in range(3):\n for square_column in range(3):\n square = tk.Frame(main_frame, highlightbackground='black', highlightcolor='red',\n highlightthickness=1, width=120, heigh=120, padx=0)\n square.grid(row=square_row, column=square_column)\n self.create_cells_and_entries(square, square_row)\n return None", "def do_layout(self):\n self.define_panel_structure()\n self.layout_selection()\n self.layout_data_list()\n self.layout_batch()\n self.layout_button()", "def __init__(self, size: int):\n self.size = size\n self.window = []", "def __init__(self):\n super(GraphVisualizer, self).__init__()\n\n self._layout = QGridLayout()\n self.setLayout(self._layout)\n\n self._next_column = 0\n\n self._columns = []", "def create_layout( self ):\n\n # XXX: debugging layout\n self.setStyleSheet( \"border: 1px solid black\" )\n\n selection_layout = QVBoxLayout()\n selection_layout.setContentsMargins( 0, 0, 0, 0 )\n selection_layout.setSpacing( 0 )\n selection_layout.addWidget( self.selectionView )\n\n selection_type_layout = QHBoxLayout()\n selection_type_layout.setContentsMargins( 0, 0, 0, 0 )\n selection_type_layout.setSpacing( 0 )\n selection_type_layout.addWidget( self.selectionBoxLabel )\n selection_type_layout.addWidget( self.selectionBox )\n selection_type_layout.setStretchFactor( self.selectionBox, 1 )\n\n selection_layout.addLayout( selection_type_layout )\n selection_layout.setStretchFactor( self.selectionView, 1 )\n\n info_layout = QVBoxLayout()\n info_layout.setContentsMargins( 0, 0, 0, 0 )\n info_layout.setSpacing( 0 )\n\n stats_layout = QGridLayout()\n stats_layout.setContentsMargins( 0, 0, 0, 0 )\n stats_layout.setVerticalSpacing( 1 
)\n stats_layout.setHorizontalSpacing( 10 )\n\n stats_layout.addWidget( QLabel( \"State:\" ),\n 0, 0 )\n stats_layout.addWidget( self.infoStateLabel,\n 0, 1 )\n\n stats_layout.addWidget( QLabel( \"Art Records:\" ),\n 1, 0 )\n stats_layout.addWidget( self.infoSummaryLabel,\n 1, 1 )\n\n stats_layout.addWidget( QLabel( \"Location:\" ),\n 2, 0 )\n stats_layout.addWidget( self.infoLocationLabel,\n 2, 1 )\n\n stats_layout.addWidget( QLabel( \"Taken:\" ),\n 3, 0 )\n stats_layout.addWidget( self.infoTakenLabel,\n 3, 1 )\n\n stats_layout.addWidget( QLabel( \"Tags:\" ),\n 4, 0 )\n stats_layout.addWidget( self.infoTagsLabel,\n 4, 1 )\n\n stats_layout.setColumnStretch( 1, 1 )\n\n info_layout.addWidget( self.previewArea )\n info_layout.addLayout( stats_layout )\n info_layout.setStretchFactor( self.previewArea, 1 )\n\n self.centralWidget = QWidget()\n self.centralWidget.setLayout( info_layout )\n\n self.selection_dock.widget().setLayout( selection_layout )\n\n self.addDockWidget( Qt.LeftDockWidgetArea, self.selection_dock )\n\n self.setCentralWidget( self.centralWidget )", "def elements_page(self):\n self.list_frame = tk.Frame(self.page)\n self.data_frame = tk.Frame(self.page)\n p2_label_2 = ttk.Label(self.page, text=self.lang.VP_CON_DDBB, font=FONT_BENVINGUDA)\n p2_button_1 = ttk.Button(self.page, text=self.lang.BACK, command=self.tornar_main)\n self.crear_elements_viewer()\n self.page.grid(row=0, column=0, sticky=\"NESW\")\n p2_label_2.pack(pady=20)\n p2_button_1.pack(pady=0, ipadx=15)\n self.list_frame.pack()\n self.data_frame.pack()\n self.p2_frame_list.pack(pady=15, padx=5, expand=False, side=tk.LEFT)\n self.evo_frame.pack(pady=15, padx=5, expand=False, side=tk.RIGHT)\n self.p2_frame_list_2.pack(pady=15, padx=5, expand=False, side=tk.RIGHT)\n self.p2_frame_list_1.pack(pady=15, padx=5, expand=False, side=tk.RIGHT)\n self.p2_frame_elements.pack(pady=0, padx=10, side=tk.LEFT)\n self.p2_frame_img.grid(row=1, column=1, pady=5, padx=20, sticky=\"w\")\n self.p2_frame_metadata.grid(row=1, column=2, pady=20, padx=20, sticky=\"w\")", "def revert_frames(self, n_frames: int = 25) -> None:\n if self.__layout.count() > 0 and self.__trajectory_writer is not None and self.mol_widget is not None:\n self.__trajectory_writer.pause()\n traj = self.__trajectory_writer.get_trajectory()\n n_back = min(n_frames, len(traj) - 1)\n wanted_frame = traj[-n_back]\n atoms = utils.AtomCollection(traj.elements, wanted_frame)\n # delete all frames after the wanted one\n # slices not supported in python bindings of delitem of trajectory when writing this\n for _ in range(n_back):\n del traj[-1]\n self.mol_widget.update_molecule(atoms=atoms)\n self.__trajectory_writer.unpause()", "def adjustFrame(self, shape):\n self.ht = shape[0]\n self.wd = shape[1]\n self.outframe = np.zeros((self.ht, self.wd, 3), np.uint8)\n self.hasSize = True", "def crear_elements_viewer(self):\n\n self.p2_frame_list = tk.Frame(self.list_frame, borderwidth=2, relief=\"groove\")\n self.p2_label_info = ttk.Label(self.p2_frame_list, text=self.lang.VP_PAC_ID, font=FONT_TITOL)\n self.p2_label_info.pack()\n scrollbar = tk.Scrollbar(self.p2_frame_list)\n scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n self.llista = tk.Listbox(self.p2_frame_list, yscrollcommand=scrollbar.set, width=15, height=7)\n self.llista.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n scrollbar.config(command=self.llista.yview)\n\n self.p2_frame_list_1 = tk.Frame(self.list_frame, borderwidth=2, relief=\"groove\")\n self.p2_label_info_1 = ttk.Label(self.p2_frame_list_1, text=self.lang.VP_LOC, 
font=FONT_TITOL)\n self.p2_label_info_1.pack()\n scrollbar_1 = tk.Scrollbar(self.p2_frame_list_1)\n scrollbar_1.pack(side=tk.RIGHT, fill=tk.Y)\n self.llista_1 = tk.Listbox(self.p2_frame_list_1, yscrollcommand=scrollbar_1.set, width=15, height=7)\n self.llista_1.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n scrollbar_1.config(command=self.llista_1.yview)\n\n self.p2_frame_list_2 = tk.Frame(self.list_frame, borderwidth=2, relief=\"groove\")\n self.p2_label_info_2 = ttk.Label(self.p2_frame_list_2, text=self.lang.VP_DATE, font=FONT_TITOL)\n self.p2_label_info_2.pack()\n scrollbar_2 = tk.Scrollbar(self.p2_frame_list_2)\n scrollbar_2.pack(side=tk.RIGHT, fill=tk.Y)\n self.llista_2 = tk.Listbox(self.p2_frame_list_2, yscrollcommand=scrollbar_2.set, width=15, height=7)\n self.llista_2.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n scrollbar_2.config(command=self.llista_2.yview)\n\n self.evo_frame = tk.Frame(self.list_frame)\n self.evo_button = ttk.Button(self.evo_frame, text=self.lang.VP_EVO, command=self.evo_selected)\n self.evo_button.pack()\n self.evo_button.pack_forget()\n\n\n self.p2_frame_elements = tk.Frame(self.data_frame, borderwidth=2, relief=\"groove\")\n self.p2_frame_img = tk.Frame(self.p2_frame_elements)\n self.p2_frame_metadata = tk.Frame(self.p2_frame_elements, width = 20)\n self.p2_label_metadata_code = tk.Label(self.p2_frame_metadata, text=\"\", font=FONT_MSG, width= 15, anchor=\"w\")\n self.p2_label_metadata_code.pack(pady=5)\n self.p2_label_metadata_grade = tk.Label(self.p2_frame_metadata, text=\"\", font=FONT_MSG, width=15, anchor=\"w\")\n self.p2_label_metadata_grade.pack(pady=5)\n self.p2_label_metadata_cm = ttk.Label(self.p2_frame_metadata, text=\"\", font=FONT_MSG, width=15, anchor=\"w\")\n self.p2_label_metadata_cm.pack(pady=5)\n self.assemble_img_frame()", "def clone(self):", "def addComponents(self):\n\n self.mainLayout = QVBoxLayout()\n self.setLayout(self.mainLayout)\n # title\n self.lblTitle = QLabel(self.title)\n self.mainLayout.addWidget(self.lblTitle)\n styleTitle = \"\"\"\nfont-size: 20px; \nfont-style:italic; \nfont-weight: bold; \nmargin:auto;\nmargin-bottom: 1px; \n\"\"\"\n self.lblTitle.setStyleSheet(styleTitle)\n\n # controls\n self.widgetControls = QWidget()\n self.layoutControls = QGridLayout()\n # self.layoutControls.setColumnStretch(0, 4)\n # self.layoutControls.setColumnStretch(1, 4)\n # self.layoutControls.setColumnStretch(2, 4)\n\n self.widgetControls.setLayout(self.layoutControls)\n self.mainLayout.addWidget(self.widgetControls)\n\n # buttons\n styleControls = \"\"\"\n width: 60px; \n height: 50px; \n \"\"\"\n self.buttons = []\n for i in range(self.shapeRow):\n self.buttons.append(self.generateColumnButtons())\n\n for i in range(self.shapeRow):\n for j in range(self.shapeColumn):\n self.buttons[i][j].setStyleSheet(styleControls)\n self.layoutControls.addWidget(self.buttons[i][j], i, j)", "def createFrame(self):\n \n self.outerFrame = f = Tk.Frame(self.frame)\n f.pack(expand=1,fill=\"both\")\n \n if self.label:\n labf = Tk.Frame(f)\n labf.pack(pady=2)\n lab = Tk.Label(labf,text=self.label)\n lab.pack()\n \n f2 = Tk.Frame(f)\n f2.pack(expand=1,fill=\"both\")\n \n self.box = box = Tk.Listbox(f2,height=20,width=30)\n box.pack(side=\"left\",expand=1,fill=\"both\")\n \n bar = Tk.Scrollbar(f2)\n bar.pack(side=\"left\", fill=\"y\")\n \n bar.config(command=box.yview)\n box.config(yscrollcommand=bar.set)", "def build_frames(dialbox):\n #Buttons Frame\n dialbox.button_frame = tk.Frame(dialbox.master_frame)\n dialbox.button_frame.grid(row=3, column=1)\n 
#Output Frame\n dialbox.output_frame = tk.Frame(dialbox.master_frame)\n dialbox.output_frame.grid(row=4, column=0, columnspan=2)", "def setFrameThickness(n=1):\n dislin.frame(n)", "def createFrame(self, module, name):\n if name not in self.data.frames:\n display = module(self.container, self)\n display.grid(row=0, column=0, sticky=\"nsew\")\n self.data.frames[name] = display", "def _create_left_name_frame(self, headers):\n self.frames.append(tk.LabelFrame(self.master))\n self.labels.append([])\n\n for i, name in enumerate(headers):\n self.labels[0].append(tk.Label(self.frames[0], text=name,\n relief=\"groove\"))\n self.labels[0][i].grid(column=i, row=0, sticky=\"ew\")\n self.frames[0].columnconfigure(i, weight=1, minsize=100)\n\n # For .grid one must modify their positions by referencing\n # their parents. Here: the LabelFrame is self.frames[0], and\n # in order to modify the positions, etc. of the Labels *IN*\n # the LabelFrame, one must modify the relevant coordinates in\n # the LabelFrame, not by referencing the Labels:\n # x = Label(parent, ...) {parent=LabelFrame}\n # x.grid(column_in_parent, row_in_parent)\n\n self.frames[0].grid(column=0, row=0, sticky=\"ew\", columnspan=1, padx=10, pady=10)", "def _create_left_info_frame(self, headers):\n\n self.frames.append(tk.Frame(self.master))\n self.entries.append([])\n\n for column, name in enumerate(headers):\n self.frames[2].columnconfigure(column, weight=1)\n self.user_values[name] = []\n for row in range(5):\n self.user_values[name].append(tk.StringVar())\n self.entries[0].append(tk.Entry(self.frames[2],\n textvariable=self.\n user_values[name][row]))\n self.entries[0][(column*5)+row].grid(column=column,\n row=row, sticky=\"nesw\", pady=10)\n self.frames[2].rowconfigure(row, weight=1, minsize=50)\n self.frames[2].grid(column=0, row=1, sticky=\"ew\", padx=10, pady=5)", "def init_layout(self):\n pass", "def layout_board(self):\n raise NotImplementedError", "def __update_diagrams(self):\n for diagram in self.__list:\n diagram.resize(280, 350)\n self.__group_layout.addWidget(diagram, 10, Qt.AlignTop)\n if self.__diagram_count == 1:\n self.add_stretch()\n self.__button_layout.insertWidget(0, self.__start_button)", "def _add_frame(self):\n w = QtWidgets.QWidget(self)\n self.layout().addWidget(w)\n w.setSizePolicy(Policy.Expanding, Policy.Maximum)\n w.setLayout(QtWidgets.QHBoxLayout())\n w.layout().setContentsMargins(0, 0, 0, 0)\n w.layout().setSpacing(0)\n return w", "def new_board(n: int) -> Board:\n\n return [[0 for _ in range(n)] for _ in range(n)]", "def __init__(self, parent=None, name=None, f=0):\n QFrame.__init__(self, parent, name, f)\n \n self.setAcceptDrops(1)\n self.Layout = QHBoxLayout(self,0,0,\"poolLayout\")\n \n self.backplate = QFrame(self, 'backplate')\n self.backplate.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self.Layout.addWidget(self.backplate)\n \n self.trackFrames = []", "def create_layout( self ):\n # highlight all of our widgets so we can debug layouts.\n # XXX: debugging support.\n self.setStyleSheet( \"border: 1px solid black\" )\n\n # vertical layout of the photo preview and everything else.\n main_layout = QVBoxLayout()\n main_layout.setContentsMargins( 0, 0, 0, 0 )\n main_layout.setSpacing( 0 )\n\n # horizontal layout of everything else\n horizontal_layout = QHBoxLayout()\n horizontal_layout.setContentsMargins( 0, 0, 0, 0 )\n horizontal_layout.setSpacing( 0 )\n\n # vertical layout for the selection and the selection type.\n selection_layout = QVBoxLayout()\n selection_layout.setContentsMargins( 
0, 0, 0, 0 )\n selection_layout.setSpacing( 0 )\n\n # selection type label/combo box.\n selection_type_layout = QHBoxLayout()\n selection_type_layout.setContentsMargins( 0, 0, 0, 0 )\n selection_type_layout.setSpacing( 0 )\n selection_type_layout.addWidget( self.selectionBoxLabel )\n selection_type_layout.addWidget( self.selectionBox )\n\n # art record creation/deletion buttons.\n record_modification_layout = QHBoxLayout()\n record_modification_layout.setContentsMargins( 0, 0, 0, 0 )\n record_modification_layout.setSpacing( 0 )\n record_modification_layout.addWidget( self.newRecordButton )\n record_modification_layout.addWidget( self.deleteRecordButton )\n\n selection_layout.addWidget( self.selectionView )\n selection_layout.addLayout( selection_type_layout )\n selection_layout.addLayout( record_modification_layout )\n selection_layout.setStretchFactor( self.selectionView, 1 )\n\n # selected art record information and photo record editing widgets.\n info_and_edit_layout = QGridLayout()\n info_and_edit_layout.setContentsMargins( 0, 0, 0, 0 )\n info_and_edit_layout.setVerticalSpacing( 0 )\n info_and_edit_layout.setHorizontalSpacing( 2 )\n\n # XXX: the layout of these labels is *awful*. need to fix this.\n art_header_label = QLabel( \"<b>Art Record:</b>\" )\n info_and_edit_layout.addWidget( art_header_label,\n 0, 0, 1, 4 )\n\n info_and_edit_layout.setRowStretch( 0, 1 )\n\n type_label = QLabel( \"Type:\" )\n info_and_edit_layout.addWidget( type_label,\n 1, 0 )\n info_and_edit_layout.addWidget( self.artTypeLabel,\n 1, 1 )\n\n size_label = QLabel( \"Size:\" )\n info_and_edit_layout.addWidget( size_label,\n 2, 0 )\n info_and_edit_layout.addWidget( self.artSizeLabel,\n 2, 1 )\n\n quality_label = QLabel( \"Quality:\" )\n info_and_edit_layout.addWidget( quality_label,\n 3, 0 )\n info_and_edit_layout.addWidget( self.artQualityLabel,\n 3, 1 )\n\n date_label = QLabel( \"Date:\" )\n info_and_edit_layout.addWidget( date_label,\n 4, 0 )\n info_and_edit_layout.addWidget( self.artDateLabel,\n 4, 1 )\n\n info_and_edit_layout.setColumnStretch( 1, 1 )\n\n artists_label = QLabel( \"Artists:\" )\n info_and_edit_layout.addWidget( artists_label,\n 1, 2 )\n info_and_edit_layout.addWidget( self.artArtistsLabel,\n 1, 3 )\n\n associates_label = QLabel( \"Associates:\" )\n info_and_edit_layout.addWidget( associates_label,\n 2, 2 )\n info_and_edit_layout.addWidget( self.artAssociatesLabel,\n 2, 3 )\n\n vandals_label = QLabel( \"Vandals:\" )\n info_and_edit_layout.addWidget( vandals_label,\n 3, 2 )\n info_and_edit_layout.addWidget( self.artVandalsLabel,\n 3, 3 )\n\n tags_label = QLabel( \"Tags:\" )\n info_and_edit_layout.addWidget( tags_label,\n 4, 2 )\n info_and_edit_layout.addWidget( self.artTagsLabel,\n 4, 3 )\n\n info_and_edit_layout.setColumnStretch( 3, 1 )\n\n photo_header_label = QLabel( \"<b>Photo Record:</b>\" )\n info_and_edit_layout.addWidget( photo_header_label,\n 5, 0, 1, 4 )\n\n info_and_edit_layout.setRowStretch( 5, 1 )\n\n info_and_edit_layout.addWidget( self.photoProcessingStateComboLabel,\n 6, 0 )\n info_and_edit_layout.addWidget( self.photoProcessingStateComboBox,\n 6, 1 )\n\n info_and_edit_layout.addWidget( self.photoTagsLabel,\n 7, 0 )\n info_and_edit_layout.addWidget( self.photoTagsLineEdit,\n 7, 1,\n 1, 3 )\n\n horizontal_layout.addLayout( selection_layout )\n horizontal_layout.addLayout( info_and_edit_layout )\n horizontal_layout.setStretchFactor( info_and_edit_layout, 1 )\n\n main_layout.addWidget( self.previewArea )\n main_layout.addLayout( horizontal_layout )\n main_layout.setStretchFactor( 
self.previewArea, 1 )\n\n self.centralWidget = QWidget()\n self.centralWidget.setLayout( main_layout )\n\n self.setCentralWidget( self.centralWidget )", "def createLayout(self):\n return _libsbml.LayoutModelPlugin_createLayout(self)", "def __init__(self, layout, player):\n self.layout = [x[:] for x in layout] #this state's layout is a copy\n self.height = len(layout[0])\n self.width = len(layout)\n self.who_played = player\n self.score = self._scoring() #score for this board", "def createWidgets(self):\n layout = QHBoxLayout()\n \n self.logsItem = TestsView.TestsView(parent=self, local = self.local)\n \n self.resumeView = ResumeView.TextualView(parent=self)\n if QtHelper.str2bool( Settings.instance().readValue( key = 'TestRun/hide-resume-view' ) ):\n self.hideResumeView()\n\n self.graphView = GraphView.FlowChartView(parent=self)\n self.logsView = TextualView.TextualView2(parent=self)\n self.hexLogsView = DetailedView.DetailedView(parent=self)\n \n self.displayTab = QTabWidget()\n\n hSplitter = QSplitter(self)\n hSplitter.setOrientation(Qt.Vertical)\n\n hSplitter.addWidget( self.resumeView )\n hSplitter.addWidget( self.logsView )\n hSplitter.addWidget( self.hexLogsView )\n\n self.displayTab.addTab(hSplitter, self.tr('Events') )\n self.displayTab.addTab(self.graphView, self.tr('Diagram') )\n \n defaultTab = Settings.instance().readValue( key = 'TestRun/default-tab-run' )\n self.displayTab.setCurrentIndex(int(defaultTab)) \n \n self.currentEdit = QLineEdit()\n self.currentEdit.setReadOnly(True)\n self.currentEdit.setStyleSheet(\"QLineEdit { background-color : #F0F0F0; color: grey; }\")\n\n leftFrame = QFrame()\n leftLayout = QVBoxLayout()\n leftLayout.setContentsMargins(0, 0, 0, 0) \n leftFrame.setLayout(leftLayout)\n\n leftLayout.addWidget(self.currentEdit)\n leftLayout.addWidget(self.displayTab)\n\n v_splitter = QSplitter(self) \n v_splitter.addWidget( self.logsItem )\n v_splitter.addWidget( leftFrame )\n v_splitter.setStretchFactor(1, 1)\n\n layout.addWidget(v_splitter)\n \n self.setLayout(layout)", "def init_layout(self):\n\n # create hbox layout (upper horizontal box).\n upper_hbox = QHBoxLayout()\n upper_hbox.setContentsMargins(0, 0, 0, 0)\n\n # set widgets to the hbox layout\n upper_hbox.addWidget(self.newFileBtn)\n upper_hbox.addWidget(self.openVideoBtn)\n upper_hbox.addWidget(self.openAnnotationBtn)\n upper_hbox.addWidget(self.saveBtn)\n upper_hbox.addWidget(self.HelpBtn)\n\n upper_hbox.addItem(self.spacerItem1)\n\n upper_hbox.addWidget(self.radioLabel)\n upper_hbox.addWidget(self.zoomRadio)\n upper_hbox.addWidget(self.wideRadio)\n\n upper_hbox.addItem(self.spacerItem2)\n upper_hbox.addWidget(self.resetBtn)\n upper_hbox.addWidget(self.errorLabel)\n\n\n # ---------------------------------------------------------------------------------------\n\n # create hbox layout (middle horizontal box).\n middle_hbox = QHBoxLayout()\n middle_hbox.setContentsMargins(0, 0, 0, 0)\n \n # set widgets to the hbox layout\n middle_hbox.addWidget(self.canvas)\n middle_hbox.addItem(self.spacerItem3)\n middle_hbox.addWidget(self.container)\n\n # ---------------------------------------------------------------------------------------\n\n # create hbox layout (lower horizontal box).\n lower_hbox = QHBoxLayout()\n lower_hbox.setContentsMargins(0, 0, 0, 0)\n\n # set widgets to the hbox layout\n lower_hbox.addWidget(self.playBtn)\n lower_hbox.addWidget(self.stopBtn)\n lower_hbox.addWidget(self.recordLabel)\n lower_hbox.addWidget(self.checkbox)\n lower_hbox.addWidget(self.speedComboLabel)\n 
lower_hbox.addWidget(self.speedCombo)\n lower_hbox.addWidget(self.slider)\n lower_hbox.addWidget(self.durationLabel)\n lower_hbox.addWidget(self.lengthLabel)\n\n\n # ---------------------------------------------------------------------------------------\n\n # create vbox layout (vertical box)\n vboxLayout = QVBoxLayout()\n vboxLayout.addLayout(upper_hbox)\n vboxLayout.addLayout(middle_hbox)\n vboxLayout.addLayout(lower_hbox)\n\n #self.setLayout(vboxLayout)\n self.setLayout(vboxLayout)", "def clone(self):\n \n return TTTBoard(self.dim, self.reverse, self.board)", "def _reshape(self):\n self._frame._setup(width=self._width,height=self._height,\n startx=self._x,starty=self._y)", "def create_right_left_containers(self) -> None:\n self.frame_left = tk.Frame(self, borderwidth=5, relief=tk.GROOVE)\n self.frame_left.grid(row=0, column=0)\n # self.window_left_info = None\n self.frame_right = tk.Frame(self, borderwidth=5, relief=tk.GROOVE)\n self.frame_right.grid(row=0, column=1)", "def _create_button_frame(self):\n\n self.frames.append(tk.Frame(self.master))\n self.row_buttons.append([])\n\n for row in range(5):\n self.row_buttons[0].append(tk.Button(self.frames[3],\n text=\"Go\", height=2, width=4))\n self.row_buttons[0][row].grid(column=0, row=row)\n self.frames[3].rowconfigure(row, weight=1, minsize=50)\n\n self.frames[3].grid(column=1, row=1, sticky=\"ew\")\n self.frames[3].columnconfigure(0, weight=1)", "def gui_layout_edit(self) -> List[List[sg.Element]]:\n\n return []", "def _naive_layout(self) -> List[Tuple[int, int]]:\n positions = []\n width = self.parent_component.width\n height = self.parent_component.height\n padding = self.parent_component.padding\n for child in self.parent_component.children:\n cwidth, cheight = child.width, child.height\n props = child.props\n top = 0\n left = 0\n # handle horizontal and vertical alignments\n if props.h_alignment is HAlignment.LEFT:\n left = child.margin[0] + padding[0]\n elif props.h_alignment is HAlignment.CENTER:\n left = width / 2 - cwidth / 2\n elif props.h_alignment is HAlignment.RIGHT:\n left = width - (child.margin[1] + cwidth + padding[1])\n \n if props.v_alignment is VAlignment.TOP:\n top = child.margin[2] + padding[2]\n elif props.v_alignment is VAlignment.CENTER:\n top = height / 2 - cheight / 2\n elif props.v_alignment is VAlignment.BOTTOM:\n top = height - (child.margin[3] + cheight + padding[3])\n \n offset = child.offset\n positions.append((left + offset[0], top + offset[1]))\n return positions", "def layout(self):\n pass", "def initUI(self):\n # Set the main layout component.\n self.mainLayout = QVBoxLayout()\n\n if(self.__itemListSize > 0):\n self.mainLayout.setSpacing(0)\n self.mainLayout.setContentsMargins(0, 0, 0, 0)\n\n # Build the loop for QHBoxLayout creation.\n for y in range(self.__ySize):\n # Creating the horizontal layout for X.\n horizontalLayout = QHBoxLayout()\n horizontalLayout.setSpacing(0)\n horizontalLayout.setSizeConstraint(QLayout.SetMaximumSize)\n horizontalLayout.setContentsMargins(0, 0, 0, 0)\n\n for x in range(self.__xSize):\n itemCount = x + y * self.__xSize\n\n if(itemCount < self.__itemListSize):\n # Create the widget.\n item = self.__itemList[itemCount]\n \n else:\n # Add empty string if no item available to keep grid.\n item = QLabel(\"\")\n \n item.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n item.setMinimumWidth(self.__parentGeometry.width()/self.__xSize - 10)\n horizontalLayout.addWidget(item)\n \n self.mainLayout.addLayout(horizontalLayout)\n \n else:\n self.textDisplay = 
QLabel(self.__emptyLabel)\n self.mainLayout.addWidget(self.textDisplay)\n\n # Set main layout to the window.\n self.setLayout(self.mainLayout)", "def create_sat_panels(self):\n\n # Dimensions of body\n SAT_SIZE = self.ANI_SCALE*self.SAT_SCALE*np.asarray(self.SAT_PROPS[\"Size\"])/2\n bx = SAT_SIZE[0]\n by = SAT_SIZE[1]\n bz = SAT_SIZE[2]\n\n # Panel length\n L = bx\n\n # Panels\n theta = self.PANEL_ANGLE*pi/180\n px1 = bx - L*sin(theta)\n py1 = by + L*cos(theta)\n pz1 = bz\n px2 = px1 + L*sin(theta)\n py2 = py1 + L*cos(theta)\n pz2 = pz1\n\n # Vertices\n V = [\n (-bx, by, -bz),\n (-bx, by, bz),\n (-px1, py1, pz1),\n (-px1, py1, -pz1),\n (-px1, py1, -pz1),\n (-px1, py1, pz1),\n (-px2, py2, pz2),\n (-px2, py2, -pz2),\n (-bx, -by, -bz),\n (-bx, -by, bz),\n (-px1, -py1, pz1),\n (-px1, -py1, -pz1),\n (-px1, -py1, -pz1),\n (-px1, -py1, pz1),\n (-px2, -py2, pz2),\n (-px2, -py2, -pz2)\n ]\n\n # Create faces\n F = [\n (0, 1, 2, 3),\n (4, 5, 6, 7),\n (8, 9, 10, 11),\n (12, 13, 14, 15)\n ]\n\n # Create building blocks of polydata\n sat = vtk.vtkPolyData()\n points = vtk.vtkPoints()\n polys = vtk.vtkCellArray()\n scalars = vtk.vtkFloatArray()\n\n # Load the point, cell and data attributes\n for i in range(len(V)):\n points.InsertPoint(i, V[i])\n for i in range(len(F)):\n polys.InsertNextCell(self.mkVtkIdList(F[i]))\n for i in range(len(V)):\n scalars.InsertTuple1(i, i)\n \n # Assign the pieces to the vtkPolyData.\n sat.SetPoints(points)\n del points\n sat.SetPolys(polys)\n del polys\n sat.GetPointData().SetScalars(scalars)\n del scalars\n\n # Mapper\n mapper = vtk.vtkPolyDataMapper()\n mapper.SetInputData(sat)\n mapper.ScalarVisibilityOff()\n\n # Actor\n actor = vtk.vtkActor()\n actor.SetMapper(mapper)\n actor.GetProperty().SetColor(0., 0., 0.8)\n actor.GetProperty().SetAmbient(0.5)\n actor.GetProperty().SetSpecular(.5)\n actor.GetProperty().SetSpecularPower(10.0)\n actor.GetProperty().SetDiffuse(0.2)\n\n # Move to sat position\n actor.SetPosition(0, 0, -self.SAT_PROPS[\"Alt\"])\n\n return actor", "def changement_frame(self):\n\n for widget in self.fenetre_scores.winfo_children():\n widget.pack_forget()\n\n for widget in self.fenetre_regles.winfo_children():\n widget.pack_forget()\n\n for widget in self.frame_jeu.winfo_children():\n widget.pack_forget()\n\n for widget in self.winfo_children():\n if widget != self.titre:\n widget.pack_forget()", "def layout(self, width, height):\n raise NotImplementedError", "def relayout(self): \n\t\t#self.urmaswin.Layout()\n\t\t#wx.CallAfter(self.urmaswin.Layout)\n\t\t#wx.CallAfter(self.visualizer.OnSize)", "def repeat(self, n, keys=None, axis=1, wrap_kwargs=None):\n repeated = reshape_fns.repeat(self._obj, n, axis=axis)\n if keys is not None:\n if axis == 1:\n new_columns = index_fns.combine_indexes(self.wrapper.columns, keys)\n return repeated.vbt.wrapper.wrap(\n repeated.values, **merge_dicts(dict(columns=new_columns), wrap_kwargs))\n else:\n new_index = index_fns.combine_indexes(self.wrapper.index, keys)\n return repeated.vbt.wrapper.wrap(\n repeated.values, **merge_dicts(dict(index=new_index), wrap_kwargs))\n return repeated", "def __init__(self, size):\n self.size = size\n self.n = 0\n self.elements = [[0, None] for i in range(size+1)]\n self.positions = [0 for i in range(size+1)]", "def create_layout_grid(self, convolve, first_kernel):\n\n layout_grid = QVBoxLayout()\n\n for i in range(3):\n layout_column = QHBoxLayout()\n\n for j in range(3):\n sb_kernel_cell = QSpinBox()\n sb_kernel_cell.setMinimum(-100)\n sb_kernel_cell.setMaximum(100)\n 
sb_kernel_cell.setAlignment(Qt.AlignCenter)\n\n if first_kernel:\n sb_kernel_cell.setValue(self.kernel1_values[i][j])\n else:\n sb_kernel_cell.setValue(self.kernel2_values[i][j])\n\n sb_kernel_cell.valueChanged.connect(\n lambda value, index=(i, j), first=first_kernel: convolve.update_kernel_value(index, value, first)\n )\n\n layout_column.addWidget(sb_kernel_cell)\n\n widget = QWidget()\n widget.setLayout(layout_column)\n layout_grid.addWidget(widget)\n\n return layout_grid", "def repeat(self, n, new_axis, domain=None):\n if new_axis is None:\n new_axis = self.get_new_axis_name()\n\n return stack_cuboids([self.copy() for i in xrange(n)], new_axis, domain)", "def _split_vars(self, n_row):\n self.embedding_row_ = self.embedding_[:n_row]\n self.embedding_col_ = self.embedding_[n_row:]\n self.embedding_ = self.embedding_row_\n return self", "def clone(self):\n return _libsbml.Port_clone(self)", "def __clone_layout_placeholder(self, layout_ph):\n id = self.__next_shape_id\n ph_type = layout_ph.type\n orient = layout_ph.orient\n shapename = self.__next_ph_name(ph_type, id, orient)\n\n sp = self.__new_placeholder_sp(layout_ph, id, ph_type, orient,\n shapename)\n self.__spTree.append(sp)\n shape = Shape(sp)\n self.__shapes.append(shape)\n return shape", "def initialize(self, frame):\n self.grid_size = 5\n\n Label(frame, text=\"Grid Size:\").grid(row=0)\n\n self.e1 = Scale(frame, from_=self.grid_size, to=25, orient=HORIZONTAL)\n self.e1.grid(row=0, column=1)\n\n return self.e1", "def make_tab(self, root):\n self.frame = Frame(root)\n self.make_entry(self.frame)\n self.make_display(self.frame)\n return self.frame", "def next(self):\n if self.index.get() != len(self.frame_list) - 1:\n for i in range(len(self.frame_list)):\n self.frame_list[i].pack_forget()\n\n self.index.set(self.index.get() + 1)\n self.frame_list[self.index.get()].pack(fill=\"both\", expand=True)\n\n self.work_out_pages()", "def createFrame(self):\n \n tkinterListBoxDialog.createFrame(self)\n \n f = Tk.Frame(self.outerFrame)\n f.pack()\n \n self.addStdButtons(f)", "def _create_frames(self, column, rows):\n pass", "def make(self) -> None:\n\n # arbitrarily selecting the first image from the list, index 0\n with Image.open(self.image_list[0]) as first_frame_image_in_list:\n\n # Find the width and height of the first image of the list.\n # Assuming all the images have same size.\n frame_image_width, frame_image_height = first_frame_image_in_list.size\n\n # scale is the ratio of collage_image_width and product of\n # images_per_row_in_collage with frame_image_width.\n\n # The scale will always lie between 0 and 1, which implies that\n # the images are always going to get downsized.\n scale = (self.collage_image_width) / (\n self.images_per_row_in_collage * frame_image_width\n )\n\n # Calculating the scaled height and width for the frame image.\n scaled_frame_image_width = ceil(frame_image_width * scale)\n scaled_frame_image_height = ceil(frame_image_height * scale)\n\n # Divide the number of images by images_per_row_in_collage. 
The latter\n # was calculated by taking the square root of the total number of images.\n number_of_rows = ceil(self.number_of_images / self.images_per_row_in_collage)\n\n # Multiplying the height of one downsized image with number of rows.\n # Height of 1 downsized image is product of scale and frame_image_height\n # Total height is number of rows times the height of one downsized image.\n self.collage_image_height = ceil(scale * frame_image_height * number_of_rows)\n\n # Create an image of passed collage_image_width and calculated collage_image_height.\n # The downsized images will be pasted on this new base image.\n # The image is 0,0,0 RGB(black).\n collage_image = Image.new(\n \"RGB\", (self.collage_image_width, self.collage_image_height)\n )\n\n # keep track of the x and y coordinates of the resized frame images\n i, j = (0, 0)\n\n # iterate the frames and paste them on their position on the collage_image\n for count, frame_path in enumerate(self.image_list):\n\n # Set the x coordinate to zero if we are on the first column\n # If self.images_per_row_in_collage is 4\n # then 0,4,8 and so on should have their x coordinate as 0\n if (count % self.images_per_row_in_collage) == 0:\n i = 0\n\n # open the frame image, must open it to resize it using the thumbnail method\n frame = Image.open(frame_path)\n\n # scale the opened frame images\n frame.thumbnail(\n (scaled_frame_image_width, scaled_frame_image_height), Image.ANTIALIAS\n )\n\n # set the value of x to that of i's value.\n # i is set to 0 if we are on the first column.\n x = i\n\n # It ensures that the y coordinate stays the same for any given row.\n # The floor of a real number is the largest integer that is less\n # than or equal to the number. Floor division is used because of\n # the zero-based indexing; the floor of the division stays the same\n # for an entire row as the decimal values are neglected by the floor.\n # For the first row the result of floor division is always zero and\n # the product of 0 with scaled_frame_image_height is also zero, so the\n # y coordinate for the first row is 0.\n # For the second row the result of floor division is one and the product\n # with scaled_frame_image_height ensures that the y coordinate is\n # scaled_frame_image_height below the first row.\n y = (j // self.images_per_row_in_collage) * scaled_frame_image_height\n\n # paste the frame image on the newly created base image (base image is black)\n collage_image.paste(frame, (x, y))\n frame.close()\n\n # increase the x coordinate by scaled_frame_image_width\n # to get the x coordinate of the next frame. Unless the next image\n # will be on the very first column this will be the x coordinate.\n i = i + scaled_frame_image_width\n\n # increase the value of j by 1, this is to calculate the y coordinate of\n # next image. 
The increased number will be floor divided by images_per_row_in_collage,\n # therefore the y coordinate stays the same for any given row.\n j += 1\n\n # save the base image with all the scaled frame images embedded on it.\n collage_image.save(self.output_path)\n collage_image.close()", "def LayoutComponents(self):\n sizer = wx.BoxSizer(wx.VERTICAL)\n\n # Add header\n header = self.FindWindow(\"header\")\n if header is not None:\n sizer.Add(header, 0, wx.EXPAND, 0)\n sizer.Add(self.HorizLine(self), 0, wx.ALL | wx.EXPAND, 0)\n\n # Add content\n content = self.FindWindow(\"content\")\n if content is not None:\n sizer.Add(content, 1, wx.EXPAND, 0)\n else:\n sizer.AddSpacer(1)\n\n # Add action buttons\n actions = self.FindWindow(\"actions\")\n if actions is not None:\n sizer.Add(self.HorizLine(self), 0, wx.ALL | wx.EXPAND, 0)\n # proportion is 0 to ask the sizer to never hide the buttons\n sizer.Add(actions, 0, wx.EXPAND, 0)\n\n # Since Layout doesn't happen until there is a size event, you will\n # sometimes have to force the issue by calling Layout yourself. For\n # example, if a frame is given its size when it is created, and then\n # you add child windows to it, and then a sizer, and finally Show it,\n # then it may not receive another size event (depending on platform)\n # in order to do the initial layout. Simply calling self.Layout from\n # the end of the frame's __init__ method will usually resolve this.\n self.SetSizer(sizer)\n self.Layout()", "def new_frame(self):\n self.eyes_frame = numpy.zeros(self.shape, numpy.uint8)", "def __init__(self, total_length:int, initial_y:int, screen, number:int, debug:bool = False):\n\n #Call the superclass\n super().__init__()\n\n #Create the group of blocks based on x and y and add them to the group\n for k in range(number):\n for i in range(-1,2):\n for j in range(-2,3):\n self.add(Block(total_length * (k+1) // (number+1) + 10*j, initial_y + 10*i, screen, debug = debug))", "def create_mplframe(self):\r\n #create figure and axes objects\r\n self.fig = Figure()\r\n self.subplot = self.fig.add_subplot(111)\r\n #disable axis, because it will only show an image\r\n self.subplot.get_yaxis().set_visible(False)\r\n self.subplot.get_xaxis().set_visible(False)\r\n \r\n #create canvas and toolbar\r\n self.canvas = FigureCanvas(self.fig)\r\n self.toolbar = MyNavigationToolbar(self.canvas, None)\r\n\r\n #add the canvas and toolbar to the gui\r\n self.ui.imageLayout.addWidget(self.canvas)\r\n self.ui.imageLayout.addWidget(self.toolbar)\r\n\r\n #connect the toolbar selection to matplotlib as a callback\r\n self.canvas.mpl_connect('selection_changed',self.toolbar_selection)", "def create_open_positions_grid(self):\n\n counter = 0\n col = 0\n row = 0\n\n for i in range(0, 99):\n if counter % 3 == 0:\n col = 0\n row += 1\n self.gp.addWidget(PositionPanel(), row, col)\n counter += 1\n col += 1", "def _build_gui(self):\n vlayout = qt.QVBoxLayout()\n \n box = qt.QHBoxLayout()\n box.addWidget(self._but)\n\n lab = self._lab\n Pol = qt.QSizePolicy\n lab.setSizePolicy(Pol.Expanding, Pol.Preferred)\n lab.setFrameStyle(qt.QLabel.Panel)\n box.addWidget(lab)\n title = u\"Select a mesh from the Salomé object browser\"\n vlayout.addWidget(qt.QLabel(title))\n vlayout.addLayout(box)\n return vlayout", "def create_panel(widgetmatrix, stretch, valign, halign, height, width, widgetsize):\n panel = []\n i = 0\n for widgetlist in widgetmatrix:\n j = 0\n for w in widgetlist:\n w.layout.padding = '10px'\n if (widgetsize[i][j][0] != 0 and widgetsize[i][j][1] != 0):\n w.layout.height = 
str(widgetsize[i][j][0]) + 'px'\n w.layout.width = str(widgetsize[i][j][1]) + 'px'\n if stretch[i][j] == 1:\n w.layout.flex = '1 0 auto'\n if valign[i][j] != 0:\n if valign[i][j] == 'top':\n w.layout.align_self = 'flex-start'\n if valign[i][j] == 'center':\n w.layout.align_self = 'center'\n if valign[i][j] == 'bottom':\n w.layout.align_self = 'flex-end'\n j += 1\n\n justify = halign[i]\n if justify == 'left':\n justify = 'flex-start'\n elif justify == 'middle':\n justify = 'center'\n elif justify == 'right':\n justify = 'flex-end'\n\n panel.append(HBox(widgetlist, layout=Layout(padding=\"15px\", justify_content=justify)))\n i += 1\n if height == 0 and width == 0:\n display(VBox(panel, layout=Layout(border=\"solid\")))\n else:\n h = str(height) + \"px\"\n w = str(width) + \"px\"\n display(VBox(panel, layout=Layout(border=\"solid\", height=h, width=w)))", "def clones(module, n):\n return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])" ]
[ "0.64056253", "0.64034027", "0.6147005", "0.6052775", "0.5999924", "0.59923536", "0.58694065", "0.5848677", "0.5804697", "0.58001226", "0.5778056", "0.5773629", "0.5630633", "0.55947256", "0.5549548", "0.54448736", "0.54406804", "0.54355043", "0.5418124", "0.5387226", "0.5353704", "0.53511816", "0.5310975", "0.5306682", "0.5263236", "0.52563494", "0.5242236", "0.5231109", "0.5229966", "0.5225119", "0.5212293", "0.5199909", "0.5190505", "0.51891667", "0.51864153", "0.51616246", "0.5159119", "0.5139364", "0.5123899", "0.51102185", "0.5109634", "0.5105513", "0.51049656", "0.50927883", "0.50867146", "0.50859046", "0.5080754", "0.50702524", "0.50593674", "0.50543857", "0.5050489", "0.5034965", "0.5033052", "0.5028522", "0.5021395", "0.5014988", "0.50095654", "0.50033146", "0.49974003", "0.49879465", "0.4980774", "0.49797866", "0.4979672", "0.49729538", "0.49454406", "0.49405468", "0.49389544", "0.49376154", "0.49266276", "0.49208733", "0.49201548", "0.49122563", "0.4905071", "0.48985645", "0.48977363", "0.48933583", "0.48928997", "0.48880446", "0.4886361", "0.48770067", "0.48735526", "0.48730004", "0.48719597", "0.4870281", "0.4869689", "0.48681036", "0.48675215", "0.48645234", "0.48628256", "0.48596957", "0.4855901", "0.48556474", "0.48544234", "0.4853064", "0.48406655", "0.4834583", "0.48330483", "0.4819152", "0.4817186", "0.48148274", "0.4809354" ]
0.0
-1
Playing with spatial convs after transpose convolutions. Recent testing suggests this is the best setup so far. Revisit the idea of interleaving spatial convolutions between the transpose layers to achieve more cleanly defined shapes.
def decoder_setup_1():
    decoder = RetinaDecoder(
        # pre-pooling
        {'op': 'avg', 'kernel': (1, 2, 2), 'causal': True},
        # grouped temporal conv stacks:
        [
            {
                'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),
                'stride': 1, 'groups': 15, 'activation': nn.ReLU,
                'pool': {'op': 'avg', 'kernel': (2, 2, 2), 'causal': True}
            }
        ],
        # spatial conv layers: {in, out, kernel, stride}
        [
            # {'in': 15, 'out': 64, 'kernel': (1, 3, 3), 'stride': 1}
        ],
        # for each ConvRNN cell:
        [],
        # temporal convolution stack(s)
        [
            {
                'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3),
                'stride': 1, 'groups': 1, 'activation': nn.ReLU
            }
        ],
        # ConvTranspose layers: {in, out, kernel, stride}
        [
            {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 2, 2)},
            {'in': 64, 'out': 16, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},
        ],
        # post conv layers
        [
            {'in': 16, 'out': 8, 'kernel': (1, 3, 3), 'stride': 1},
            {'in': 8, 'out': 1, 'kernel': (1, 1, 1), 'stride': 1}
        ],
    )
    return decoder
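The spatial conv block in decoder_setup_1 is still commented out; the query's idea is to slot a spatial convolution after each transpose layer instead. A minimal sketch of that interleaving, assuming plain PyTorch modules — interleaved_upsample is a hypothetical name and the channel sizes are borrowed from the config above, not RetinaDecoder's actual wiring:

import torch
import torch.nn as nn

def interleaved_upsample():
    # Upsample, then immediately smooth with a spatial-only conv, twice over.
    return nn.Sequential(
        # (N, 128, D, H, W) -> roughly doubled in every dimension
        nn.ConvTranspose3d(128, 64, kernel_size=(3, 3, 3), stride=(2, 2, 2)),
        nn.ReLU(),
        # 1x3x3 spatial conv: acts only on H and W, leaves D untouched
        nn.Conv3d(64, 64, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
        nn.ReLU(),
        # second upsample: spatial stride only, matching the config above
        nn.ConvTranspose3d(64, 16, kernel_size=(3, 3, 3), stride=(1, 2, 2)),
        nn.ReLU(),
        nn.Conv3d(16, 16, kernel_size=(1, 3, 3), padding=(0, 1, 1)),
        nn.ReLU(),
    )

x = torch.randn(1, 128, 8, 14, 14)  # (N, C, D, H, W)
print(interleaved_upsample()(x).shape)  # torch.Size([1, 16, 19, 59, 59])

The interleaved 1x3x3 convs are a common way to smooth the checkerboard patterns that stride-2 transpose convolutions tend to leave behind, which is presumably what "more cleanly defined shapes" is after.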
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_transposed_conv2d_model(self):\n tf.compat.v1.reset_default_graph()\n\n _ = transposed_conv2d_model()\n\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'], ['conv2d_transpose/BiasAdd'])\n self.assertEqual(conn_graph.get_all_ops()['conv2d_transpose/conv2d_transpose'].type, 'Conv2DTranspose')", "def schedule_conv2d_transpose(cfg, outs):\n outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs\n s = tvm.create_schedule([x.op for x in outs])\n scheduled_ops = []\n\n def traverse(op):\n \"\"\"Traverse operators from computation graph\"\"\"\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_injective(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:\n traverse(tensor.op)\n\n if 'conv2d_transpose_nchw' in op.tag:\n C = op.output(0)\n\n N, OC, OH, OW = C.op.axis\n rc, ry, rx = C.op.reduce_axis\n\n OH, oh = s[C].split(OH, factor=2)\n OC, oc = s[C].split(OC, factor=32)\n IC, ic = s[C].split(rc, factor=32)\n\n s[C].reorder(N, OC, OH, OW, oc, IC, ry, rx, ic)\n N = s[C].fuse(N, OC)\n s[C].vectorize(oc)\n s[C].parallel(N)\n\n scheduled_ops.append(op)\n\n traverse(outs[0].op)\n return s", "def test_multiple_transpose_conv2d(self):\n\n tf.compat.v1.reset_default_graph()\n with tf.device('/cpu:0'):\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Conv2DTranspose(1, (4, 4), input_shape=(28, 28, 3)))\n model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n model.add(tf.keras.layers.Conv2DTranspose(1, (4, 4), input_shape=(28, 28, 3)))\n model.add(tf.keras.layers.MaxPooling2D((2, 2)))\n model.summary()\n\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), [model.input.op.name], [model.output.op.name])\n self.assertEqual(conn_graph.get_all_ops()['conv2d_transpose/conv2d_transpose'].type, 'Conv2DTranspose')\n self.assertEqual(conn_graph.get_all_ops()['conv2d_transpose_1/conv2d_transpose'].type, 'Conv2DTranspose')", "def out_tconv(spatial, config):\n p, k, s = [config[k] \n for k in ['padding', 'kernel_size', 'stride']]\n p2 = p if isinstance(p, int) else p[0] + p[1]\n p_out = config.get('output_padding', 0)\n return (spatial-1)*s - p2 + k + p_out", "def testTransposeNCHW(self, use_bias, use_output_shape):\n output_shape = tf.TensorShape((4, 5))\n\n conv2_transpose = snt.Conv2DTranspose(\n output_channels=5,\n output_shape=output_shape if use_output_shape else None,\n kernel_shape=3,\n padding=snt.VALID,\n stride=1,\n name=\"conv2_transpose\",\n use_bias=use_bias,\n data_format=conv.DATA_FORMAT_NCHW)\n conv2 = conv2_transpose.transpose()\n\n # Check kernel shapes, strides and padding match.\n self.assertEqual(conv2_transpose.kernel_shape, conv2.kernel_shape)\n self.assertEqual((1,) + conv2_transpose.stride[1:3] + (1,), conv2.stride)\n self.assertEqual(conv2_transpose.padding, conv2.padding)\n\n # Before conv2_transpose is connected, we cannot know how many\n # `output_channels` conv1 should have.\n err = \"Variables in conv2_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n _ = conv2.output_channels\n\n # After connection the number of `output_channels` is known.\n batch_size = 32\n in_height = 2\n in_width = 3\n in_channels = 4\n x = tf.constant(np.random.randn(batch_size, in_channels, in_height,\n in_width),\n dtype=np.float32)\n conv2_transpose(x)\n self.assertEqual(in_channels, conv2.output_channels)\n\n # As is `output_channels`.\n 
self.assertEqual(output_shape, conv2_transpose.output_shape)\n\n # However, even after connection, the `input_shape` of the forward\n # convolution is not known until it is itself connected (i.e. it can be\n # connected to a different shape input from the `output_shape` of the\n # transpose convolution!)\n err = \"Variables in conv2_transpose_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n self.assertEqual(conv2_transpose.output_shape, conv2.input_shape)", "def _transpose_by_2_vnchwconv_not_last_dim(tik_inst, dst, src, sub_dim_size):\n\n # whether the sub_h_size is block align or not should be decided before transferring in\n sub_axis_1, sub_axis_0, axis_2 = sub_dim_size\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n axis_2_block_cnt = _ceil_div(axis_2, data_size_one_block)\n fp16_src = src.reinterpret_cast_to(\"float16\")\n fp16_dst = dst.reinterpret_cast_to(\"float16\")\n fp16_data_one_block = _get_elment_cnt_one_block(\"float16\")\n # vnchwconv get two bytes per time\n if src.dtype.lower() in (\"float32\", \"int32\", \"uint32\"):\n vnc_one_line_len = axis_2_block_cnt * data_size_one_block * sub_axis_1 * sub_axis_0 * 2\n elif src.dtype.lower() in (\"float16\", \"int16\", \"uint16\"):\n vnc_one_line_len = axis_2_block_cnt * data_size_one_block * sub_axis_1 * sub_axis_0\n else:\n error_detail = \"not support the dtype\"\n error_manager_vector.raise_err_two_input_dtype_invalid(\"transpose_d\", \"in_dtype\",\n \"dst_dtype\", error_detail)\n\n # do 16hc to hc16 transfer\n src_addr_list = [fp16_src[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(1)\n dst_stride.set_as(16)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)\n\n # do sub_axis_1*sub_axis_0*16 to sub_axis_1*sub_axis_0*axis_2 transfer\n with tik_inst.for_range(0, sub_axis_1) as sub_axis_1_idx:\n tik_inst.data_move(\n fp16_src[sub_axis_1_idx * sub_axis_0 * axis_2 * fp16_data_one_block * 2],\n fp16_dst[sub_axis_1_idx * sub_axis_0 * fp16_data_one_block * fp16_data_one_block],\n 0, sub_axis_0, 2 * axis_2, fp16_data_one_block - 2 * axis_2, 0)\n\n # do ch16 to 16ch transfer\n src_addr_list = [fp16_src[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(16)\n dst_stride.set_as(1)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)", "def testTransposeNCW(self, batch_size, in_length, in_channels, out_channels,\n kernel_shape, padding, use_bias, out_shape,\n stride_shape, use_output_shape):\n\n conv1_transpose = snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n 
name=\"conv1_transpose\",\n use_bias=use_bias,\n data_format=conv.DATA_FORMAT_NCW)\n conv1 = conv1_transpose.transpose()\n\n # Check kernel shapes, strides and padding match.\n self.assertEqual(conv1_transpose.kernel_shape, conv1.kernel_shape)\n self.assertEqual((1, 1, conv1_transpose.stride[2]), conv1.stride)\n self.assertEqual(conv1_transpose.padding, conv1.padding)\n\n # Before conv1_transpose is connected, we cannot know how many\n # `output_channels` conv1 should have.\n err = \"Variables in conv1_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n conv1.output_channels # pylint: disable=pointless-statement\n\n # After connection the number of `output_channels` is known.\n x = tf.constant(np.random.randn(batch_size, in_channels, in_length),\n dtype=np.float32)\n conv1_transpose(x)\n self.assertEqual(in_channels, conv1.output_channels)\n\n # As is `output_shape`.\n self.assertIn(out_shape, conv1_transpose.output_shape)\n\n # However, even after connection, the `input_shape` of the forward\n # convolution is not known until it is itself connected (i.e. it can be\n # connected to a different shape input from the `output_shape` of the\n # transpose convolution!)\n err = \"Variables in conv1_transpose_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n self.assertEqual(conv1_transpose.output_shape, conv1.input_shape)", "def schedule_conv2d_nchw_spatial_pack(cfg, outs):\n s = te.create_schedule([x.op for x in outs])\n\n def _callback(op):\n # schedule conv2d\n if \"spatial_conv2d_output\" in op.tag:\n _schedule_spatial_pack(cfg, s, op, layout=\"NCHW\")\n\n traverse_inline(s, outs[0].op, _callback)\n return s", "def _transpose_by_1_vnchwconv():\n\n pass", "def schedule_conv2d_nhwc_spatial_pack(cfg, outs):\n s = te.create_schedule([x.op for x in outs])\n\n def _callback(op):\n # schedule conv2d\n if \"spatial_conv_output_NHWC\" in op.tag:\n _schedule_spatial_pack(cfg, s, op, layout=\"NHWC\")\n\n traverse_inline(s, outs[0].op, _callback)\n return s", "def _transpose_by_2_vnchwconv(tik_inst, dst, src, sub_hw_size):\n\n # whether the sub_h_size is block align or not should be decided before transferring in\n sub_h_size, sub_w_size = sub_hw_size\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n w_block_cnt = _ceil_div(sub_w_size, data_size_one_block)\n fp16_src = src.reinterpret_cast_to(\"float16\")\n fp16_dst = dst.reinterpret_cast_to(\"float16\")\n fp16_data_one_block = _get_elment_cnt_one_block(\"float16\")\n # vnchwconv get two bytes per time\n if src.dtype.lower() in (\"float32\", \"int32\", \"uint32\"):\n vnc_one_line_len = w_block_cnt * data_size_one_block * sub_h_size * 2\n elif src.dtype.lower() in (\"float16\", \"int16\", \"uint16\"):\n vnc_one_line_len = w_block_cnt * data_size_one_block * sub_h_size\n else:\n error_detail = \"not support the dtype\"\n error_manager_vector.raise_err_two_input_dtype_invalid(\"transpose_d\", \"in_dtype\",\n \"dst_dtype\", error_detail)\n\n # do 16hc to hc16 transfer\n src_addr_list = [fp16_src[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(1)\n dst_stride.set_as(16)\n 
tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)\n\n # do hc16 to ch16 transfer\n with tik_inst.if_scope(sub_h_size > sub_w_size):\n with tik_inst.for_range(0, sub_w_size) as w_size_idx:\n tik_inst.data_move(\n fp16_src[w_size_idx * sub_h_size * fp16_data_one_block * 2],\n fp16_dst[w_size_idx * fp16_data_one_block * 2],\n 0, sub_h_size, 2, (w_block_cnt * data_size_one_block - 1) * 2, 0)\n with tik_inst.else_scope():\n with tik_inst.for_range(0, sub_h_size) as h_size_idx:\n tik_inst.data_move(\n fp16_src[h_size_idx * fp16_data_one_block * 2],\n fp16_dst[h_size_idx * w_block_cnt * data_size_one_block * fp16_data_one_block * 2],\n 0, sub_w_size, 2, 0, (sub_h_size - 1) * 2)\n\n # do ch16 to 16ch transfer\n src_addr_list = [fp16_src[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(16)\n dst_stride.set_as(1)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)", "def test_tpose1230(self):\n\n conv_params = {\n 'stride': 2,\n 'pad': 1\n }\n\n nr_img = 2;\n sz_img = 4;\n nr_in_channel = 3;\n sz_filter = 4;\n nr_filter = 3;\n\n a = np.random.randn(2, 1, 3, 2)\n p = np.array([1, 0, 1, 0, 1, 0, 1, 1, 1, 2, 3, 2, 1, 0, 1, 2, 1, 2]).reshape(1, 2, 3, 3)\n x = np.linspace(-.1, .5, 2 * 3 * 4 * 4).reshape(2, 3, 4, 4)\n w = np.linspace(-0.2, 0.3, 3 * 3 * 4 * 6).reshape(3, 3, 4, 6)\n\n # self.assertEqual(tpose1230(p).all(), p.transpose(1, 2, 3, 0).all())\n # self.assertEqual(tpose1230(w).all(), w.transpose(1, 2, 3, 0).all())\n # self.assertEqual(tpose1230(x).all(), x.transpose(1, 2, 3, 0).all())\n\n\n self.assertTrue(np.array_equal(tpose1230(a), a.transpose(1, 2, 3, 0)))\n self.assertTrue(np.array_equal(tpose1230(p), p.transpose(1, 2, 3, 0)))\n self.assertTrue(np.array_equal(tpose1230(w), w.transpose(1, 2, 3, 0)))\n self.assertTrue(np.array_equal(tpose1230(x), x.transpose(1, 2, 3, 0)))\n\n self.assertEqual(a.shape[0], a.transpose(1, 2, 3, 0).shape[3])\n self.assertEqual(a.shape[1], a.transpose(1, 2, 3, 0).shape[0])\n self.assertEqual(a.shape[2], a.transpose(1, 2, 3, 0).shape[1])\n self.assertEqual(a.shape[3], a.transpose(1, 2, 3, 0).shape[2])\n\n # print()\n # print(tpose1230(p).flatten())\n # print()\n # print(list(p.transpose(1, 2, 3, 0).flatten()))\n # print()\n # print(list(x.transpose(1, 2, 3, 0).flatten()))\n # print()\n # print(list(w.transpose(1, 2, 3, 0).flatten()))", "def forward(self, input_tensor):\n last = input_tensor\n for module in self.projection:\n projection = module(last)\n last = torch.cat((last, projection), -1)\n projection = last\n\n intermediate = self.seed(projection)\n intermediate = intermediate.view((-1, 512, 3, 3))\n\n projection_2d = projection.view((-1, self.projection_dim, 1, 1))\n projection_2d = self.projection_upscaler(projection_2d)\n\n for i, (conv, upscaling) in enumerate(zip(self.conv, self.upscaling)):\n if i + 1 != len(self.upscaling):\n if i > 0:\n intermediate = torch.cat((intermediate, projection_2d), 1)\n intermediate = torch.nn.functional.pixel_shuffle(intermediate, 2)\n intermediate = conv(intermediate)\n projection_2d = upscaling(projection_2d)\n\n r_space = 
self.colourspace_r(projection)\n r_space = r_space.view((-1, 16, 1, 1))\n r_space = self.colourspace_upscaler(r_space)\n r_space = intermediate * r_space\n r_space = torch.sum(r_space, dim=1, keepdim=True)\n\n g_space = self.colourspace_g(projection)\n g_space = g_space.view((-1, 16, 1, 1))\n g_space = self.colourspace_upscaler(g_space)\n g_space = intermediate * g_space\n g_space = torch.sum(g_space, dim=1, keepdim=True)\n\n b_space = self.colourspace_b(projection)\n b_space = b_space.view((-1, 16, 1, 1))\n b_space = self.colourspace_upscaler(b_space)\n b_space = intermediate * b_space\n b_space = torch.sum(b_space, dim=1, keepdim=True)\n\n output = torch.cat((r_space, g_space, b_space), dim=1)\n\n return output", "def testTransposition(self, use_bias):\n net = snt.Conv2DTranspose(name=\"conv2d\",\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=1,\n use_bias=use_bias)\n\n net_transpose = net.transpose()\n input_to_net = tf.placeholder(tf.float32, shape=self.in_shape)\n err = \"Variables in {} not instantiated yet, __call__ the module first.\"\n with self.assertRaisesRegexp(snt.NotConnectedError,\n err.format(net.scope_name)):\n net_transpose(input_to_net)\n net_transpose = net.transpose(name=\"another_net_transpose\")\n net_out = net(input_to_net)\n net_transposed_output = net_transpose(net_out)\n self.assertEqual(net_transposed_output.get_shape(),\n input_to_net.get_shape())", "def conv_transpose_pattern():\n pattern = is_op(\"nn.conv2d_transpose\")(wildcard(), is_constant())\n pattern = pattern.optional(lambda x: is_op(\"nn.bias_add\")(x, is_constant()))\n pattern = pattern.optional(lambda x: is_op(\"add\")(x, is_constant()))\n pattern = pattern.optional(\n lambda x: is_tuple_get_item(\n is_op(\"nn.batch_norm\")(\n x, is_constant(), is_constant(), is_constant(), is_constant()\n )\n )\n )\n pattern = pattern.optional(is_op(\"nn.relu\"))\n pattern = pattern.optional(is_op(\"clip\"))\n return pattern", "def convert_conv2d_transpose(g, op, block):\n\n dilations = op.attr(\"dilations\")\n groups = op.attr(\"groups\")\n paddings = op.attr(\"paddings\")\n padding_algorithm = op.attr(\"padding_algorithm\")\n strides = op.attr(\"strides\")\n output_padding = op.attr(\"output_padding\") if op.attr(\"output_padding\") else [0, 0]\n\n kernel = g.get_node(op.input(\"Filter\")[0])\n input_x = g.get_node(op.input(\"Input\")[0])\n _, out_channels, k_h, k_w = infer_shape(kernel)\n k_size = [k_h, k_w]\n if padding_algorithm == \"VALID\":\n paddings = [0, 0]\n elif padding_algorithm == \"SAME\":\n # SAME padding of conv2d_transpose is not same with conv2d\n # We cannot use auto_pad here, only static shape is supported now\n dilations = [1, 1]\n input_shape = shape_of(input_x)\n h_w = _op.strided_slice(input_shape, [2], [4])\n try:\n h_w = infer_value(h_w, g.get_params()).numpy().tolist()\n except Exception as e:\n msg = \"The SAME padding algorithm of conv2d_transpose not support dynamic shape\"\n raise tvm.error.OpAttributeInvalid(msg) from e\n paddings = []\n for i in range(2):\n if strides[i] == 1 or h_w[i] % strides[i] == 0:\n pad = max(k_size[i] - strides[i], 0)\n else:\n pad = max(k_size[i] - (h_w[i] % strides[i]), 0)\n pad_before = pad // 2\n pad_after = pad - pad_before\n paddings.insert(-1, pad_before)\n paddings.append(pad_after)\n elif padding_algorithm == \"EXPLICIT\":\n if len(paddings) == 2:\n paddings = [paddings[0], paddings[1], paddings[0], paddings[1]]\n elif len(paddings) == 4:\n paddings = 
[paddings[0], paddings[2], paddings[1], paddings[3]]\n else:\n msg = f'Value {padding_algorithm} in attribute \"padding\" of operator Conv is not \"valid.\"'\n raise tvm.error.OpAttributeInvalid(msg)\n\n out = _op.nn.conv2d_transpose(\n input_x,\n kernel,\n strides=strides,\n padding=paddings,\n dilation=dilations,\n groups=groups,\n channels=out_channels * groups,\n kernel_size=k_size,\n output_padding=output_padding,\n )\n g.add_node(op.output(\"Output\")[0], out)", "def testTransposeNWC(self, batch_size, in_length, in_channels, out_channels,\n kernel_shape, padding, use_bias, out_shape,\n stride_shape, use_output_shape):\n\n conv1_transpose = snt.Conv1DTranspose(\n output_channels=out_channels,\n output_shape=out_shape if use_output_shape else None,\n kernel_shape=kernel_shape,\n padding=padding,\n stride=stride_shape,\n name=\"conv1_transpose\",\n use_bias=use_bias,\n data_format=conv.DATA_FORMAT_NWC)\n conv1 = conv1_transpose.transpose()\n\n # Check kernel shapes, strides and padding match.\n self.assertEqual(conv1_transpose.kernel_shape, conv1.kernel_shape)\n self.assertEqual((1, conv1_transpose.stride[1], 1), conv1.stride)\n self.assertEqual(conv1_transpose.padding, conv1.padding)\n\n # Before conv1_transpose is connected, we cannot know how many\n # `output_channels` conv1 should have.\n err = \"Variables in conv1_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n conv1.output_channels # pylint: disable=pointless-statement\n\n # After connection the number of `output_channels` is known.\n x = tf.constant(np.random.randn(batch_size, in_length, in_channels),\n dtype=np.float32)\n conv1_transpose(x)\n self.assertEqual(in_channels, conv1.output_channels)\n\n # As is `output_shape`.\n self.assertIn(out_shape, conv1_transpose.output_shape)\n\n # However, even after connection, the `input_shape` of the forward\n # convolution is not known until it is itself connected (i.e. 
it can be\n # connected to a different shape input from the `output_shape` of the\n # transpose convolution!)\n err = \"Variables in conv1_transpose_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n self.assertEqual(conv1_transpose.output_shape, conv1.input_shape)", "def test_on_conv_transpose_2d_three_by_three(self):\n w_init = lambda s: jn.array([[[[1., 2., 1.], [1., 2., 1.], [1., 1., 1.]]]]).transpose((2, 3, 0, 1))\n conv = objax.nn.ConvTranspose2D(1, 1, 3, padding=objax.ConvPadding.VALID, w_init=w_init)\n x = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n y = jn.array([[[[2., 5., 7., 11., 11., 4.],\n [7., 21., 31., 39., 34., 12.],\n [16., 47., 70., 80., 65., 24.],\n [27., 79., 114., 125., 97., 36.],\n [22., 59., 86., 93., 70., 28.],\n [13., 27., 42., 45., 31., 16.]]]])\n self.assertEqual(conv(x).tolist(), y.tolist())", "def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis):\n width_axis.name = \"time\"\n assert len(conv1d_placeholder.axes.find_by_name(\"time\")) == 1\n\n conv_layer = Convolution((3, output_size), lambda x: 1)\n with pytest.raises(IncompatibleAxesError):\n conv_layer(conv1d_placeholder)\n # As a dictionary\n output = conv_layer(conv1d_placeholder, spatial_axes={\"W\": \"time\"})\n assert output.axes == conv1d_placeholder.axes\n # As a tuple\n output = conv_layer(conv1d_placeholder, spatial_axes=(\"D\", \"H\", \"time\"))\n assert output.axes == conv1d_placeholder.axes", "def test_conv3d():\n img_r = np.array([\n [0.3, 0.5, 0.7, 0.9],\n [0.1, 0.3, 0.5, 0.7],\n [0.9, 0.7, 0.5, 0.3],\n ])\n img_g = np.array([\n [0.4, 0.6, 0.8, 1.0],\n [0.5, 0.5, 0.5, 0.5],\n [0.1, 0.7, 0.5, 0.9],\n ])\n img_b = np.array([\n [0.2, 0.2, 0.8, 0.8],\n [0.1, 0.6, 0.5, 0.6],\n [0.5, 0.3, 0.4, 0.7],\n ])\n img = np.dstack([img_r, img_g, img_b])\n template_r = np.array([\n [0.5, 0],\n [0.7, 0],\n ])\n template_g = np.array([\n [0.9, 0],\n [0.2, 0],\n ])\n template_b = np.array([\n [0.1, 0],\n [0.4, 0],\n ])\n template = np.dstack([template_r, template_g, template_b])\n template = np.flipud(np.fliplr(template))\n template[:, :, :] = template[:, :, ::-1]\n return np.squeeze(fftconvolve(img, template, mode='valid'))", "def testTransposition(self, use_bias):\n net = snt.Conv3DTranspose(name=\"conv3d_3\",\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=1,\n use_bias=use_bias)\n\n net_transpose = net.transpose()\n input_to_net = tf.placeholder(tf.float32, shape=self.in_shape)\n err = \"Variables in {} not instantiated yet, __call__ the module first.\"\n with self.assertRaisesRegexp(snt.NotConnectedError,\n err.format(net.scope_name)):\n net_transpose(input_to_net)\n net_transpose = net.transpose(name=\"another_net_transpose\")\n net_out = net(input_to_net)\n net_transposed_output = net_transpose(net_out)\n self.assertEqual(net_transposed_output.get_shape(),\n input_to_net.get_shape())", "def convolve_one_image(self,input4D, one_image, image_shape, \n Pstruct, filter_shape,\n image_index,\n channel_index): \n \n \n ## We look at the composition for the first channel in the beginning \n rank = Pstruct[0]['U1'].shape[1]\n fwidth = filter_shape[2]\n fheight = filter_shape[3]\n \n \n # Construct horizontal filters\n #TODO save the filters in the correct shape\n horizontal_filter_shape = (rank, 1, fwidth)\n horizontal_filters = np.ndarray(horizontal_filter_shape)\n horizontal_filters[:, 0, :] = 
np.transpose(Pstruct[channel_index]['U1']);\n \n # Output is 1 x rank x W x H\n horizontal_conv_out = conv.conv2d(input=one_image, \n filters = horizontal_filters,\n filter_shape = horizontal_filter_shape, \n image_shape = image_shape)\n \n # Construct vertical filters\n vertical_filter_shape = (rank, fheight, 1)\n vertical_filters = np.ndarray(vertical_filter_shape) \n vertical_filters[:,:, 0] = np.transpose(Pstruct[channel_index]['U2']);\n\n initial_n_rows = image_shape[1]\n final_n_rows = initial_n_rows- fwidth + 1\n final_n_cols = image_shape[2] - fheight + 1 \n conv_out = theano.shared(np.zeros((rank, final_n_rows, final_n_cols)))\n for r in range(rank):\n # temp is 1x1x imageW x imageH\n A = conv.conv2d(input = horizontal_conv_out[:,r,:,:], \n filters = vertical_filters[r,:,:],\n filter_shape = (1, fheight, 1), \n image_shape = (1, initial_n_rows, final_n_cols))\n conv_out = T.set_subtensor(conv_out[r,:,:], A[0,:,:])\n \n nbr_filters = Pstruct[0]['U3'].shape[0]\n # Final number of rows and columns \n ## numberof images, number of filters, image width, image height\n alphas = Pstruct[channel_index]['U3'] \n for f in range(nbr_filters): \n temp = theano.shared(np.zeros((final_n_rows, final_n_cols)))\n for r in range(rank):\n temp = temp + conv_out[r, :,:]* alphas[f, r] * Pstruct[channel_index]['lmbda'][r]; \n input4D =T.set_subtensor(input4D[image_index,f,:,:], temp)\n return input4D", "def testTransposeNHWC(self, use_bias, use_output_shape):\n output_shape = tf.TensorShape((4, 5))\n\n conv2_transpose = snt.Conv2DTranspose(\n output_channels=5,\n output_shape=output_shape if use_output_shape else None,\n kernel_shape=3,\n padding=snt.VALID,\n stride=1,\n name=\"conv2_transpose\",\n use_bias=use_bias,\n data_format=conv.DATA_FORMAT_NHWC)\n conv2 = conv2_transpose.transpose()\n\n # Check kernel shapes, strides and padding match.\n self.assertEqual(conv2_transpose.kernel_shape, conv2.kernel_shape)\n self.assertEqual((1,) + conv2_transpose.stride[1:3] + (1,), conv2.stride)\n self.assertEqual(conv2_transpose.padding, conv2.padding)\n\n # Before conv2_transpose is connected, we cannot know how many\n # `output_channels` conv1 should have.\n err = \"Variables in conv2_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n _ = conv2.output_channels\n\n # After connection the number of `output_channels` is known.\n batch_size = 32\n in_height = 2\n in_width = 3\n in_channels = 4\n x = tf.constant(np.random.randn(batch_size, in_height, in_width,\n in_channels),\n dtype=np.float32)\n conv2_transpose(x)\n self.assertEqual(in_channels, conv2.output_channels)\n\n # As is `output_channels`.\n self.assertEqual(output_shape, conv2_transpose.output_shape)\n\n # However, even after connection, the `input_shape` of the forward\n # convolution is not known until it is itself connected (i.e. 
it can be\n # connected to a different shape input from the `output_shape` of the\n # transpose convolution!)\n err = \"Variables in conv2_transpose_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n self.assertEqual(conv2_transpose.output_shape, conv2.input_shape)", "def _conv2d_transpose_block(in_f, out_f, *args, **kwargs):\n return nn.Sequential(\n nn.ConvTranspose2d(in_f, out_f, *args, **kwargs),\n nn.BatchNorm2d(out_f),\n nn.ReLU(),\n nn.Dropout2d(p=0.2)\n )", "def __init__(\n self,\n *,\n input_dims: Union[List[int], Tuple[int]],\n cnn_transpose_filter_specifiers: List[List[Union[int, List]]],\n cnn_transpose_use_bias: bool = True,\n cnn_transpose_activation: Optional[str] = \"relu\",\n cnn_transpose_use_layernorm: bool = False,\n ):\n super().__init__()\n\n assert len(input_dims) == 3\n\n cnn_transpose_activation = get_activation_fn(\n cnn_transpose_activation, framework=\"tf2\"\n )\n\n layers = []\n\n # Input layer.\n layers.append(tf.keras.layers.Input(shape=input_dims))\n\n for i, (num_filters, kernel_size, strides) in enumerate(\n cnn_transpose_filter_specifiers\n ):\n is_final_layer = i == len(cnn_transpose_filter_specifiers) - 1\n layers.append(\n tf.keras.layers.Conv2DTranspose(\n filters=num_filters,\n kernel_size=kernel_size,\n strides=strides,\n padding=\"same\",\n # Last layer is never activated (regardless of config).\n activation=(\n None\n if cnn_transpose_use_layernorm or is_final_layer\n else cnn_transpose_activation\n ),\n # Last layer always uses bias (b/c has no LayerNorm, regardless of\n # config).\n use_bias=cnn_transpose_use_bias or is_final_layer,\n )\n )\n if cnn_transpose_use_layernorm and not is_final_layer:\n # Use epsilon=1e-5 here (instead of default 1e-3) to be unified with\n # torch. 
Need to normalize over all axes.\n layers.append(\n tf.keras.layers.LayerNormalization(axis=[-3, -2, -1], epsilon=1e-5)\n )\n layers.append(tf.keras.layers.Activation(cnn_transpose_activation))\n\n # Create the final CNNTranspose network.\n self.cnn_transpose = tf.keras.Sequential(layers)\n\n self.expected_input_dtype = tf.float32", "def testTransposition(self, use_bias):\n net = snt.Conv2D(name=\"conv2d\",\n output_channels=4,\n kernel_shape=3,\n stride=1,\n use_bias=use_bias)\n\n net_transpose = net.transpose()\n input_to_net = tf.placeholder(tf.float32, shape=[None, 100, 100, 3])\n\n err = \"Variables in {} not instantiated yet, __call__ the module first.\"\n with self.assertRaisesRegexp(snt.NotConnectedError,\n err.format(net.scope_name)):\n net_transpose(input_to_net)\n net_transpose = net.transpose(name=\"another_net_transpose\")\n net_out = net(input_to_net)\n net_transposed_output = net_transpose(net_out)\n self.assertAllEqual(net_transposed_output.get_shape().as_list(),\n input_to_net.get_shape().as_list())", "def check_conv_transpose(extract):\n call = extract\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"nn.relu\":\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"clip\":\n if call.attrs[\"a_min\"] != 0.0 or call.attrs[\"a_max\"] != 6.0:\n return False\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n\n while call.op.name != \"nn.conv2d_transpose\":\n call = call.args[0]\n\n attrs = call.attrs\n if attrs.data_layout != \"NCHW\":\n return False\n\n return True", "def transpose(incoming, conv, nonlinearity, *args, **kwargs):\n return TransposedConv2DLayer(incoming, conv.input_shape[1],\n conv.filter_size, stride=conv.stride,\n crop=conv.pad, W=conv.W,\n flip_filters=not conv.flip_filters,\n nonlinearity=nonlinearity, *args,\n **kwargs)", "def go(self, z):\n with tf.variable_scope(self.name) as scope:\n batch_size = tf.shape(z)[0]\n fc = tf.contrib.layers.fully_connected(z, 4*4*1024, activation_fn=tf.identity)\n reshape_fc = tf.reshape(fc, [1, 4, 4, 1024])\n \n conv1 = tf.contrib.layers.conv2d_transpose(\n reshape_fc, 512, [4,4], [2,2],\n weights_initializer = tf.random_normal_initializer(stddev=0.02), #st_dev from dcgan paper\n activation_fn = leaky_relu\n )\n \n conv2 = tf.contrib.layers.conv2d_transpose(\n conv1, 256, [4, 4], [2,2],\n weights_initializer = tf.random_normal_initializer(stddev=0.02),\n activation_fn = leaky_relu\n )\n \n conv3 = tf.contrib.layers.conv2d_transpose(\n conv2, 3, [4,4], [2,2],\n weights_initializer = tf.random_normal_initializer(stddev=0.02),\n# activation_fn = leaky_relu\n activation_fn = tf.tanh\n )\n \n# conv4 = tf.contrib.layers.conv2d_transpose(\n# conv3, 3, [4,4], [2,2],\n# weights_initializer = tf.random_normal_initializer(stddev=0.02),\n# activation_fn = tf.tanh\n# )\n return conv3", "def testTransposeNDHWC(self, use_bias):\n\n conv3_transpose = snt.Conv3DTranspose(\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=self.strides,\n name=\"conv3_transpose\",\n use_bias=use_bias,\n data_format=conv.DATA_FORMAT_NDHWC)\n conv3 = conv3_transpose.transpose()\n\n # Check kernel shapes, strides and padding match.\n self.assertEqual(conv3_transpose.kernel_shape, conv3.kernel_shape)\n self.assertEqual((1,) + self.strides + (1,), conv3.stride)\n self.assertEqual(conv3_transpose.padding, 
conv3.padding)\n\n # Before conv3_transpose is connected, we cannot know how many\n # `output_channels` conv1 should have.\n err = \"Variables in conv3_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n _ = conv3.output_channels\n\n # After connection the number of `output_channels` is known.\n x = tf.constant(np.random.randn(self.batch_size,\n self.in_depth,\n self.in_height,\n self.in_width,\n self.in_channels),\n dtype=np.float32)\n conv3_transpose(x)\n self.assertEqual(self.in_channels, conv3.output_channels)\n\n # However, even after connection, the `input_shape` of the forward\n # convolution is not known until it is itself connected (i.e. it can be\n # connected to a different shape input from the `output_shape` of the\n # transpose convolution!)\n err = \"Variables in conv3_transpose_transpose not instantiated yet\"\n with self.assertRaisesRegexp(snt.NotConnectedError, err):\n self.assertEqual(conv3_transpose.output_shape, conv3.input_shape)", "def fp32_1_0_transpose(tik_inst, block_idx, trans_params):\n\n data_in, data_out, ub_input, ub_tiling, tiling_reg_list = trans_params\n\n _get_tiling_params(tiling_reg_list, ub_tiling)\n # rename tiling parameters\n need_core_num = tiling_reg_list[1]\n ub_offset = tiling_reg_list[2]\n max_sub_h_size = tiling_reg_list[3]\n max_sub_w_size = tiling_reg_list[4]\n per_core_col_size = tiling_reg_list[5]\n per_core_loop_cnt = tiling_reg_list[6]\n per_core_left_data = tiling_reg_list[7]\n last_core_loop_cnt = tiling_reg_list[9]\n last_core_left_data = tiling_reg_list[10]\n h_loop_cnt = tiling_reg_list[11]\n h_left = tiling_reg_list[12]\n axis_0 = tiling_reg_list[13]\n axis_1 = tiling_reg_list[14]\n axis_2 = tiling_reg_list[15]\n # check whether axis_1 is block align or not\n data_size_one_block = _get_elment_cnt_one_block(data_in.dtype)\n\n with tik_inst.if_scope(block_idx < need_core_num):\n\n def _fp32_1_0_t_mc_on_1(loop_cnt, left_size):\n \"\"\"\n detail process for permute (1, 0)\n \"\"\"\n\n def _fp32_vnchwconv_process(axis_0_index, h_loop_idx, h_size):\n \"\"\"\n do transpose by vnchwconv\n \"\"\"\n\n def _fp32_inner_vnchwconv(col_lp_idx, col_size):\n \"\"\"\n inner vnchwconv\n \"\"\"\n\n # move data in\n in_offset = (block_idx * per_core_col_size + col_lp_idx * max_sub_w_size +\n h_loop_idx * max_sub_h_size * axis_2 +\n axis_0_index * axis_1 * axis_2)\n data_in_info = (h_size, col_size, axis_1, axis_2, in_offset)\n _data_move_in_mc_on_w(tik_inst, ub_input, data_in, data_in_info)\n\n # for this case, data_move will move in one more block\n with tik_inst.new_stmt_scope():\n h_size_temp = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(tik.all(axis_1 > data_size_one_block,\n h_size % data_size_one_block > 0)):\n h_size_temp.set_as(_ceil_div(h_size, data_size_one_block) *\n data_size_one_block)\n with tik_inst.else_scope():\n h_size_temp.set_as(h_size)\n # transpose by vnchwconv\n sub_hw_size = (h_size_temp, col_size)\n _transpose_by_2_vnchwconv(tik_inst, ub_input[ub_offset],\n ub_input, sub_hw_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size + col_lp_idx * max_sub_w_size) *\n axis_1 + h_loop_idx * max_sub_h_size +\n axis_0_index * axis_1 * axis_2)\n data_out_info = (h_size, col_size, axis_1, axis_2, out_offset)\n _data_move_out_mc_on_w(tik_inst, data_out, ub_input[ub_offset], data_out_info)\n\n with tik_inst.for_range(0, loop_cnt) as lp_idx:\n _fp32_inner_vnchwconv(lp_idx, max_sub_w_size)\n with tik_inst.if_scope(left_size > 0):\n _fp32_inner_vnchwconv(loop_cnt, 
left_size)\n\n with tik_inst.for_range(0, axis_0) as axis_0_idx:\n with tik_inst.for_range(0, h_loop_cnt) as h_lp_idx:\n _fp32_vnchwconv_process(axis_0_idx, h_lp_idx, max_sub_h_size)\n with tik_inst.if_scope(h_left > 0):\n _fp32_vnchwconv_process(axis_0_idx, h_loop_cnt, h_left)\n\n with tik_inst.if_scope(block_idx == need_core_num - 1):\n _fp32_1_0_t_mc_on_1(last_core_loop_cnt, last_core_left_data)\n with tik_inst.else_scope():\n _fp32_1_0_t_mc_on_1(per_core_loop_cnt, per_core_left_data)", "def conv_transpose2d(self, kernel, **kwargs):\n raise NotImplementedError(\"conv_transpose2d is not implemented\")", "def test_transposition(self):\n\n World.reset()\n\n\n def inside(x, y):\n centers_distance = tf.sqrt(tf.reduce_sum(tf.squared_difference(x[:, 0:2], y[:, 0:2]), axis=1) + 1e-6)\n return tf.cast((centers_distance + x[:, 2]) < y[:, 2], tf.float32)\n\n circles = tfl.Domain(label=\"Circles\", data=[[0., 0, 1], [0,0, 2], [0,0, 3]])\n inside = tfl.Predicate(label=\"inside\", domains=[\"Circles\", \"Circles\"], function=inside)\n tfl.setTNorm(id=tfl.SS, p=1)\n sess = tf.Session()\n\n\n # Constraint 1\n x = tfl.variable(circles, name=\"x\")\n y = tfl.variable(circles, name=\"y\")\n a = tfl.atom(inside, (x,y))\n b = tfl.atom(inside, (y,x))\n rule = tfl.and_n(a, b)\n\n assert np.greater(sess.run(rule), np.zeros(shape=[3,3,3])).all()\n assert len(World._predicates_cache)==1", "def conv_capsule(inputs, shape, strides, iterations, batch_size, name):\n inputs_poses, inputs_activations = inputs\n\n with tf.variable_scope(name) as scope:\n stride = strides[1] # 2\n i_size = shape[-2] # 32\n o_size = shape[-1] # 32\n pose_size = inputs_poses.get_shape()[-1] # 4\n\n # Tile the input capusles' pose matrices to the spatial dimension of the output capsules\n # Such that we can later multiple with the transformation matrices to generate the votes.\n inputs_poses = kernel_tile(inputs_poses, 3, stride) # (?, 14, 14, 32, 4, 4) -> (?, 6, 6, 3x3=9, 32x16=512)\n\n # Tile the activations needed for the EM routing\n inputs_activations = kernel_tile(inputs_activations, 3, stride) # (?, 14, 14, 32) -> (?, 6, 6, 9, 32)\n spatial_size = int(inputs_activations.get_shape()[1]) # 6\n\n # Reshape it for later operations\n inputs_poses = tf.reshape(inputs_poses, shape=[-1, 3 * 3 * i_size, 16]) # (?, 9x32=288, 16)\n inputs_activations = tf.reshape(inputs_activations,\n shape=[-1, spatial_size, spatial_size, 3 * 3 * i_size]) # (?, 6, 6, 9x32=288)\n\n with tf.variable_scope('votes') as scope:\n # Generate the votes by multiply it with the transformation matrices\n votes = mat_transform(inputs_poses, o_size,\n size=batch_size * spatial_size * spatial_size) # (864, 288, 32, 16)\n\n # Reshape the vote for EM routing\n votes_shape = votes.get_shape()\n votes = tf.reshape(votes, shape=[batch_size, spatial_size, spatial_size, votes_shape[-3], votes_shape[-2],\n votes_shape[-1]]) # (24, 6, 6, 288, 32, 16)\n tf.logging.info(\"{} votes shape: {}\".format(name, votes.get_shape()))\n\n with tf.variable_scope('routing') as scope:\n # beta_v and beta_a one for each output capsule: (1, 1, 1, 32)\n beta_v = tf.get_variable(\n name='beta_v', shape=[1, 1, 1, o_size], dtype=tf.float32,\n initializer=initializers.xavier_initializer()\n )\n beta_a = tf.get_variable(\n name='beta_a', shape=[1, 1, 1, o_size], dtype=tf.float32,\n initializer=initializers.xavier_initializer()\n )\n\n # Use EM routing to compute the pose and activation\n # votes (24, 6, 6, 3x3x32=288, 32, 16), inputs_activations (?, 6, 6, 288)\n # poses (24, 6, 6, 32, 16), activation 
(24, 6, 6, 32)\n poses, activations = matrix_capsules_em_routing(\n votes, inputs_activations, beta_v, beta_a, iterations, name='em_routing'\n )\n\n # Reshape it back to 4x4 pose matrix\n poses_shape = poses.get_shape()\n # (24, 6, 6, 32, 4, 4)\n poses = tf.reshape(\n poses, [\n poses_shape[0], poses_shape[1], poses_shape[2], poses_shape[3], pose_size, pose_size\n ]\n )\n\n tf.logging.info(\"{} pose shape: {}\".format(name, poses.get_shape()))\n tf.logging.info(\"{} activations shape: {}\".format(name, activations.get_shape()))\n\n return poses, activations", "def inference_fconv_supercompact(input_shape=[None, 22,22,10,1],\n input_shape_m=[None, 22,22,10,3],\n n_filters=[1, 32, 32, 32],\n filter_sizes=[3, 2, 3, 2],\n corruption=False):\n \n # input to the network\n x = tf.placeholder(\n tf.float32, input_shape, name='x')\n m = tf.placeholder(\n tf.float32, input_shape_m, name='m')\n t = tf.placeholder(\n tf.float32, input_shape, name='t')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob') #dropout (keep probability)\n \n encoder_i = []\n encoder_m = []\n encoder_main = []\n shapes_main = []\n shapes_i = []\n shapes_m = []\n \n #keep_prob=1.\n ### BRANCH 3d images\n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 256\n W = weight_variable([3, 3, 3, 1, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('img_conv1_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n branch_image = current_input\n\n ### BRANCH motion parameters\n with tf.variable_scope('motion_conv1_1') as scope:\n shapes_m.append(m.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, 3, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(m, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n\n branch_motion = current_input\n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 256\n current_input = tf.multiply(branch_image,branch_motion)\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n \n with tf.variable_scope('conv3_1') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n\n # store the latent representation\n z = current_input\n z_input_nfeaturemap = input_nfeaturemap\n '''\n encoder_main.reverse()\n encoder_i.reverse()\n encoder_m.reverse()\n \n shapes_main.reverse()\n shapes_i.reverse()\n shapes_m.reverse()\n '''\n \n \n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('deconv_i_2') as scope:\n 
shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(z, W) + b\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n y = output\n \n \n with tf.variable_scope('deconv_m_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 3\n W = weight_variable([1, 1, 1, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(z, W) + b\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n m_hat = output\n \n \n \n \n loss_m = tf.reduce_mean(tf.square(m-m_hat))\n loss_i = tf.reduce_mean(tf.square(t-y))\n cost = loss_i + loss_m\n\n # %%\n return {'x': x, 't':t, 'm': m, 'm_hat':m_hat, 'y': y, 'cost': cost, 'loss_i':loss_i, 'loss_m':loss_m, 'keep_prob': keep_prob, 'encoder_main':encoder_main, 'encoder_i':encoder_i, 'encoder_m':encoder_m}", "def inference_fconv_small12(input_shape=[None, 22,22,10,1],\n input_shape_m=[None, 22,22,10,3],\n n_filters=[1, 32, 32, 32],\n filter_sizes=[3, 2, 3, 2],\n corruption=False):\n \n # input to the network\n x = tf.placeholder(\n tf.float32, input_shape, name='x')\n m = tf.placeholder(\n tf.float32, input_shape_m, name='m')\n t = tf.placeholder(\n tf.float32, input_shape, name='t')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob') #dropout (keep probability)\n \n encoder_i = []\n encoder_m = []\n encoder_main = []\n shapes_main = []\n shapes_i = []\n shapes_m = []\n \n #keep_prob=1.\n ### BRANCH 3d images\n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 256\n W = weight_variable([3, 3, 3, 1, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n \n with tf.variable_scope('img_conv1_3') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n branch_image = current_input\n '''\n ### BRANCH motion parameters\n with tf.variable_scope('motion_conv1_1') as scope:\n shapes_m.append(m.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, 3, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(m, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('motion_conv1_3') as scope:\n shapes_m.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n\n branch_motion = current_input\n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 128\n current_input = tf.multiply(branch_image,branch_motion)\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n \n '''\n \n with tf.variable_scope('conv3_1') as scope:\n 
shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(branch_image, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n # Max pooling\n #current_input = max_pool_2x2(current_input)\n #''' \n with tf.variable_scope('conv3_2') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n # store the latent representation\n z = current_input\n z_input_nfeaturemap = input_nfeaturemap\n '''\n encoder_main.reverse()\n encoder_i.reverse()\n encoder_m.reverse()\n \n shapes_main.reverse()\n shapes_i.reverse()\n shapes_m.reverse()\n '''\n \n with tf.variable_scope('deconv_i_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('deconv_i_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n y = output\n \n \n with tf.variable_scope('deconv_m_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n with tf.variable_scope('deconv_m_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 3\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n m_hat = output\n \n \n loss_m = tf.reduce_mean(tf.square(m-m_hat))\n loss_i = tf.reduce_mean(tf.square(t-y))\n cost = loss_i + loss_m\n\n # %%\n return {'x': x, 't':t, 'm': m, 'm_hat':m_hat, 'y': y, 'cost': cost, 'loss_i':loss_i, 'loss_m':loss_m, 'keep_prob': keep_prob, 'encoder_main':encoder_main, 'encoder_i':encoder_i, 'encoder_m':encoder_m}", "def test_convTranpose2d(self, _, module, inputs, filters, bias=None):\n\n utils.compare_tracing_methods(\n module, inputs, filters, fusible_ops={\"aten::_convolution\"}\n )", "def inference_fconv(input_shape=[None, 22,22,10,1],\n input_shape_m=[None, 22,22,10,3],\n n_filters=[1, 32, 32, 32],\n filter_sizes=[3, 2, 3, 2],\n corruption=False):\n \n # input to the network\n x = tf.placeholder(\n tf.float32, input_shape, name='x')\n m = tf.placeholder(\n tf.float32, input_shape_m, name='m')\n t = tf.placeholder(\n tf.float32, input_shape, name='t')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob') #dropout (keep probability)\n \n encoder_i = []\n encoder_m = []\n encoder_main = []\n shapes_main = []\n shapes_i = []\n shapes_m = []\n \n #keep_prob=1.\n 
### BRANCH 3d images\n \n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, input_shape[4], nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n img_1 = output\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n ### BRANCH motion parameters\n with tf.variable_scope('motion_conv1_1') as scope:\n shapes_m.append(m.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([3, 3, 3, input_shape_m[4], nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(m, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n motion_1 = output\n \n \n current_input = tf.multiply(img_1,motion_1)\n \n with tf.variable_scope('img_conv1_3') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 256\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n img_2 = output\n \n\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n # Max pooling\n motion_1 = max_pool_2x2(motion_1)\n input_nfeaturemap = 128\n \n with tf.variable_scope('motion_conv1_3') as scope:\n shapes_m.append(motion_1.get_shape().as_list())\n nfeaturemap = 256\n W = weight_variable([2, 2, 2, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(motion_1, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n motion_2 = output\n \n \n # resize upsampling\n motion_2 = resize_volumes(motion_2, 2, 2, 2)\n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 512\n current_input = tf.multiply(img_2,motion_2)\n input_nfeaturemap = 256\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n '''\n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 256\n W = weight_variable([3, 3, 3, 1, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n \n with tf.variable_scope('img_conv1_3') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n branch_image = current_input\n\n ### BRANCH motion parameters\n with tf.variable_scope('motion_conv1_1') as scope:\n shapes_m.append(m.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, 3, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(m, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('motion_conv1_3') as scope:\n shapes_m.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = 
bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n\n branch_motion = current_input\n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 256\n current_input = tf.multiply(branch_image,branch_motion)\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n '''\n with tf.variable_scope('conv3_1') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n # Max pooling\n #current_input = max_pool_2x2(current_input)\n \n with tf.variable_scope('conv3_2') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 128\n W = weight_variable([2, 2, 2, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n # store the latent representation\n z = current_input\n z_input_nfeaturemap = input_nfeaturemap\n '''\n encoder_main.reverse()\n encoder_i.reverse()\n encoder_m.reverse()\n \n shapes_main.reverse()\n shapes_i.reverse()\n shapes_m.reverse()\n '''\n \n with tf.variable_scope('deconv_i_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('deconv_i_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n y = output\n \n \n with tf.variable_scope('deconv_m_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 32\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n with tf.variable_scope('deconv_m_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 3\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n m_hat = output\n \n \n \n \n loss_m = tf.reduce_mean(tf.square(m-m_hat))\n loss_i = tf.reduce_mean(tf.square(t-y))\n cost = loss_i + loss_m\n\n # %%\n return {'x': x, 't':t, 'm': m, 'm_hat':m_hat, 'y': y, 'cost': cost, 'loss_i':loss_i, 'loss_m':loss_m, 'keep_prob': keep_prob, 'encoder_main':encoder_main, 'encoder_i':encoder_i, 'encoder_m':encoder_m}", "def testTransposeNCDHW(self, use_bias):\n\n conv3_transpose = snt.Conv3DTranspose(\n output_channels=self.out_channels,\n output_shape=self.out_shape,\n kernel_shape=self.kernel_shape,\n padding=self.padding,\n stride=self.strides,\n name=\"conv3_transpose\",\n use_bias=use_bias,\n 
data_format=conv.DATA_FORMAT_NCDHW)\n    conv3 = conv3_transpose.transpose()\n\n    # Check kernel shapes, strides and padding match.\n    self.assertEqual(conv3_transpose.kernel_shape, conv3.kernel_shape)\n    self.assertEqual((1, 1) + self.strides, conv3.stride)\n    self.assertEqual(conv3_transpose.padding, conv3.padding)\n\n    # Before conv3_transpose is connected, we cannot know how many\n    # `output_channels` conv3 should have.\n    err = \"Variables in conv3_transpose not instantiated yet\"\n    with self.assertRaisesRegexp(snt.NotConnectedError, err):\n      _ = conv3.output_channels\n\n    # After connection the number of `output_channels` is known.\n    x = tf.constant(np.random.randn(self.batch_size,\n                                    self.in_channels,\n                                    self.in_depth,\n                                    self.in_height,\n                                    self.in_width),\n                    dtype=np.float32)\n    conv3_transpose(x)\n    self.assertEqual(self.in_channels, conv3.output_channels)\n\n    # However, even after connection, the `input_shape` of the forward\n    # convolution is not known until it is itself connected (i.e. it can be\n    # connected to a different shape input from the `output_shape` of the\n    # transpose convolution!)\n    err = \"Variables in conv3_transpose_transpose not instantiated yet\"\n    with self.assertRaisesRegexp(snt.NotConnectedError, err):\n      self.assertEqual(conv3_transpose.output_shape, conv3.input_shape)", "def inference_fconv_small(alpha=1.,input_shape=[None, 22,22,10,1],\n input_shape_m=[None, 22,22,10,3],\n n_filters=[1, 32, 32, 32],\n filter_sizes=[3, 2, 3, 2],\n corruption=False):\n \n # input to the network\n x = tf.placeholder(\n tf.float32, input_shape, name='x')\n m = tf.placeholder(\n tf.float32, input_shape_m, name='m')\n t = tf.placeholder(\n tf.float32, input_shape, name='t')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob') #dropout (keep probability)\n \n encoder_i = []\n encoder_m = []\n encoder_main = []\n shapes_main = []\n shapes_i = []\n shapes_m = []\n \n #keep_prob=1.\n ### BRANCH 3d images\n with tf.variable_scope('img_conv1_1') as scope:\n shapes_i.append(x.get_shape().as_list())\n nfeaturemap = 32\n W = weight_variable([2, 2, 2, 1, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(x, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n #current_input = max_pool_2x2(current_input)\n input_nfeaturemap = 32\n \n with tf.variable_scope('img_conv1_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 32\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('img_conv1_3') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([2, 2, 2, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n # resize upsampling\n #current_input = resize_volumes(current_input, 2, 2, 2) \n \n branch_image = current_input\n\n ### BRANCH motion parameters\n with tf.variable_scope('motion_conv1_1') as scope:\n shapes_m.append(m.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, 3, 
nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(m, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('motion_conv1_3') as scope:\n shapes_m.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n\n branch_motion = current_input\n \n #current_input = tf.concat([branch_image, branch_motion], axis=4)\n #input_nfeaturemap = 128\n current_input = tf.multiply(branch_image,branch_motion)\n #print tf.shape(current_input)[-1]\n #tf.shape(current_input)[-1]\n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n \n \n with tf.variable_scope('conv3_1') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([3, 3, 3, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n # Max pooling\n #current_input = max_pool_2x2(current_input)\n #''' \n with tf.variable_scope('conv3_2') as scope:\n shapes_main.append(current_input.get_shape().as_list())\n nfeaturemap = 16\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(current_input, W) + b)\n encoder_main.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n \n \n \n # store the latent representation\n z = current_input\n z_input_nfeaturemap = input_nfeaturemap\n '''\n encoder_main.reverse()\n encoder_i.reverse()\n encoder_m.reverse()\n \n shapes_main.reverse()\n shapes_i.reverse()\n shapes_m.reverse()\n '''\n \n with tf.variable_scope('deconv_i_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n #current_input = tf.nn.dropout(current_input, keep_prob, [tf.shape(x)[0],1,1,1,input_nfeaturemap])\n \n with tf.variable_scope('deconv_i_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 1\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_i.append(W)\n input_nfeaturemap = nfeaturemap\n y = output\n \n \n with tf.variable_scope('deconv_m_1') as scope:\n shapes_i.append(z.get_shape().as_list())\n nfeaturemap = 64\n W = weight_variable([3, 3, 3, z_input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = tf.nn.relu(conv3d(z, W) + b)\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n current_input = output\n \n with tf.variable_scope('deconv_m_2') as scope:\n shapes_i.append(current_input.get_shape().as_list())\n nfeaturemap = 3\n W = weight_variable([1, 1, 1, input_nfeaturemap, nfeaturemap])\n b = bias_variable([nfeaturemap])\n output = conv3d(current_input, W) + b\n encoder_m.append(W)\n input_nfeaturemap = nfeaturemap\n m_hat = output\n \n \n \n \n loss_m = 
tf.reduce_mean(tf.square(m-m_hat))\n loss_i = tf.reduce_mean(tf.square(t-y))\n cost = alpha*loss_i + loss_m\n\n # %%\n return {'x': x, 't':t, 'm': m, 'm_hat':m_hat, 'y': y, 'cost': cost, 'loss_i':loss_i, 'loss_m':loss_m, 'keep_prob': keep_prob, 'encoder_main':encoder_main, 'encoder_i':encoder_i, 'encoder_m':encoder_m}", "def all_views_conv_layer(input_layer,network_type, layer_name, number_of_filters=32, filter_size=(3, 3), stride=(1, 1),\r\n padding='VALID', biases_initializer=tf.zeros_initializer()):\r\n if network_type == \"CC\":\r\n\r\n\r\n input_l_cc, input_r_cc = input_layer\r\n\r\n #with tf.variable_scope(layer_name + \"_CC\") as cc_cope:\r\n h_l_cc = tf.contrib.layers.convolution2d(inputs=input_l_cc, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(), biases_initializer=biases_initializer)\r\n h_r_cc = tf.contrib.layers.convolution2d(inputs=input_r_cc, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding, reuse=False,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(), biases_initializer=biases_initializer)\r\n\r\n\r\n h = (h_l_cc, h_r_cc)\r\n\r\n return h\r\n\r\n else:\r\n input_l_mlo, input_r_mlo = input_layer\r\n\r\n # with tf.variable_scope(layer_name + \"_CC\") as cc_cope:\r\n h_l_mlo = tf.contrib.layers.convolution2d(inputs=input_l_mlo, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(),\r\n biases_initializer=biases_initializer)\r\n h_r_mlo = tf.contrib.layers.convolution2d(inputs=input_r_mlo, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding, reuse=False,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(),\r\n biases_initializer=biases_initializer)\r\n\r\n h = (h_l_mlo, h_r_mlo)\r\n\r\n return h", "def test_on_conv_transpose_2d_two_by_two(self):\n w_init = lambda s: jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 0, 1))\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=objax.ConvPadding.VALID, w_init=w_init)\n x = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n y = jn.array([[[[2., 5., 5., 10., 8.],\n [11., 27., 32., 46., 32.],\n [24., 66., 76., 86., 56.],\n [40., 106., 116., 126., 80.],\n [39., 94., 101., 108., 64.]]]])\n self.assertEqual(conv(x).tolist(), y.tolist())", "def conv2d_transpose(self, output_shape, filter_):\n return self.add_layer(conv2d_transpose, output_shape, filter_)", "def test_on_conv_transpose_2d_padding(self):\n x = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n y = jn.array([[[[2., 5., 5., 10.], [11., 27., 32., 46.], [24., 66., 76., 86.], [40., 106., 116., 126.]]]])\n w_init = lambda s: jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 0, 1))\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=objax.ConvPadding.SAME, w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='same', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='Same', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding='SAME', w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=(1, 0), w_init=w_init)\n 
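# NOTE (editorial, hedged): for a 2x2 transposed kernel at stride 1,\n        # SAME padding appears equivalent to the explicit per-axis padding\n        # (1, 0) exercised above, which is why every variant in this test is\n        # checked against the same 4x4 expected output y.\n        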
self.assertEqual(conv(x).tolist(), y.tolist())\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=[(1, 0), (1, 0)], w_init=w_init)\n self.assertEqual(conv(x).tolist(), y.tolist())\n y = [[[[2., 5., 5., 10., 8.], [11., 27., 32., 46., 32.], [24., 66., 76., 86., 56.],\n [40., 106., 116., 126., 80.], [39., 94., 101., 108., 64.]]]]\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=1, w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=(1, 1), w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)\n conv = objax.nn.ConvTranspose2D(1, 1, 2, padding=[(1, 1), (1, 1)], w_init=w_init)\n self.assertEqual(conv(x).tolist(), y)", "def test_conv2d_transpose(ifm_shape, strides, kernel_size, out_channels, dtype, bias):\n np.random.seed(0)\n\n kernel_layout = \"IOHW\"\n dilation = (1, 1)\n groups = 1\n\n iinfo = np.iinfo(dtype)\n data_min = iinfo.min\n data_max = iinfo.max\n\n input_zp = np.random.randint(data_min, data_max)\n input_sc = np.random.random() * 2\n kernel_zp = np.random.randint(data_min, data_max)\n kernel_sc = np.random.random() * 4\n output_zp, output_sc = tei.get_conv2d_qnn_params(\n dtype, input_zp, input_sc, kernel_zp, kernel_sc, ifm_shape[1], ifm_shape[2], ifm_shape[3]\n )\n\n model, params = _get_model(\n shape=ifm_shape,\n kernel_h=kernel_size[0],\n kernel_w=kernel_size[1],\n input_zp=input_zp,\n input_sc=input_sc,\n kernel_zp=kernel_zp,\n kernel_sc=kernel_sc,\n output_zp=output_zp,\n output_sc=output_sc,\n stride=strides,\n dilation=dilation,\n groups=groups,\n kernel_layout=kernel_layout,\n dtype=dtype,\n out_channels=out_channels,\n bias=bias,\n )\n\n outputs = []\n inputs = {\n \"a\": tvm.nd.array(np.random.randint(data_min, data_max + 1, size=ifm_shape, dtype=dtype))\n }\n\n for npu in [False, True]:\n mod = tei.make_module(model, params)\n outputs.append(tei.build_and_run(mod, inputs, 1, params, npu=npu))\n\n tei.verify(outputs, dtype, 1)", "def conv2d_transpose_legalize(attrs, inputs, types):\n if attrs['data_layout'] == 'NHWC':\n data, kernel = inputs\n kernel_layout = attrs['kernel_layout']\n # Convert Kernel layout to IOHW\n # kernel_layout is different from input kernel layout - IO is swapped\n if kernel_layout == 'HWIO':\n # input kernel layout is swapped to HWOI\n # output kernel layout will be IOHW\n kernel = relay.transpose(kernel, axes=(3, 2, 0, 1))\n elif kernel_layout == 'HWOI':\n # input kernel layout is swapped to HWIO\n # output kernel layout will be IOHW\n kernel = relay.transpose(kernel, axes=(2, 3, 0, 1))\n elif kernel_layout == 'IOHW':\n # input kernel layout is swapped to OIHW\n # output kernel layout will be IOHW\n kernel = relay.transpose(kernel, axes=(1, 0, 2, 3))\n elif kernel_layout == 'OIHW':\n # input kernel layout is swapped to IOHW\n # output kernel layout will be IOHW\n pass\n else:\n # Skip legalize. 
Let relay.nn.conv2d_transpose to handle the case\n return None\n\n # Set new attrs for conv2d_transpose.\n new_attrs = {k: attrs[k] for k in attrs.keys()}\n new_attrs['data_layout'] = 'NCHW'\n # layout of kernel should be IOHW, but kernel_layout should be swapped - OIHW\n new_attrs['kernel_layout'] = 'OIHW'\n\n # Convert data to NCHW.\n data = relay.transpose(data, axes=(0, 3, 1, 2))\n deconv = relay.nn.conv2d_transpose(data, kernel, **new_attrs)\n # Convert back to original NHWC layout.\n out = relay.transpose(deconv, axes=(0, 2, 3, 1))\n return out\n\n return None", "def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype):\n data_pad, kernel_transform = \\\n conv2d_transpose_nchw_preprocess(data, kernel, strides, padding, out_dtype)\n batch, in_c, in_h, in_w = data_pad.shape\n out_c, _, filter_h, filter_w = kernel_transform.shape\n stride_h, stride_w = strides\n\n # convolution stage\n out_c = simplify(out_c)\n out_h = simplify(in_h - filter_h + 1)\n out_w = simplify(in_w - filter_w + 1)\n dc = tvm.reduce_axis((0, in_c), name='dc')\n dh = tvm.reduce_axis((0, filter_h), name='dh')\n dw = tvm.reduce_axis((0, filter_w), name='dw')\n\n Output = tvm.compute(\n (batch, out_c, out_h, out_w),\n lambda b, c, h, w: tvm.sum(\n data_pad[b, dc, h+dh, w+dw].astype(out_dtype) *\n kernel_transform[c, dc, dh, dw].astype(out_dtype),\n axis=[dc, dh, dw]), tag=\"conv2d_transpose_nchw\")\n\n return Output", "def test_coords_transformation():\n\n # H+R+S+T, not reverse, depth\n img_meta = {\n 'pcd_scale_factor':\n 1.2311e+00,\n 'pcd_rotation': [[8.660254e-01, 0.5, 0], [-0.5, 8.660254e-01, 0],\n [0, 0, 1.0e+00]],\n 'pcd_trans': [1.111e-02, -8.88e-03, 0.0],\n 'pcd_horizontal_flip':\n True,\n 'transformation_3d_flow': ['HF', 'R', 'S', 'T']\n }\n\n pcd = torch.tensor([[-5.2422e+00, -2.9757e-01, 4.0021e+01],\n [-9.1435e-01, 2.6675e+01, -5.5950e+00],\n [2.0089e-01, 5.8098e+00, -3.5409e+01],\n [-1.9461e-01, 3.1309e+01, -1.0901e+00]])\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'DEPTH', img_meta, reverse=False)\n\n expected_tensor = torch.tensor(\n [[5.78332345e+00, 2.900697e+00, 4.92698531e+01],\n [-1.5433839e+01, 2.8993850e+01, -6.8880045e+00],\n [-3.77929405e+00, 6.061661e+00, -4.35920199e+01],\n [-1.9053658e+01, 3.3491436e+01, -1.34202211e+00]])\n\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # H+R+S+T, reverse, depth\n img_meta = {\n 'pcd_scale_factor':\n 7.07106781e-01,\n 'pcd_rotation': [[7.07106781e-01, 7.07106781e-01, 0.0],\n [-7.07106781e-01, 7.07106781e-01, 0.0],\n [0.0, 0.0, 1.0e+00]],\n 'pcd_trans': [0.0, 0.0, 0.0],\n 'pcd_horizontal_flip':\n False,\n 'transformation_3d_flow': ['HF', 'R', 'S', 'T']\n }\n\n pcd = torch.tensor([[-5.2422e+00, -2.9757e-01, 4.0021e+01],\n [-9.1435e+01, 2.6675e+01, -5.5950e+00],\n [6.061661e+00, -0.0, -1.0e+02]])\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'DEPTH', img_meta, reverse=True)\n\n expected_tensor = torch.tensor(\n [[-5.53977e+00, 4.94463e+00, 5.65982409e+01],\n [-6.476e+01, 1.1811e+02, -7.91252488e+00],\n [6.061661e+00, -6.061661e+00, -1.41421356e+02]])\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # H+R+S+T, not reverse, camera\n img_meta = {\n 'pcd_scale_factor':\n 1.0 / 7.07106781e-01,\n 'pcd_rotation': [[7.07106781e-01, 0.0, 7.07106781e-01],\n [0.0, 1.0e+00, 0.0],\n [-7.07106781e-01, 0.0, 7.07106781e-01]],\n 'pcd_trans': [1.0e+00, -1.0e+00, 0.0],\n 'pcd_horizontal_flip':\n True,\n 'transformation_3d_flow': ['HF', 'S', 'R', 'T']\n }\n\n pcd = torch.tensor([[-5.2422e+00, 
4.0021e+01, -2.9757e-01],\n [-9.1435e+01, -5.5950e+00, 2.6675e+01],\n [6.061661e+00, -1.0e+02, -0.0]])\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'CAMERA', img_meta, reverse=False)\n\n expected_tensor = torch.tensor(\n [[6.53977e+00, 5.55982409e+01, 4.94463e+00],\n [6.576e+01, -8.91252488e+00, 1.1811e+02],\n [-5.061661e+00, -1.42421356e+02, -6.061661e+00]])\n\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # V, reverse, camera\n img_meta = {'pcd_vertical_flip': True, 'transformation_3d_flow': ['VF']}\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'CAMERA', img_meta, reverse=True)\n\n expected_tensor = torch.tensor([[-5.2422e+00, 4.0021e+01, 2.9757e-01],\n [-9.1435e+01, -5.5950e+00, -2.6675e+01],\n [6.061661e+00, -1.0e+02, 0.0]])\n\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # V+H, not reverse, depth\n img_meta = {\n 'pcd_vertical_flip': True,\n 'pcd_horizontal_flip': True,\n 'transformation_3d_flow': ['VF', 'HF']\n }\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'DEPTH', img_meta, reverse=False)\n\n expected_tensor = torch.tensor([[5.2422e+00, -4.0021e+01, -2.9757e-01],\n [9.1435e+01, 5.5950e+00, 2.6675e+01],\n [-6.061661e+00, 1.0e+02, 0.0]])\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)\n\n # V+H, reverse, lidar\n img_meta = {\n 'pcd_vertical_flip': True,\n 'pcd_horizontal_flip': True,\n 'transformation_3d_flow': ['VF', 'HF']\n }\n\n pcd_transformed = apply_3d_transformation(\n pcd, 'LIDAR', img_meta, reverse=True)\n\n expected_tensor = torch.tensor([[5.2422e+00, -4.0021e+01, -2.9757e-01],\n [9.1435e+01, 5.5950e+00, 2.6675e+01],\n [-6.061661e+00, 1.0e+02, 0.0]])\n assert torch.allclose(expected_tensor, pcd_transformed, 1e-4)", "def convt_block(layer, concat, fsize, name):\n with tf.variable_scope(name):\n\n layer = tf.layers.conv2d_transpose(layer, filters=fsize, kernel_size=2, strides=2, \n kernel_regularizer=l2_reg(1e-1), name='convt')\n layer = tf.concat([layer, concat], axis=-1, name='concat')\n\n return layer", "def testConnectTwice(self, input_shape, stride, kernel_shape, padding,\n output_shape):\n if len(input_shape) == 1:\n module = snt.Conv1DTranspose\n elif len(input_shape) == 2:\n module = snt.Conv2DTranspose\n elif len(input_shape) == 3:\n module = snt.Conv3DTranspose\n\n batch_size = [1]\n channels = [1]\n\n inputs = tf.zeros(shape=batch_size + input_shape + channels,\n dtype=tf.float32)\n inputs_2 = tf.zeros(shape=batch_size + input_shape + channels,\n dtype=tf.float32)\n conv1 = module(output_channels=1, kernel_shape=kernel_shape,\n stride=stride, padding=padding)\n outputs = conv1(inputs)\n\n # Connecting for the second time with the same shape should be OK.\n outputs_2 = conv1(inputs_2)\n\n # So should connecting with a different shape.\n new_input_shape = [25] * len(input_shape)\n new_inputs = tf.zeros(shape=batch_size + new_input_shape + channels,\n dtype=tf.float32)\n new_outputs = conv1(new_inputs)\n\n with self.test_session() as sess:\n tf.global_variables_initializer().run()\n outputs_array, outputs_array_2 = sess.run([outputs, outputs_2])\n self.assertEqual(outputs_array.shape, outputs_array_2.shape)\n\n sess.run(new_outputs)", "def inference(image,norm = True,phase_train = True):\n batch_size = image.shape[0]\n r,g,b = tf.split(axis = 3,num_or_size_splits = 3,value = image)\n p_image = tf.concat([r - 123.68,\n g - 116.78,\n b - 103.94],axis = 3)\n with tf.variable_scope('vgg_16'):\n with tf.variable_scope('conv1'):\n conv1_1 = layer.conv_layer('conv1_1',p_image,[3,3,3,64])\n 
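# NOTE (editorial, hedged): conv1_1 above and conv1_2 below form the\n # first VGG-16 block: two 3x3 convolutions with 64 channels followed by\n # 2x2 max pooling, halving the spatial resolution of the mean-subtracted\n # input p_image.\n 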
conv1_2 = layer.conv_layer('conv1_2',conv1_1,[3,3,64,64])\n pool1 = layer.pool_layer('pool1',conv1_2)\n with tf.variable_scope('conv2'):\n conv2_1 = layer.conv_layer('conv2_1',pool1,[3,3,64,128])\n conv2_2 = layer.conv_layer('conv2_2',conv2_1,[3,3,128,128])\n pool2 = layer.pool_layer('pool2',conv2_2)\n with tf.variable_scope('conv3'):\n conv3_1 = layer.conv_layer('conv3_1',pool2,[3,3,128,256])\n conv3_2 = layer.conv_layer('conv3_2',conv3_1,[3,3,256,256])\n conv3_3 = layer.conv_layer('conv3_3',conv3_2,[3,3,256,256])\n pool3 = layer.pool_layer('pool3',conv3_3)\n with tf.variable_scope('conv4'):\n conv4_1 = layer.conv_layer('conv4_1',pool3,[3,3,256,512])\n conv4_2 = layer.conv_layer('conv4_2',conv4_1,[3,3,512,512])\n conv4_3 = layer.conv_layer('conv4_3',conv4_2,[3,3,512,512])\n pool4 = layer.pool_layer('pool4',conv4_3)\n with tf.variable_scope('conv5'):\n conv5_1 = layer.conv_layer('conv5_1',pool4,[3,3,512,512])\n conv5_2 = layer.conv_layer('conv5_2',conv5_1,[3,3,512,512])\n conv5_3 = layer.conv_layer('conv5_3',conv5_2,[3,3,512,512])\n pool5 = layer.pool_layer('pool5',conv5_3,ksize = [1,3,3,1],strides = [1,1,1,1])\n with tf.variable_scope('ssd'):\n conv6 = layer.atrous_conv('conv6',pool5,[3,3,512,1024],rate = 6,\n batch_normalization = norm,phase_train = phase_train)\n conv7 = layer.conv_layer('conv7',conv6,[1,1,1024,1024],\n batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv8'):\n conv8_1 = layer.conv_layer('conv8_1',conv7,[1,1,1024,256],\n batch_normalization = norm,phase_train = phase_train)\n conv8_2 = layer.conv_layer('conv8_2',conv8_1,[3,3,256,512],\n stride = [1,2,2,1],batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv9'):\n conv9_1 = layer.conv_layer('conv9_1',conv8_2,[1,1,512,128],\n batch_normalization = norm,phase_train = phase_train)\n conv9_2 = layer.conv_layer('conv9_2',conv9_1,[3,3,128,256],\n stride = [1,2,2,1],batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv10'):\n conv10_1 = layer.conv_layer('conv10_1',conv9_2,[1,1,256,128],\n batch_normalization = norm,phase_train = phase_train)\n conv10_2 = layer.conv_layer('conv10_2',conv10_1,[3,3,128,256],\n padding = 'VALID',batch_normalization = norm,phase_train = phase_train)\n with tf.variable_scope('conv11'):\n conv11_1 = layer.conv_layer('conv11_1',conv10_2,[1,1,256,128],\n batch_normalization = norm,phase_train = phase_train)\n conv11_2 = layer.conv_layer('conv11_2',conv11_1,[3,3,128,256],\n padding = 'VALID',batch_normalization = norm,phase_train = phase_train)#vgg300\n with tf.variable_scope('multibox'):\n\n l2_conv4_3 = layer.l2_normalization('l2_normalization',conv4_3,scaling = True)\n cls4 = layer.conv_layer('cls4',l2_conv4_3,[3,3,512,84],activation = None)\n loc4 = layer.conv_layer('loc4',l2_conv4_3,[3,3,512,16],activation = None)\n\n cls4_reshape = tf.reshape(cls4,[batch_size,-1,21])\n loc4_reshape = tf.reshape(loc4,[batch_size,-1,4])\n\n\n cls7 = layer.conv_layer('cls7',conv7,[3,3,1024,126],activation = None)\n loc7 = layer.conv_layer('loc7',conv7,[3,3,1024,24],activation = None)\n\n cls7_reshape = tf.reshape(cls7,[batch_size,-1,21])\n loc7_reshape = tf.reshape(loc7,[batch_size,-1,4])\n\n cls8 = layer.conv_layer('cls8',conv8_2,[3,3,512,126],activation = None)\n loc8 = layer.conv_layer('loc8',conv8_2,[3,3,512,24],activation = None)\n\n cls8_reshape = tf.reshape(cls8,[batch_size,-1,21])\n loc8_reshape = tf.reshape(loc8,[batch_size,-1,4])\n\n cls9 = layer.conv_layer('cls9',conv9_2,[3,3,256,126],activation = None)\n loc9 = 
layer.conv_layer('loc9',conv9_2,[3,3,256,24],activation = None)\n\n cls9_reshape = tf.reshape(cls9,[batch_size,-1,21])\n loc9_reshape = tf.reshape(loc9,[batch_size,-1,4])\n\n cls10 = layer.conv_layer('cls10',conv10_2,[3,3,256,84],activation = None)\n loc10 = layer.conv_layer('loc10',conv10_2,[3,3,256,16],activation = None)\n\n cls10_reshape = tf.reshape(cls10,[batch_size,-1,21])\n loc10_reshape = tf.reshape(loc10,[batch_size,-1,4])\n\n cls11 = layer.conv_layer('cls11',conv11_2,[1,1,256,84],activation = None)\n loc11 = layer.conv_layer('loc11',conv11_2,[1,1,256,16],activation = None)\n\n cls11_reshape = tf.reshape(cls11,[batch_size,-1,21])\n loc11_reshape = tf.reshape(loc11,[batch_size,-1,4])\n\n cls_logit = tf.concat([\n cls4_reshape,\n cls7_reshape,\n cls8_reshape,\n cls9_reshape,\n cls10_reshape,\n cls11_reshape\n ],1)\n loc_logit = tf.concat([\n loc4_reshape,\n loc7_reshape,\n loc8_reshape,\n loc9_reshape,\n loc10_reshape,\n loc11_reshape\n ],1)\n \n return cls_logit,loc_logit", "def _schedule_spatial_pack(cfg, s, op, layout):\n\n assert layout in (\"NCHW\", \"NHWC\")\n\n output = op.output(0)\n conv = op.input_tensors[0]\n data_vec = conv.op.input_tensors[0]\n data_pad = data_vec.op.input_tensors[0]\n s[data_pad].compute_inline()\n kernel_vec = conv.op.input_tensors[1]\n if kernel_vec.op.name == \"kernel_vec\":\n kernel = kernel_vec.op.input_tensors[0]\n else:\n kernel = kernel_vec\n if isinstance(kernel.op, tvm.te.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n data = s[data_vec].op.input_tensors[0]\n\n max_unroll = 16\n vec_size = [1, 2, 4, 8, 16]\n # get tunable parameters (they are defined in compute)\n _, TC, VC = cfg[\"tile_co\"].size\n _, TH, VH = cfg[\"tile_oh\"].size\n _, TW, VW = cfg[\"tile_ow\"].size\n\n # schedule padding\n if isinstance(data.op, tvm.te.ComputeOp) and \"pad\" in data.op.tag:\n data_pad = data\n s[data_pad].compute_inline()\n\n # schedule data packing\n if layout == \"NCHW\":\n if isinstance(data_vec.op, tvm.te.ComputeOp) and data_vec.op.name == \"data_vec_undilated\":\n _, h, w, ci, _, _, vh, vw = s[data_vec].op.axis\n else:\n _, h, w, ci, vh, vw = s[data_vec].op.axis\n z, y, x, unroll1, unroll2 = h, w, ci, vh, vw\n else:\n if isinstance(data_vec.op, tvm.te.ComputeOp) and data_vec.op.name == \"data_vec_undilated\":\n _, oho, owo, _, _, ic, ohi, owi = s[data_vec].op.axis\n else:\n _, oho, owo, ohi, owi, ic = s[data_vec].op.axis\n z, y, x, unroll1, unroll2 = oho, owo, ohi, ic, owi\n tile_and_bind3d(s, data_vec, z, y, x, 1)\n if unroll1.dom.extent.value < max_unroll:\n s[data_vec].unroll(unroll1)\n if unroll2.dom.extent.value < max_unroll:\n s[data_vec].unroll(unroll2)\n\n if isinstance(kernel_vec.op, tvm.te.ComputeOp) and kernel_vec.name == \"kernel_vec\":\n if not autotvm.GLOBAL_SCOPE.in_tuning:\n max_threads = tvm.target.Target.current(allow_none=False).max_num_threads\n ax1, ax2, ax3, ax4, ax5 = s[kernel_vec].op.axis\n fused = s[kernel_vec].fuse(ax1, ax2, ax3, ax4, ax5)\n fused, vec = s[kernel_vec].split(fused, VC)\n bb, tt = s[kernel_vec].split(fused, max_threads)\n s[kernel_vec].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[kernel_vec].bind(tt, te.thread_axis(\"threadIdx.x\"))\n if VC in vec_size:\n s[kernel_vec].vectorize(vec)\n\n # schedule convolution\n ic, kh, kw = s[conv].op.reduce_axis\n if layout == \"NCHW\":\n kh_dim, kw_dim = kernel_vec.shape[2], kernel_vec.shape[3]\n else:\n kh_dim, kw_dim = kernel_vec.shape[0], kernel_vec.shape[1]\n cfg[\"ann_reduce\"].apply(\n s,\n conv,\n [kh, kw],\n 
axis_lens=[get_const_int(kh_dim), get_const_int(kw_dim)],\n max_unroll=max_unroll,\n )\n\n if layout == \"NCHW\":\n n, c, h, w, vh, vw, vc = s[conv].op.axis\n cfg[\"reorder_0\"].apply(s, conv, [n, c, h, w, ic, kh, kw, vh, vw, vc])\n tile_and_bind3d(s, conv, c, h, w, TC, TH, TW)\n unroll_vec_axes = [vh, vw, vc]\n axis_lens = [VH, VW, VC]\n else:\n n, oho, owo, oco, ohi, owi, oci = s[conv].op.axis\n cfg[\"reorder_conv\"].apply(s, conv, [n, oho, owo, oco, kh, kw, ic, ohi, owi, oci])\n tile_and_bind3d(s, conv, oho, owo, oco, TH, TW, TC)\n unroll_vec_axes = [ohi, owi, oci]\n axis_lens = [VH, VW, VC]\n\n cfg[\"ann_spatial\"].apply(\n s, conv, unroll_vec_axes, axis_lens, max_unroll=max_unroll, vec_size=vec_size, cfg=cfg\n )\n\n # schedule output\n if output.op not in s.outputs: # has bias\n s[output].compute_inline()\n output = s.outputs[0]\n if layout == \"NCHW\":\n _, co, oh, ow = s[output].op.axis\n tile_and_bind3d(s, output, co, oh, ow, TC, TH, TW)\n else:\n _, oh, ow, co = s[output].op.axis\n tile_and_bind3d(s, output, oh, ow, co, TH, TW, TC)\n\n return s", "def __call__(self, inputs):\n with tf.variable_scope('conv_t_{}'.format(self.idx)):\n activation_fn = get_act_fn(self.act_fn)\n\n if self.cfg.VAR_ON_CPU:\n kernels = variable_on_cpu(\n name='kernels',\n shape=[self.kernel_size, self.kernel_size,\n self.n_kernel, inputs.get_shape().as_list()[3]],\n initializer=self.w_init_fn,\n dtype=tf.float32)\n conv_t = tf.nn.conv2d_transpose(\n value=inputs,\n filter=kernels,\n output_shape=self.output_shape,\n strides=[1, self.stride, self.stride, 1],\n padding=self.padding)\n\n if self.use_bias:\n biases = variable_on_cpu(\n name='biases',\n shape=[self.n_kernel],\n initializer=tf.zeros_initializer(),\n dtype=tf.float32)\n conv_t = tf.nn.bias_add(conv_t, biases)\n\n if activation_fn is not None:\n conv_t = activation_fn(conv_t)\n\n else:\n biases_initializer = tf.zeros_initializer() if self.use_bias else None\n conv_t = tf.contrib.layers.conv2d_transpose(\n inputs=inputs,\n num_outputs=self.n_kernel,\n kernel_size=self.kernel_size,\n stride=self.stride,\n padding=self.padding,\n activation_fn=activation_fn,\n weights_initializer=self.w_init_fn,\n biases_initializer=biases_initializer)\n\n return conv_t", "def _get_model(\n shape,\n kernel_h,\n kernel_w,\n input_zp,\n input_sc,\n kernel_zp,\n kernel_sc,\n output_zp,\n output_sc,\n stride,\n dilation,\n groups,\n kernel_layout,\n dtype,\n out_channels,\n bias,\n):\n a = relay.var(\"a\", shape=shape, dtype=dtype)\n p = tei.get_same_padding((shape[1], shape[2]), (kernel_h, kernel_w), dilation, stride)\n weight_shape = (shape[3], out_channels // groups, kernel_h, kernel_w)\n\n weight_data = tvm.nd.array(\n np.random.randint(\n np.iinfo(dtype).min,\n high=(np.iinfo(dtype).max + 1),\n size=weight_shape,\n dtype=dtype,\n )\n )\n weights = relay.const(weight_data, dtype)\n op = relay.qnn.op.conv2d_transpose(\n a,\n weights,\n input_zero_point=relay.const(input_zp, \"int32\"),\n input_scale=relay.const(input_sc, \"float32\"),\n kernel_zero_point=relay.const(kernel_zp, \"int32\"),\n kernel_scale=relay.const(kernel_sc, \"float32\"),\n kernel_size=(kernel_h, kernel_w),\n padding=p,\n strides=stride,\n dilation=dilation,\n data_layout=\"NHWC\",\n kernel_layout=kernel_layout,\n out_dtype=\"int32\",\n channels=out_channels,\n groups=groups,\n )\n if bias:\n bias_data = tvm.nd.array(\n np.random.randint(\n np.iinfo(dtype).min,\n high=np.iinfo(dtype).max + 1,\n size=(out_channels,),\n dtype=\"int32\",\n )\n )\n biasc = relay.const(bias_data, \"int32\")\n op = 
relay.nn.bias_add(op, biasc, axis=3)\n\n if isinstance(kernel_sc, tvm.runtime.ndarray.NDArray):\n req_input_sc = [sc * input_sc for sc in kernel_sc.numpy()]\n else:\n req_input_sc = input_sc * kernel_sc\n\n op = relay.qnn.op.requantize(\n op,\n input_zero_point=relay.const(input_zp, \"int32\"),\n input_scale=relay.const(req_input_sc, \"float32\"),\n output_zero_point=relay.const(output_zp, \"int32\"),\n output_scale=relay.const(output_sc, \"float32\"),\n axis=3,\n rounding=\"UPWARD\",\n out_dtype=dtype,\n )\n params = {\"w\": weight_data}\n if bias:\n params[\"b\"] = bias_data\n return op, params", "def myWarpPerspective(img, H, output_shapes):\n c, r = output_shapes\n \n # Create an output canvas according to the parameter \"output_shapes\"\n if len(img.shape) == 3:\n output = np.zeros((r, c, 3))\n else:\n output = np.zeros((r, c, 1))\n\n # List of pixel coordinates in canvas\n inverse_map = [[i, j] for i in range(c) for j in range(r)]\n\n # Covert the coordinates in the system of img2 back to the system of img1 \n # to find out the reference points\n inverse_map = np.asarray(inverse_map)\n inverse_map = myPerspectiveTransform(inverse_map, np.linalg.inv(H))\n \n \n for i in range(c):\n for j in range(r):\n index = i*r + j\n ix, iy = inverse_map[index]\n \n # Because the converted coords. are float, \n # we need to find out four ref. points to do bilinear interpolation\n tix, bix = np.ceil(ix), np.floor(ix)\n tiy, biy = np.ceil(iy), np.floor(iy)\n\n x_ratio = ix - bix\n y_ratio = iy - biy\n\n # Indexing does not allow float indices\n tix, bix, tiy, biy = np.int32(tix), np.int32(bix), np.int32(tiy), np.int32(biy)\n \n # Boundary checking: each ref point should locate within the input image\n if bix < 0 or biy < 0 or tix >= img.shape[1] or tiy >= img.shape[0]:\n continue\n else:\n # Bilinear interpolation\n output[j, i] = x_ratio*y_ratio*img[tiy, tix] \\\n + x_ratio*(1-y_ratio)*img[biy, tix] \\\n + (1-x_ratio)*y_ratio*img[tiy, bix] \\\n + (1-x_ratio)*(1-y_ratio)*img[biy, bix]\n output[j, i] = np.round(output[j, i])\n\n # Cast back to uint8 because of displaying and return results\n return np.uint8(output)", "def conv_transpose_block(inputs, n_filters, strides=2, filter_size=[3, 3], dropout_p=0.0):\n\tconv = slim.conv2d_transpose(inputs, n_filters, kernel_size=[3, 3], stride=[strides, strides])\n\tout = tf.nn.relu(slim.batch_norm(conv, fused=True))\n\tif dropout_p != 0.0:\n\t out = slim.dropout(out, keep_prob=(1.0-dropout_p))\n\treturn out", "def conv_transpose1d(self, kernel, **kwargs):\n raise NotImplementedError(\"conv_transpose1d is not implemented\")", "def spatialtransformer(U,\n theta,\n batch_size=64,\n downsample_factor=1.0,\n num_transform=1,\n name='SpatialTransformer',\n **kwargs):\n with tf.variable_scope(name):\n if num_transform > 1 and len(theta.get_shape().as_list()) == 3:\n _, num_transforms = map(int, theta.get_shape().as_list()[:2])\n indices = [[i] * num_transforms for i in range(batch_size)]\n U = tf.gather(U, tf.reshape(indices, [-1]))\n\n input_shape = U.get_shape().as_list()\n num_channels = input_shape[3]\n theta = tf.reshape(theta, (-1, 2, 3))\n theta = tf.cast(theta, tf.float32)\n if not isinstance(downsample_factor, float):\n downsample_factor = tf.cast(downsample_factor, tf.float32)\n\n # grid of (x_t, y_t, 1), eq (1) in ref [1]\n out_height = tf.cast(input_shape[1] / downsample_factor, tf.int32)\n out_width = tf.cast(input_shape[2] / downsample_factor, tf.int32)\n grid = _meshgrid(out_height, out_width)\n grid = tf.expand_dims(grid, 0)\n grid = tf.reshape(grid, 
[-1])\n grid = tf.tile(grid, tf.stack([batch_size]))\n grid = tf.reshape(grid, tf.stack([batch_size, 3, -1]))\n\n # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)\n T_g = tf.matmul(theta, grid)\n x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])\n y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])\n x_s_flat = tf.reshape(x_s, [-1])\n y_s_flat = tf.reshape(y_s, [-1])\n\n input_transformed = _interpolate(U, x_s_flat, y_s_flat, batch_size, downsample_factor)\n\n output = tf.reshape(input_transformed,\n tf.stack([batch_size, out_height, out_width, num_channels]))\n return output", "def three_layers_cnn( input_layer ):\n # Convolutional Layer #1\n # Computes 8 features using a 4x4 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, NXCHANNELS, NVCHANNELS, 1]\n # Output Tensor Shape: [batch_size, NXCHANNELS, NVCHANNELS, 8]\n\n conv1 = tf.layers.conv2d(\n inputs = input_layer,\n filters = 8,\n kernel_size = [4,4],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 64, 64, 8]\n # Output Tensor Shape: [batch_size, 32, 32, 8]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(2,2), strides=2)\n\n # Convolutional Layer #2\n # Computes 16 features using a 4x4 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 32, 32, 8 ]\n # Output Tensor Shape: [batch_size, 32, 32, 16]\n conv2 = tf.layers.conv2d(\n inputs = pool1,\n filters = 16,\n kernel_size = [4,4],\n padding =\"same\",\n activation =tf.nn.relu)\n\n # Pooling Layer #2\n # Second max pooling layer with a 2x2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 32, 32, 16]\n # Output Tensor Shape: [batch_size, 16, 16, 16]\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(2,2), strides=2)\n \n # Convolutional Layer #3\n # Computes 32 features using a 4x4 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 16, 16, 16 ]\n # Output Tensor Shape: [batch_size, 16, 16, 32 ]\n conv3 = tf.layers.conv2d(\n inputs = pool2,\n filters = 32,\n kernel_size = [4,4],\n padding =\"same\",\n activation =tf.nn.relu)\n \n # Pooling Layer #3\n # Third max pooling layer with a 4x4 filter and stride of 4\n # Input Tensor Shape: [batch_size, 16, 16, 32]\n # Output Tensor Shape: [batch_size, 4, 4, 32]\n pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=(4,4), strides=4)\n \n\n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 4, 4, 32]\n # Output Tensor Shape: [batch_size, 4x4x32 ]\n pool3_flat = tf.reshape(pool3, [-1, 4*4*32 ])\n\n return pool3_flat", "def call(self, inputs, state):\n dtype = inputs.dtype\n input_size = inputs.get_shape().with_rank(4)[3]\n if input_size.value is None:\n raise ValueError('Could not infer size from inputs.get_shape()[-1]')\n\n c_prev, m_prev = state\n inputs = tf.concat([inputs, m_prev], axis=-1)\n\n if not self._w_conv:\n scope = tf.get_variable_scope()\n with tf.variable_scope(scope, initializer=self._initializer):\n kernel_shape = self._kernel + [inputs.shape[-1].value, 4 * self._depth]\n self._w_conv = tf.get_variable('w_conv', shape=kernel_shape, dtype=dtype)\n\n # i = input_gate, j = new_input, f = forget_gate, o = output_gate\n conv = tf.nn.conv2d(inputs, self._w_conv, (1, 1, 1, 1), 'SAME')\n i, j, f, o = tf.split(conv, 4, axis=-1)\n\n # Diagonal connections\n if self._use_peepholes and not self._w_f_diag:\n scope = 
tf.get_variable_scope()\n with tf.variable_scope(scope, initializer=self._initializer):\n self._w_f_diag = tf.get_variable('w_f_diag', c_prev.shape[1:], dtype=dtype)\n self._w_i_diag = tf.get_variable('w_i_diag', c_prev.shape[1:], dtype=dtype)\n self._w_o_diag = tf.get_variable('w_o_diag', c_prev.shape[1:], dtype=dtype)\n\n if self._use_peepholes:\n f = f + self._w_f_diag * c_prev\n i = i + self._w_i_diag * c_prev\n if self._normalize is not None:\n f = self._normalize(f)\n i = self._normalize(i)\n j = self._normalize(j)\n\n j = self._activation(j)\n\n if self._dropout is not None:\n j = tf.nn.dropout(j, self._dropout)\n\n c = tf.nn.sigmoid(f + self._forget_bias) * c_prev + tf.nn.sigmoid(i) * j\n\n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n c = tf.clip_by_value(c, -self._cell_clip, self._cell_clip)\n # pylint: enable=invalid-unary-operand-type\n if self._use_peepholes:\n o = o + self._w_o_diag * c\n if self._normalize is not None:\n o = self._normalize(o)\n c = self._normalize(c)\n\n m = tf.nn.sigmoid(o) * self._activation(c)\n\n new_state = tf.nn.rnn_cell.LSTMStateTuple(c, m)\n return m, new_state", "def __init__(\n self,\n in_shape: Tuple,\n kernel_size: int,\n out_channels: int = None,\n stride: int = 1,\n aux_shape: Optional[Tuple] = None,\n downsampling_mode: str = \"convolutional\",\n upsampling_mode: str = \"convolutional\",\n transposed: bool = False,\n residual: bool = True,\n weightnorm: bool = True,\n gated: bool = True,\n activation: nn.Module = nn.ReLU,\n dropout: Optional[float] = None,\n ):\n super().__init__(in_shape=in_shape, transposed=transposed, residual=residual, aux_shape=aux_shape)\n\n # some parameters\n self.channels_in = in_shape[0]\n self.channels_out = out_channels\n self.kernel_size = kernel_size\n self.stride = stride\n self.resample_mode = upsampling_mode if transposed else downsampling_mode\n self.transposed = transposed\n self.residual = residual\n self.gated = gated\n self.activation_pre = activation() if self.residual else None\n\n # first convolution is always non-transposed and stride 1\n self.conv1 = TransposeableNormedSameConv2d(\n in_shape=in_shape,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=1,\n transposed=False,\n resample_mode=\"convolutional\",\n weightnorm=weightnorm,\n )\n\n # aux op\n if aux_shape is not None:\n self.activation_aux = activation()\n\n if list(aux_shape[1:]) > list(self.conv1.out_shape[1:]):\n # Downsample height and width (and match channels)\n aux_stride = tuple(np.asarray(aux_shape[1:]) // np.asarray(self.conv1.out_shape[1:]))\n self.aux_op = TransposeableNormedSameConv2d(\n in_shape=aux_shape,\n out_channels=self.conv1.out_shape[0],\n kernel_size=kernel_size,\n stride=aux_stride,\n transposed=False,\n resample_mode=self.resample_mode,\n weightnorm=weightnorm,\n )\n elif list(aux_shape[1:]) < list(self.conv1.out_shape[1:]):\n # Upsample height and width (and match channels)\n aux_stride = tuple(np.asarray(self.conv1.out_shape[1:]) // np.asarray(aux_shape[1:]))\n self.aux_op = TransposeableNormedSameConv2d(\n in_shape=aux_shape,\n out_channels=self.conv1.out_shape[0],\n kernel_size=kernel_size,\n stride=aux_stride,\n transposed=True,\n resample_mode=self.resample_mode,\n weightnorm=weightnorm,\n )\n elif aux_shape[0] != self.conv1.out_shape[0]:\n # Change only channels using 1x1 convolution\n self.aux_op = TransposeableNormedSameConv2d(\n in_shape=aux_shape,\n out_channels=self.conv1.out_shape[0],\n kernel_size=1,\n stride=1,\n transposed=False,\n 
resample_mode=self.resample_mode,\n weightnorm=weightnorm,\n )\n else:\n # aux_shape and out_shape are the same\n assert aux_shape == self.conv1.out_shape\n self.aux_op = None\n else:\n self.aux_op = None\n\n self.activation_mid = activation()\n\n # dropout\n self.dropout = nn.Dropout(dropout) if dropout else dropout\n\n # second convolution is potentially transposed and potentially resampling\n gated_channels = 2 * out_channels if self.gated else out_channels\n self.conv2 = TransposeableNormedSameConv2d(\n in_shape=self.conv1.out_shape,\n out_channels=gated_channels,\n kernel_size=kernel_size,\n stride=self.stride,\n weightnorm=weightnorm,\n transposed=transposed,\n resample_mode=self.resample_mode,\n ) # doubled out channels for gating\n\n # output shape\n self._out_shape = (out_channels, *self.conv2.out_shape[1:]) # always out_channels regardless of gating\n\n # residual connections\n self.residual_op = ResidualConnectionConv2d(self._in_shape, self._out_shape, residual)", "def conv2x2t(in_planes, out_planes, stride=1):\n return nn.ConvTranspose2d(in_planes, out_planes, kernel_size=2, stride=stride)", "def out_conv(spatial, config):\n p, k, s = [config[k] \n for k in ['padding', 'kernel_size', 'stride']]\n p2 = p if isinstance(p, int) else p[0] + p[1]\n\n return (spatial + p2 - k)//s + 1", "def conv2d_transpose_legalize(attrs, inputs, types):\n data, kernel = inputs\n kernel_layout = attrs[\"kernel_layout\"]\n\n target = tvm.target.Target.current(allow_none=True)\n if target and \"cudnn\" in target.libs:\n # cuDNN backend can directly operate on NHWC layout.\n return None\n\n if attrs[\"data_layout\"] == \"NHWC\":\n kernel = layout_transform(kernel, kernel_layout, \"IOHW\")\n\n # Set new attrs for conv2d_transpose.\n new_attrs = {k: attrs[k] for k in attrs.keys()}\n new_attrs[\"data_layout\"] = \"NCHW\"\n # layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW\n new_attrs[\"kernel_layout\"] = \"IOHW\"\n\n # Convert data to NCHW.\n data = relay.transpose(data, axes=(0, 3, 1, 2))\n deconv = relay.nn.conv2d_transpose(data, kernel, **new_attrs)\n # Convert back to original NHWC layout.\n out = relay.transpose(deconv, axes=(0, 2, 3, 1))\n return out\n\n if attrs[\"data_layout\"] == \"NCHW\":\n kernel = layout_transform(kernel, kernel_layout, \"IOHW\")\n new_attrs = {k: attrs[k] for k in attrs.keys()}\n\n # layout of kernel should be IOHW, but kernel_layout will be swapped - OIHW\n new_attrs[\"kernel_layout\"] = \"IOHW\"\n return relay.nn.conv2d_transpose(data, kernel, **new_attrs)\n\n return None", "def test_on_conv_transpose_2d_stride(self):\n\n # Channels/Colors, #filters, filter_size (square)\n conv_filter = objax.nn.ConvTranspose2D(1, 1, 2, strides=2, padding=objax.ConvPadding.VALID)\n weights = objax.TrainVar(jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 0, 1)))\n conv_filter.w = weights\n image = jn.array([[[[2., 1., 3., 4.],\n [5., 6., 7., 8.], [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n # NCHW: Batch, Channels/Colors, Height, Width\n features = conv_filter(image)\n expected_features = jn.array([[[[2., 4., 1., 2., 3., 6., 4., 8.],\n [6., 8., 3., 4., 9., 12., 12., 16.],\n [5., 10., 6., 12., 7., 14., 8., 16.],\n [15., 20., 18., 24., 21., 28., 24., 32.],\n [9., 18., 10., 20., 11., 22., 12., 24.],\n [27., 36., 30., 40., 33., 44., 36., 48.],\n [13., 26., 14., 28., 15., 30., 16., 32.],\n [39., 52., 42., 56., 45., 60., 48., 64.]]]])\n self.assertEqual(features.shape, (1, 1, 8, 8))\n self.assertTrue(jn.array_equal(features, expected_features))", "def 
cvpr2018_net_T1T2(vol_size, enc_nf, dec_nf, full_size=True, indexing='ij'):\n ndims = len(vol_size)\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. found: %d\" % ndims\n\n # get the core model\n unet_model_channel1 = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)\n [srcT1, tgtT1] = unet_model_channel1.inputs\n x_out_T1 = unet_model_channel1.outputs\n unet_model_channel2 = unet_core(vol_size, enc_nf, dec_nf, full_size=full_size)\n [srcT2, tgtT2] = unet_model_channel2.inputs\n x_out_T2 = unet_model_channel2.outputs\n\n # transform the results into a flow field.\n Conv = getattr(KL, 'Conv%dD' % ndims)\n flow_T1 = Conv(ndims, kernel_size=3, padding='same', name='flow_T1',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x_out_T1)\n flow_T2 = Conv(ndims, kernel_size=3, padding='same', name='flow_T2',\n kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x_out_T2)\n flow = MergeInputs3D()([flow_T1, flow_T2])\n\n # warp the source with the flow\n y_T1_flowT1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT1, flow_T1])\n y_T2_flowT2 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT2, flow_T2])\n y_T1 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT1, flow])\n y_T2 = nrn_layers.SpatialTransformer(interp_method='linear', indexing=indexing)([srcT2, flow])\n # prepare model\n model = Model(inputs=[srcT1, tgtT1, srcT2, tgtT2], outputs=[y_T1, y_T2, flow, y_T1_flowT1, y_T2_flowT2])\n return model", "def test_axis_preservation(conv1d_placeholder, output_size):\n conv_layer = Convolution((3, output_size), lambda x: 1)\n output = conv_layer(conv1d_placeholder)\n assert output.axes == conv1d_placeholder.axes, (\"Output axes are not the same as input axes: \"\n \"{} != {}\").format(output.axes,\n conv1d_placeholder.axes)", "def setUp(self):\n\n super(Conv3DTransposeTest, self).setUp()\n\n self.batch_size = 7\n self.in_depth = 7\n self.in_height = 7\n self.in_width = 11\n self.in_channels = 4\n self.out_channels = 10\n self.kernel_shape_d = 5\n self.kernel_shape_h = 5\n self.kernel_shape_w = 7\n self.stride_d = 1\n self.stride_h = 2\n self.stride_w = 3\n self.padding = snt.SAME\n\n self.in_shape = (self.batch_size, self.in_depth, self.in_height,\n self.in_width, self.in_channels)\n\n self.out_shape = (self.in_depth, self.in_height, self.in_width)\n\n self.kernel_shape = (self.kernel_shape_d, self.kernel_shape_h,\n self.kernel_shape_w)\n\n self.kernel_shape2 = (self.kernel_shape_d, self.kernel_shape_h,\n self.kernel_shape_w, self.out_channels,\n self.in_channels)\n\n self.strides = (self.stride_d, self.stride_h, self.stride_w)", "def get_perspective_transform(points_src: Tensor, points_dst: Tensor) -> Tensor:\n KORNIA_CHECK_SHAPE(points_src, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK_SHAPE(points_dst, [\"B\", \"4\", \"2\"])\n KORNIA_CHECK(points_src.shape == points_dst.shape, \"Source data shape must match Destination data shape.\")\n KORNIA_CHECK(points_src.dtype == points_dst.dtype, \"Source data type must match Destination data type.\")\n\n # we build matrix A by using only 4 point correspondence. 
The linear\n # system is solved with the least square method, so here\n # we could even pass more correspondence\n\n # create the lhs tensor with shape # Bx8x8\n B: int = points_src.shape[0] # batch_size\n\n A = torch.empty(B, 8, 8, device=points_src.device, dtype=points_src.dtype)\n\n # we need to perform in batch\n _zeros = zeros(B, device=points_src.device, dtype=points_src.dtype)\n _ones = torch.ones(B, device=points_src.device, dtype=points_src.dtype)\n\n for i in range(4):\n x1, y1 = points_src[..., i, 0], points_src[..., i, 1] # Bx4\n x2, y2 = points_dst[..., i, 0], points_dst[..., i, 1] # Bx4\n\n A[:, 2 * i] = stack([x1, y1, _ones, _zeros, _zeros, _zeros, -x1 * x2, -y1 * x2], -1)\n A[:, 2 * i + 1] = stack([_zeros, _zeros, _zeros, x1, y1, _ones, -x1 * y2, -y1 * y2], -1)\n\n # the rhs tensor\n b = points_dst.view(-1, 8, 1)\n\n # solve the system Ax = b\n X: Tensor = _torch_solve_cast(A, b)\n\n # create variable to return the Bx3x3 transform\n M = torch.empty(B, 9, device=points_src.device, dtype=points_src.dtype)\n M[..., :8] = X[..., 0] # Bx8\n M[..., -1].fill_(1)\n\n return M.view(-1, 3, 3) # Bx3x3", "def setUp(self):\n\n super(Conv2DTransposeTest, self).setUp()\n\n self.batch_size = 100\n self.in_height = 32\n self.in_width = 32\n self.in_channels = 3\n self.out_channels = 10\n self.kernel_shape_h = 5\n self.kernel_shape_w = 5\n self.strides = (1, 1, 1, 1)\n self.padding = snt.SAME\n\n self.in_shape = (self.batch_size, self.in_height, self.in_width,\n self.in_channels)\n\n self.out_shape = (self.in_height, self.in_width)\n\n self.kernel_shape = (self.kernel_shape_h, self.kernel_shape_w)\n\n self.kernel_shape2 = (self.kernel_shape_h, self.kernel_shape_w,\n self.out_channels, self.in_channels)", "def test_direct_shape():\n\n n = 21\n x = np.ones((n, n))\n\n recon = abel.direct.direct_transform(x, direction='forward')\n assert recon.shape == (n, n) \n\n recon = abel.direct.direct_transform(x, direction='inverse')\n assert recon.shape == (n, n)", "def ternausnetv1(input_shape=(512, 512, 3), base_depth=64):\n inputs = Input(input_shape)\n conv1 = Conv2D(base_depth, 3, activation='relu', padding='same')(inputs)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(pool1)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2_1)\n\n conv3_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(pool2)\n conv3_2 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(conv3_1)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3_2)\n\n conv4_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool3)\n conv4_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv4_1)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4_2)\n\n conv5_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool4)\n conv5_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv5_1)\n pool5 = MaxPooling2D(pool_size=(2, 2))(conv5_2)\n\n conv6_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool5)\n\n up7 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv6_1)\n concat7 = concatenate([up7, conv5_2])\n conv7_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat7)\n\n up8 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv7_1)\n concat8 = concatenate([up8, conv4_2])\n conv8_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat8)\n\n up9 = Conv2DTranspose(base_depth*2, 2, 
strides=(2, 2), activation='relu',\n padding='same')(conv8_1)\n concat9 = concatenate([up9, conv3_2])\n conv9_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(concat9)\n\n up10 = Conv2DTranspose(base_depth, 2, strides=(2, 2), activation='relu',\n padding='same')(conv9_1)\n concat10 = concatenate([up10, conv2_1])\n conv10_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(concat10)\n\n up11 = Conv2DTranspose(int(base_depth/2), 2, strides=(2, 2),\n activation='relu', padding='same')(conv10_1)\n concat11 = concatenate([up11, conv1])\n\n out = Conv2D(1, 1, activation='sigmoid', padding='same')(concat11)\n\n return Model(input=inputs, output=out)", "def test_convolution(transformer_factory):\n N = 128\n C, K = 3, 8\n D, T = 1, 1\n H = W = 32\n R = S = 2\n\n padding = dict(pad_d=0, pad_h=0, pad_w=0)\n strides = dict(str_d=1, str_h=1, str_w=1)\n dilation = dict(dil_d=1, dil_h=1, dil_w=1)\n conv_params = padding.copy()\n conv_params.update(strides)\n conv_params.update(dilation)\n\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n ax_i.set_shape((C, D, H, W, N))\n ax_f.set_shape((C, T, R, S, K))\n\n ax_o = ng.make_axes([\n ng.make_axis(roles=[ar.features_input]).named('C'),\n ng.make_axis(roles=[ar.features_0]).named('D'),\n ng.make_axis(roles=[ar.features_1]).named('H'),\n ng.make_axis(roles=[ar.features_2]).named('W'),\n ax.N\n ])\n\n ax_o[:-1].set_shape((\n K,\n output_dim(D, T, padding['pad_d'], strides['str_d']),\n output_dim(H, R, padding['pad_h'], strides['str_h']),\n output_dim(W, S, padding['pad_w'], strides['str_w']))\n )\n\n inputs = ng.placeholder(axes=ax_i)\n filters = ng.placeholder(axes=ax_f)\n\n # randomly initialize\n input_value = rng.uniform(-1, 1, ax_i)\n filter_value = rng.uniform(-1, 1, ax_f)\n\n assert input_value.shape == ax_i.lengths\n assert filter_value.shape == ax_f.lengths\n\n inputs = ng.placeholder(ax_i)\n filters = ng.placeholder(ax_f)\n\n output = ng.convolution(conv_params, inputs, filters, axes=ax_o)\n targets = ng.placeholder(axes=output.axes)\n\n costs = ng.cross_entropy_binary(ng.sigmoid(output), targets)\n error = ng.sum(costs, out_axes=()) / ng.batch_size(costs)\n d_inputs = ng.deriv(error, inputs)\n d_filters = ng.deriv(error, filters)\n\n targets_value = rng.uniform(.1, 0.9, output.axes)\n\n with executor([output, error, d_inputs, d_filters], inputs, filters, targets) as conv_executor:\n result_ng, err_ng, gradI_ng, gradF_ng = \\\n conv_executor(input_value, filter_value, targets_value)\n\n # Now compute reference values via NEON\n NervanaObject.be.bsz = N\n neon_layer = Convolution(fshape=(R, S, K), padding=padding, strides=strides)\n\n inp = neon_layer.be.array(input_value.reshape(C * H * W * D, N))\n neon_layer.W = neon_layer.be.array(filter_value.reshape(C * R * S * T, K))\n neon_layer.dW = neon_layer.be.empty_like(neon_layer.W)\n neon_layer.configure((C, H, W))\n neon_layer.prev_layer = True\n neon_layer.allocate()\n neon_layer.set_deltas(DummyDeltaBuffers())\n\n result_ne = neon_layer.fprop(inp).get().reshape(output.axes.lengths)\n\n act_result_ne = 1. 
/ (1.0 + np.exp(-result_ne))\n err = neon_layer.be.array((act_result_ne - targets_value).reshape(-1, N) / float(N))\n gradI_ne = neon_layer.bprop(err).get().reshape(ax_i.lengths)\n gradF_ne = neon_layer.dW.get().reshape(ax_f.lengths)\n\n # Compare fprop\n ng.testing.assert_allclose(result_ng, result_ne, rtol=0, atol=1e-6)\n\n # Compare bprop\n ng.testing.assert_allclose(gradI_ng, gradI_ne, rtol=0, atol=1e-6)\n\n # Compare update\n ng.testing.assert_allclose(gradF_ng, gradF_ne, rtol=0, atol=1e-4)", "def copy_conv(sess, tftensor, layer):\n\n W = sess.graph.get_tensor_by_name('{}/conv2d_params:0'.format(tftensor)).eval()\n W = W.transpose((3, 2, 0, 1))\n\n assert W.shape == layer.W.data.shape\n\n layer.W.data = W", "def _fp32_vnchwconv_process(axis_0_index, h_loop_idx, h_size):\n\n def _fp32_inner_vnchwconv(col_lp_idx, col_size):\n \"\"\"\n inner vnchwconv\n \"\"\"\n\n # move data in\n in_offset = (block_idx * per_core_col_size + col_lp_idx * max_sub_w_size +\n h_loop_idx * max_sub_h_size * axis_2 +\n axis_0_index * axis_1 * axis_2)\n data_in_info = (h_size, col_size, axis_1, axis_2, in_offset)\n _data_move_in_mc_on_w(tik_inst, ub_input, data_in, data_in_info)\n\n # for this case, data_move will move in one more block\n with tik_inst.new_stmt_scope():\n h_size_temp = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(tik.all(axis_1 > data_size_one_block,\n h_size % data_size_one_block > 0)):\n h_size_temp.set_as(_ceil_div(h_size, data_size_one_block) *\n data_size_one_block)\n with tik_inst.else_scope():\n h_size_temp.set_as(h_size)\n # transpose by vnchwconv\n sub_hw_size = (h_size_temp, col_size)\n _transpose_by_2_vnchwconv(tik_inst, ub_input[ub_offset],\n ub_input, sub_hw_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size + col_lp_idx * max_sub_w_size) *\n axis_1 + h_loop_idx * max_sub_h_size +\n axis_0_index * axis_1 * axis_2)\n data_out_info = (h_size, col_size, axis_1, axis_2, out_offset)\n _data_move_out_mc_on_w(tik_inst, data_out, ub_input[ub_offset], data_out_info)\n\n with tik_inst.for_range(0, loop_cnt) as lp_idx:\n _fp32_inner_vnchwconv(lp_idx, max_sub_w_size)\n with tik_inst.if_scope(left_size > 0):\n _fp32_inner_vnchwconv(loop_cnt, left_size)", "def declaration_conv2d_transpose_impl(data, kernel, strides, padding, out_dtype, output_padding):\n data_pad, kernel_transform = conv2d_transpose_nchw_preprocess(\n data, kernel, strides, padding, out_dtype, output_padding\n )\n batch, in_c, in_h, in_w = data_pad.shape\n out_c, _, filter_h, filter_w = kernel_transform.shape\n\n # convolution stage\n out_c = simplify(out_c)\n\n out_h = simplify(in_h - filter_h + 1)\n out_w = simplify(in_w - filter_w + 1)\n dc = te.reduce_axis((0, in_c), name=\"dc\")\n dh = te.reduce_axis((0, filter_h), name=\"dh\")\n dw = te.reduce_axis((0, filter_w), name=\"dw\")\n\n Output = te.compute(\n (batch, out_c, out_h, out_w),\n lambda b, c, h, w: te.sum(\n data_pad[b, dc, h + dh, w + dw].astype(out_dtype)\n * kernel_transform[c, dc, dh, dw].astype(out_dtype),\n axis=[dc, dh, dw],\n ),\n tag=\"conv2d_transpose_nchw\",\n )\n\n return Output", "def CustomConv3DTranspose(x_in, nf, strides=2, kernel_size = 3):\r\n\tx_out = Conv3DTranspose(nf, kernel_size=3, padding='same',kernel_initializer='he_normal', strides=strides)(x_in)\r\n\t#print(\"AAAAA\", x_out.shape)\r\n\tx_out = BatchNormalization()(x_out)\r\n\tx_out = LeakyReLU(0.2)(x_out)\r\n\treturn x_out", "def test_conv2d():\n img = np.array([\n [0.3, 0.5, 0.7, 0.9],\n [0.1, 0.3, 0.5, 0.7],\n [0.9, 0.7, 0.5, 0.3],\n ])\n template = 
np.array([\n [1, 0],\n [1, 0],\n ])\n template = np.flipud(np.fliplr(template))\n return fftconvolve(img, template, mode='valid')", "def test_conv_flatten_deriv(transformer_factory):\n\n # set shape\n # NOTE: N must be >= 4 for GPU, but for CPU this could be decreased to\n # speed up the test\n N = 4\n C, D, H, W = (3, 1, 28, 28)\n T, R, S, K = (1, 5, 5, 8)\n\n params = dict(pad_d=0, pad_h=0, pad_w=0, str_d=1, str_h=1, str_w=1, dil_d=1, dil_h=1, dil_w=1)\n\n # i, f, o axes\n ax_i = ng.make_axes([ax.C, ax.D, ax.H, ax.W, ax.N])\n ax_f = ng.make_axes([ax.C, ax.T, ax.R, ax.S, ax.K])\n ax_o = ng.make_axes([\n ng.make_axis(roles=[ar.features_input]).named('C'),\n ng.make_axis(roles=[ar.features_0]).named('D'),\n ng.make_axis(roles=[ar.features_1]).named('H'),\n ng.make_axis(roles=[ar.features_2]).named('W'),\n ax.N\n ])\n\n ax_i.set_shape((C, D, H, W, N))\n ax_f.set_shape((C, T, R, S, K))\n ax_o.set_shape((K, D - T + 1, H - R + 1, W - S + 1, N))\n axes_rsck = ng.make_axes([ax.R, ax.S, ax.C, ax.K])\n axes_rsck_prime = ng.make_axes([ng.make_axis(axis.length).named(axis.name + 'p')\n for axis in axes_rsck])\n axes_nmpqk = ng.make_axes([ax_o[-1], ax_o[1], ax_o[2], ax_o[3], ax_o[0]])\n\n # broadcast input / filter axes\n input_var = ng.variable(ax_i).named('input')\n input_var.input = True\n input_val = np.ones(input_var.axes.lengths)\n\n filter_rsck_prime = ng.variable(axes_rsck_prime)\n filter_var = filter_rsck_prime\n filter_rsck = ng.cast_axes(filter_rsck_prime, axes_rsck)\n filter_trsck = ng.expand_dims(filter_rsck, ax.T, 0)\n filter_ctrsk = ng.axes_with_order(filter_trsck, axes=ax_f)\n\n # convolution\n output_kmpqn = ng.convolution(params, input_var, filter_ctrsk, axes=ax_o)\n output_nmpqk = ng.axes_with_order(output_kmpqn, axes=axes_nmpqk)\n\n # slice away the oD\n out_slicing = [slice(None), 0, slice(None), slice(None), slice(None)]\n output_npqk = ng.tensor_slice(output_nmpqk, out_slicing)\n\n output = ng.flatten_at(output_npqk, idx=1)\n\n # cost and grad\n cost = ng.sum(output, out_axes=())\n\n filter_var.input = True\n filter_var.named('filter')\n filter_val = np.ones(filter_var.axes.lengths)\n\n with ExecutorFactory() as factory:\n\n conv_comp = factory.executor(output, filter_var, input_var)\n grad_filter_num_comp = factory.numeric_derivative(cost, filter_var, 1.0, input_var)\n grad_filter_sym_comp = factory.derivative(cost, filter_var, input_var)\n\n grad_input_num_comp = factory.numeric_derivative(cost, input_var, 1.0, filter_var)\n grad_input_sym_comp = factory.derivative(cost, input_var, filter_var)\n\n conv_val = conv_comp(filter_val, input_val)\n conv_val_num = np.empty_like(conv_val)\n conv_val_num.fill(C * T * R * S)\n assert ng.testing.allclose(conv_val, conv_val_num)\n\n grad_filter_num_val = grad_filter_num_comp(filter_val, input_val)\n grad_filter_sym_val = grad_filter_sym_comp(filter_val, input_val)\n assert ng.testing.allclose(grad_filter_num_val, grad_filter_sym_val)\n\n grad_input_num_val = grad_input_num_comp(input_val, filter_val)\n grad_input_sym_val = grad_input_sym_comp(input_val, filter_val)\n assert ng.testing.allclose(grad_input_num_val, grad_input_sym_val)", "def cspdarknet53_tiny(input_data):\n input_data = common.convolutional(input_data, (3, 3, 3, 32), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n route_1 = 
input_data\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 32, 64))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 64, 128))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 64, 128))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 128, 256))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 128, 256))\n route_1 = input_data\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 512, 512))\n\n return route_1, input_data", "def transition_up(self, x, filters, name):\n with tf.name_scope(name):\n x = tf.layers.conv2d_transpose(x,\n filters=filters,\n kernel_size=[3, 3],\n strides=[2, 2],\n padding='SAME',\n activation=None,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name=name+'_trans_conv3x3')\n\n return x", "def transformer_V2(U, theta, out_size, name='SpatialTransformer', **kwargs):\n\n def _repeat(x, n_repeats):\n with tf.variable_scope('_repeat'):\n rep = tf.transpose(\n tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])\n rep = tf.cast(rep, 'int32')\n x = tf.matmul(tf.reshape(x, (-1, 1)), rep)\n return tf.reshape(x, [-1])\n\n def _interpolate(im, x, y, out_size):\n with tf.variable_scope('_interpolate'):\n # constants\n num_batch = tf.shape(im)[0]\n height = tf.shape(im)[1]\n width = tf.shape(im)[2]\n channels = tf.shape(im)[3]\n\n x = tf.cast(x, 'float32')\n y = tf.cast(y, 'float32')\n height_f = tf.cast(height, 'float32')\n width_f = tf.cast(width, 'float32')\n out_height = out_size[0]\n out_width = out_size[1]\n zero = tf.zeros([], dtype='int32')\n max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')\n max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')\n\n # scale indices from [-1, 1] to [0, width/height]\n x = (x + 1.0)*(width_f) / 2.0\n y = (y + 1.0)*(height_f) / 2.0\n\n # do sampling\n x0 = tf.cast(tf.floor(x), 'int32')\n x1 = x0 + 1\n y0 = tf.cast(tf.floor(y), 'int32')\n y1 = y0 + 1\n\n x0 = tf.clip_by_value(x0, zero, max_x)\n x1 = tf.clip_by_value(x1, zero, max_x)\n y0 = tf.clip_by_value(y0, zero, max_y)\n y1 = tf.clip_by_value(y1, zero, max_y)\n dim2 = width\n dim1 = width*height\n base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)\n base_y0 = base + y0*dim2\n base_y1 = base + y1*dim2\n idx_a = base_y0 + x0\n idx_b = base_y1 + x0\n idx_c = base_y0 + x1\n idx_d = base_y1 + x1\n\n # use indices to lookup pixels in the flat image and restore\n # channels dim\n im_flat = tf.reshape(im, tf.stack([-1, channels]))\n im_flat = tf.cast(im_flat, 'float32')\n Ia = tf.gather(im_flat, idx_a)\n Ib = 
tf.gather(im_flat, idx_b)\n Ic = tf.gather(im_flat, idx_c)\n Id = tf.gather(im_flat, idx_d)\n\n # and finally calculate interpolated values\n x0_f = tf.cast(x0, 'float32')\n x1_f = tf.cast(x1, 'float32')\n y0_f = tf.cast(y0, 'float32')\n y1_f = tf.cast(y1, 'float32')\n wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)\n wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)\n wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)\n wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)\n output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])\n return output\n\n def _meshgrid(height, width):\n with tf.variable_scope('_meshgrid'):\n # This should be equivalent to:\n # x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),\n # np.linspace(-1, 1, height))\n # ones = np.ones(np.prod(x_t.shape))\n # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])\n x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),\n tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))\n y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),\n tf.ones(shape=tf.stack([1, width])))\n\n x_t_flat = tf.reshape(x_t, (1, -1))\n y_t_flat = tf.reshape(y_t, (1, -1))\n\n ones = tf.ones_like(x_t_flat)\n grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])\n return grid\n\n def _transform(theta, input_dim, out_size):\n with tf.variable_scope('_transform'):\n num_batch = tf.shape(input_dim)[0]\n height = tf.shape(input_dim)[1]\n width = tf.shape(input_dim)[2]\n num_channels = tf.shape(input_dim)[3]\n theta = tf.reshape(theta, (-1, 2, 3))\n theta = tf.cast(theta, 'float32')\n\n # grid of (x_t, y_t, 1), eq (1) in ref [1]\n height_f = tf.cast(height, 'float32')\n width_f = tf.cast(width, 'float32')\n out_height = out_size[0]\n out_width = out_size[1]\n grid = _meshgrid(out_height, out_width)\n grid = tf.expand_dims(grid, 0)\n grid = tf.reshape(grid, [-1])\n grid = tf.tile(grid, tf.stack([num_batch]))\n grid = tf.reshape(grid, tf.stack([num_batch, 3, -1]))\n\n # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)\n T_g = tf.matmul(theta, grid)\n x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])\n y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])\n x_s_flat = tf.reshape(x_s, [-1])\n y_s_flat = tf.reshape(y_s, [-1])\n\n input_transformed = _interpolate(\n input_dim, x_s_flat, y_s_flat,\n out_size)\n\n output = tf.reshape(\n input_transformed, tf.stack([num_batch, out_height, out_width, num_channels]))\n return output\n\n with tf.variable_scope(name):\n output = _transform(theta, U, out_size)\n return output", "def conv_2d_transpose(incoming, nb_filter, filter_size, output_shape,\n strides=1, padding='same', activation='linear',\n bias=True, weights_init='uniform_scaling',\n bias_init='zeros', regularizer=None, weight_decay=0.001,\n trainable=True, restore=True, name=\"Conv2DTranspose\"):\n assert padding in ['same', 'valid', 'SAME', 'VALID'], \\\n \"Padding must be same' or 'valid'\"\n\n input_shape = utils.get_incoming_shape(incoming)\n assert len(input_shape) == 4, \"Incoming Tensor shape must be 4-D\"\n\n filter_size = utils.autoformat_filter_conv2d(filter_size,\n nb_filter,\n input_shape[-1])\n strides = utils.autoformat_kernel_2d(strides)\n padding = utils.autoformat_padding(padding)\n\n with tf.name_scope(name) as scope:\n\n W_init = initializations.get(weights_init)()\n W_regul = None\n if regularizer:\n W_regul = lambda x: losses.get(regularizer)(x, weight_decay)\n W = vs.variable(scope + 'W', shape=filter_size,\n regularizer=W_regul, initializer=W_init,\n trainable=trainable, restore=restore)\n # Track per layer variables\n 
tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, W)\n\n b = None\n if bias:\n b_init = initializations.get(bias_init)()\n b = vs.variable(scope + 'b', shape=nb_filter,\n initializer=b_init, trainable=trainable,\n restore=restore)\n # Track per layer variables\n tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + scope, b)\n\n # Determine the complete shape of the output tensor.\n batch_size = tf.gather(tf.shape(incoming), tf.constant([0]))\n if len(output_shape) == 2:\n output_shape = output_shape + [nb_filter]\n elif len(output_shape) != 3:\n raise Exception(\"output_shape length error: \" \n + str(len(output_shape))\n + \", only a length of 2 or 3 is supported.\")\n complete_out_shape = tf.concat(0, [batch_size, tf.constant(output_shape)])\n \n inference = tf.nn.conv2d_transpose(incoming, W, complete_out_shape,\n strides, padding)\n \n # Reshape tensor so its shape is correct.\n inference.set_shape([None] + output_shape)\n\n if b: inference = tf.nn.bias_add(inference, b)\n\n if isinstance(activation, str):\n inference = activations.get(activation)(inference)\n elif hasattr(activation, '__call__'):\n inference = activation(inference)\n else:\n raise ValueError(\"Invalid Activation.\")\n\n # Track activations.\n tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, inference)\n\n # Add attributes to Tensor to easy access weights.\n inference.scope = scope\n inference.W = W\n inference.b = b\n\n return inference", "def traverse(op):\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_broadcast(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if tensor.op.input_tensors:\n traverse(tensor.op)\n\n if 'conv2d_NCHWc' in op.tag:\n conv_out = op.output(0)\n kernel = conv_out.op.input_tensors[1]\n data_vec = conv_out.op.input_tensors[0]\n data = data_vec.op.input_tensors[0] \\\n if isinstance(data_vec.op, tvm.tensor.ComputeOp) and \"pad\" not in data_vec.op.tag \\\n else data_vec\n if isinstance(data.op, tvm.tensor.ComputeOp) and \"pad\" in data.op.tag:\n data_pad = data\n data = data_pad.op.input_tensors[0]\n\n n, ic_chunk, h, w, ic_block = [x.value for x in data.shape]\n ic = ic_chunk * ic_block\n original_data = tvm.placeholder((n, ic, h, w), dtype=conv_out.dtype)\n\n kh, kw = kernel_size\n original_kernel = tvm.placeholder((num_filter, ic, kh, kw), dtype=conv_out.dtype)\n\n wkl = _get_workload(original_data, original_kernel, stride, padding, conv_out.dtype)\n sch = _get_schedule(wkl)\n _SCH_TO_SCH_FUNC[type(sch)](s, wkl, data_vec,\n kernel, conv_out, outs[0])", "def _initialize_tps(num_control_points, input_shape, downsample_factor,\n precompute_grid):\n\n # break out input_shape\n _, _, height, width = input_shape\n\n # Create source grid\n grid_size = np.sqrt(num_control_points)\n x_control_source, y_control_source = np.meshgrid(\n np.linspace(-1, 1, grid_size),\n np.linspace(-1, 1, grid_size))\n\n # Create 2 x num_points array of source points\n source_points = np.vstack(\n (x_control_source.flatten(), y_control_source.flatten()))\n\n # Convert to floatX\n source_points = source_points.astype(theano.config.floatX)\n\n # Get number of equations\n num_equations = num_control_points + 3\n\n # Initialize L to be num_equations square matrix\n L = np.zeros((num_equations, num_equations), dtype=theano.config.floatX)\n\n # Create P matrix components\n L[0, 3:num_equations] = 1.\n L[1:3, 3:num_equations] = source_points\n L[3:num_equations, 0] = 1.\n L[3:num_equations, 1:3] = source_points.T\n\n # Loop 
through each pair of points and create the K matrix\n for point_1 in range(num_control_points):\n for point_2 in range(point_1, num_control_points):\n\n L[point_1 + 3, point_2 + 3] = _U_func_numpy(\n source_points[0, point_1], source_points[1, point_1],\n source_points[0, point_2], source_points[1, point_2])\n\n if point_1 != point_2:\n L[point_2 + 3, point_1 + 3] = L[point_1 + 3, point_2 + 3]\n\n # Invert\n L_inv = np.linalg.inv(L)\n\n if precompute_grid:\n # Construct grid\n out_height = np.array(height // downsample_factor[0]).astype('int64')\n out_width = np.array(width // downsample_factor[1]).astype('int64')\n x_t, y_t = np.meshgrid(np.linspace(-1, 1, out_width),\n np.linspace(-1, 1, out_height))\n ones = np.ones(np.prod(x_t.shape))\n orig_grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])\n orig_grid = orig_grid[0:2, :]\n orig_grid = orig_grid.astype(theano.config.floatX)\n\n # Construct right mat\n\n # First Calculate the U function for the new point and each source\n # point as in ref [2]\n # The U function is simply U(r) = r^2 * log(r^2), where r^2 is the\n # squared distance\n to_transform = orig_grid[:, :, np.newaxis].transpose(2, 0, 1)\n stacked_transform = np.tile(to_transform, (num_control_points, 1, 1))\n stacked_source_points = \\\n source_points[:, :, np.newaxis].transpose(1, 0, 2)\n r_2 = np.sum((stacked_transform - stacked_source_points) ** 2, axis=1)\n\n # Take the product (r^2 * log(r^2)), being careful to avoid NaNs\n log_r_2 = np.log(r_2)\n log_r_2[np.isinf(log_r_2)] = 0.\n distances = r_2 * log_r_2\n\n # Add in the coefficients for the affine translation (1, x, and y,\n # corresponding to a_1, a_x, and a_y)\n upper_array = np.ones(shape=(1, orig_grid.shape[1]),\n dtype=theano.config.floatX)\n upper_array = np.concatenate([upper_array, orig_grid], axis=0)\n right_mat = np.concatenate([upper_array, distances], axis=0)\n\n # Convert to tensors\n out_height = T.as_tensor_variable(out_height)\n out_width = T.as_tensor_variable(out_width)\n right_mat = T.as_tensor_variable(right_mat)\n\n else:\n out_height = None\n out_width = None\n right_mat = None\n\n # Convert to tensors\n L_inv = T.as_tensor_variable(L_inv)\n source_points = T.as_tensor_variable(source_points)\n\n return right_mat, L_inv, source_points, out_height, out_width", "def get_unet():\n inputs = Input((img_rows, img_cols, 1))\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)\n conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)\n conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)\n conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(pool3)\n conv4 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv4)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(pool4)\n conv5 = Conv2D(512, (3, 3), activation='relu', padding='same')(conv5)\n\n up6 = concatenate([Conv2DTranspose(256, (2, 2), strides=(2, 2),\n padding='same')(conv5), conv4], axis=3)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(up6)\n conv6 = Conv2D(256, (3, 3), activation='relu', padding='same')(conv6)\n\n up7 = 
concatenate([Conv2DTranspose(128, (2, 2), strides=(2, 2),\n padding='same')(conv6), conv3], axis=3)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(up7)\n conv7 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv7)\n\n up8 = concatenate([Conv2DTranspose(64, (2, 2), strides=(2, 2),\n padding='same')(conv7), conv2], axis=3)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(up8)\n conv8 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv8)\n\n up9 = concatenate([Conv2DTranspose(32, (2, 2), strides=(2, 2),\n padding='same')(conv8), conv1], axis=3)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(up9)\n conv9 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv9)\n\n conv10 = Conv2D(1, (1, 1), activation='sigmoid')(conv9)\n\n model = Model(inputs=[inputs], outputs=[conv10])\n\n model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss,\n metrics=[dice_coef])\n\n return model", "def test_on_conv_transpose_2d_two_channels(self):\n\n # Channels/Colors, #filters, filter_size (square)\n conv_filter = objax.nn.ConvTranspose2D(2, 1, 2, padding=objax.ConvPadding.VALID)\n weights = objax.TrainVar(jn.array([[[[1., 2.], [3., 4.]], [[1., 2.], [3., 4.]]]]).transpose((2, 3, 0, 1)))\n conv_filter.w = weights\n image = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.],\n [9., 10., 11., 12.], [13., 14., 15., 16.]],\n [[2., 1., 3., 4.], [5., 6., 7., 8.],\n [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n # NCHW: Batch, Channels/Colors, Height, Width\n features = conv_filter(image)\n expected_features = jn.array([[[[4., 10., 10., 20., 16.],\n [22., 54., 64., 92., 64.],\n [48., 132., 152., 172., 112.],\n [80., 212., 232., 252., 160.],\n [78., 188., 202., 216., 128.]]]])\n self.assertEqual(features.shape, (1, 1, 5, 5))\n self.assertTrue(jn.array_equal(features, expected_features))", "def two_layers_cnn( input_layer ):\n # Convolutional Layer #1\n # Computes 8 features using a 4x4 filter with ReLU activation.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, NXCHANNELS, NVCHANNELS, 1]\n # Output Tensor Shape: [batch_size, NXCHANNELS, NVCHANNELS, 8]\n\n conv1 = tf.layers.conv2d(\n inputs = input_layer,\n filters = 8,\n kernel_size = [4,4],\n padding=\"same\",\n activation=tf.nn.relu)\n\n # Pooling Layer #1\n # First max pooling layer with a 8 filter and stride of 2\n # Input Tensor Shape: [batch_size, 64, 64]\n # Output Tensor Shape: [batch_size, 32, 32, 8]\n pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=(4,4), strides=4)\n\n # Convolutional Layer #2\n # Computes 16 features using a 4x4 filter.\n # Padding is added to preserve width and height.\n # Input Tensor Shape: [batch_size, 32, 32, 8 ]\n # Output Tensor Shape: [batch_size, 32, 32, 16]\n conv2 = tf.layers.conv2d(\n inputs = pool1,\n filters = 16,\n kernel_size = [4,4],\n padding =\"same\",\n activation =tf.nn.relu)\n\n # Pooling Layer #2\n # Second max pooling layer with a 2 filter and stride of 2\n # Input Tensor Shape: [batch_size, 32, 32, 16]\n # Output Tensor Shape: [batch_size, 16, 16, 16]\n pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=(4,4), strides=4)\n \n # Flatten tensor into a batch of vectors\n # Input Tensor Shape: [batch_size, 4, 4, 16]\n # Output Tensor Shape: [batch_size, 4x4x16 ]\n pool2_flat = tf.reshape(pool2, [-1, 4*4*16 ])\n\n return pool2_flat", "def construct(input_placeholder):\n\t\t###############################\n\t\t# MODEL ARCHITECTURE #\n\t\t###############################\n\t\t# First block of 
convolutions\n\t\twith tf.variable_scope(\"conv_1\"):\n\t\t\tconv_1_1 = conv2d(input_placeholder,\n\t\t\t\tinput_channels=1,\n\t\t\t\toutput_channels=64,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_1_2 = conv2d(conv_1_1,\n\t\t\t\tinput_channels=64,\n\t\t\t\toutput_channels=64,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\t\t\t# TODO batchn\n\t\t\tbn_1 = conv_1_2\n\n\t\t# Second block of convolutions.\n\t\twith tf.variable_scope(\"conv2\"):\n\t\t\tconv_2_1 = conv2d(bn_1,\n\t\t\t\tinput_channels=64,\n\t\t\t\toutput_channels=128,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_2_2 = conv2d(conv_2_1,\n\t\t\t\tinput_channels=128,\n\t\t\t\toutput_channels=128,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\n\t\t\t# TODO batchn\n\t\t\tbn_2 = conv_2_2\n\n\t\twith tf.variable_scope(\"conv3\"):\n\t\t\tconv_3_1 = conv2d(bn_2,\n\t\t\t\tinput_channels=128,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_3_2 = conv2d(conv_3_1,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_3_3 = conv2d(conv_3_2,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=2)\n\t\t\t# TODO batchn\n\t\t\tbn_3 = conv_3_3\n\n\n\t\t# DILATED LAYERS:\n\t\twith tf.variable_scope(\"conv4\"):\n\t\t\tconv_4_1 = conv2d(bn_3,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_4_2 = conv2d(conv_4_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_4_3 = conv2d(conv_4_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\t# TODO batchn\n\t\t\tbn_4 = conv_4_3\n\n\t\twith tf.variable_scope(\"conv5\"):\n\t\t\tconv_5_1 = conv2d(bn_4,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_5_2 = conv2d(conv_5_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_5_3 = conv2d(conv_5_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\t# TODO batchn\n\t\t\tbn_5 = conv_5_3\n\n\t\twith tf.variable_scope(\"conv6\"):\n\t\t\tconv_6_1 = conv2d(bn_5,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_6_2 = conv2d(conv_6_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\tconv_6_3 = conv2d(conv_6_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=2,\n\t\t\t\tdilation=2)\n\t\t\t# TODO batchn\n\t\t\tbn_6 = conv_6_3\n\n\n\t\twith tf.variable_scope(\"conv7\"):\n\t\t\tconv_7_1 = conv2d(bn_6,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_7_2 = conv2d(conv_7_1,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\tconv_7_3 = conv2d(conv_7_2,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_channels=512,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tdilation=1)\n\t\t\t# TODO batchn\n\t\t\tbn_7 = conv_7_3\n\n\n\t\twith 
tf.variable_scope(\"conv8\"):\n\t\t\tconv_8_1 = deconv2d(bn_7,\n\t\t\t\tinput_channels=512,\n\t\t\t\toutput_size=[None, 64, 64, 256],\n\t\t\t\tkernel_size=4,\n\t\t\t\tstride=2,\n\t\t\t\tpad=1)\n\t\t\tconv_8_2 = conv2d(conv_8_1,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1)\n\t\t\tconv_8_3 = conv2d(conv_8_2,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=256,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=1)\n\t\t\tconv_8_313 = conv2d(conv_8_3,\n\t\t\t\tinput_channels=256,\n\t\t\t\toutput_channels=313,\n\t\t\t\tkernel_size=3,\n\t\t\t\tpad=1,\n\t\t\t\tstride=1)\n\n\n\t\treturn conv_8_313", "def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):\n\n fcn_layer7_conv_1x1 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='fcn_layer7_conv_1x1')\n\n fcn_layer7_deconv = tf.layers.conv2d_transpose(fcn_layer7_conv_1x1, num_classes, 4, 2, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='fcn_layer7_deconv')\n\n vgg_layer4_out_scale = tf.multiply(vgg_layer4_out, 0.01, name='vgg_layer4_out_scale')\n\n fcn_layer4_conv_1x1 = tf.layers.conv2d(vgg_layer4_out_scale, num_classes, 1, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='fcn_layer4_conv_1x1')\n\n intermediate_1 = tf.add(fcn_layer7_deconv, fcn_layer4_conv_1x1, name='intermediate_1')\n\n vgg_layer3_out_scale = tf.multiply(vgg_layer3_out, 0.0001, name='vgg_layer3_out_scale')\n\n fcn_layer3_conv_1x1 = tf.layers.conv2d(vgg_layer3_out_scale, num_classes, 1, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='fcn_layer3_conv_1x1')\n\n intermediate_1_deconv = tf.layers.conv2d_transpose(intermediate_1, num_classes, 4, 2, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='intermediate_1_deconv')\n\n intermediate_2 = tf.add(intermediate_1_deconv, fcn_layer3_conv_1x1, name='intermediate_2')\n\n fcn_output = tf.layers.conv2d_transpose(intermediate_2, num_classes, 16, 8, padding='SAME',\n kernel_initializer=tf.random_normal_initializer(stddev=0.01),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3), name='fcn_output')\n\n return fcn_output", "def test_ops_shape(self):\n confs = {'C_in': 3, 'C_out': 8, 'stride': 1, 'affine': True}\n\n for name, layer in OPS.items():\n net = layer(**confs)\n x = torch.rand((16, confs['C_in'], 32, 32))\n y = net(x)\n self.assertEqual(list(y.shape), [16, confs['C_out'], 32, 32])", "def cifar10_8layers(input_image, keep_prob, init_method=tf.truncated_normal_initializer()):\n with tf.variable_scope(\"conv1_1\"):\n W1_1 = tf.get_variable(name=\"W1_1\", shape=[3,3,3,32], dtype=tf.float32, \\\n initializer=init_method)\n b1_1 = tf.get_variable(name=\"b1_1\", shape=[32], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv1_1 = conv_relu(input_image, W1_1, b1_1)\n with tf.variable_scope(\"conv1_2\"):\n W1_2 = tf.get_variable(name=\"W1_2\", shape=[3,3,32,32], dtype=tf.float32, \\\n initializer=init_method)\n b1_2 = tf.get_variable(name=\"b1_2\", shape=[32], dtype=tf.float32, \\\n 
initializer=tf.constant_initializer(0.01))\n conv1_2 = max_pool(conv_relu(conv1_1, W1_2, b1_2))\n\t#conv1_2 = tf.nn.dropout(conv1_2, keep_prob)\n with tf.variable_scope(\"conv2_1\"):\n W2_1 = tf.get_variable(name=\"W2_1\", shape=[3,3,32,64], dtype=tf.float32, \\\n initializer=init_method)\n b2_1 = tf.get_variable(name=\"b2_1\", shape=[64], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv2_1 = conv_relu(conv1_2, W2_1, b2_1)\n #conv2_1 = tf.nn.dropout(conv2_1, keep_prob)\n with tf.variable_scope(\"conv2_2\"):\n W2_2 = tf.get_variable(name=\"W2_2\", shape=[3,3,64,64], dtype=tf.float32, \\\n initializer=init_method)\n b2_2 = tf.get_variable(name=\"b2_2\", shape=[64], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv2_2 = max_pool(conv_relu(conv2_1, W2_2, b2_2))\n #conv2_2 = tf.nn.dropout(conv2_2, keep_prob)\n with tf.variable_scope(\"conv3_1\"):\n W3_1 = tf.get_variable(name=\"W3_1\", shape=[3,3,64,128], dtype=tf.float32, \\\n initializer=init_method)\n b3_1 = tf.get_variable(name=\"b3_1\", shape=[128], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv3_1 = conv_relu(conv2_2, W3_1, b3_1)\n #conv3_1 = tf.nn.dropout(conv3_1, keep_prob)\n with tf.variable_scope(\"conv3_2\"):\n W3_2 = tf.get_variable(name=\"W3_2\", shape=[3,3,128,128], dtype=tf.float32, \\\n initializer=init_method)\n b3_2 = tf.get_variable(name=\"b3_2\", shape=[128], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv3_2 = max_pool(conv_relu(conv3_1, W3_2, b3_2))\n conv3_2 = tf.nn.dropout(conv3_2, keep_prob)\n with tf.variable_scope(\"fc1\"):\n W4 = tf.get_variable(name=\"W4\", shape=[4*4*128,256], dtype=tf.float32, \\\n initializer=init_method)\n b4 = tf.get_variable(name=\"b4\", shape=[256], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv3_flat = tf.reshape(conv3_2, [-1, 4*4*128])\n fc1 = fc_relu(conv3_flat, W4, b4)\n fc1 = tf.nn.dropout(fc1, keep_prob)\n with tf.variable_scope(\"fc2\"):\n W5 = tf.get_variable(name=\"W5\", shape=[256,512], dtype=tf.float32, \\\n initializer=init_method)\n b5 = tf.get_variable(name=\"b5\", shape=[512], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n fc2 = fc_relu(fc1, W5, b5)\n fc2 = tf.nn.dropout(fc2, keep_prob)\n with tf.variable_scope(\"output\"):\n W6 = tf.get_variable(name=\"W6\", shape=[512,10], dtype=tf.float32, \\\n initializer=init_method)\n b6 = tf.get_variable(name=\"b6\", shape=[10], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n y_logit = tf.matmul(fc2, W6) + b6\n return y_logit, tf.nn.softmax(y_logit, name=\"softmax\")", "def tfconv2d_transpose(in_channels,\n out_channels,\n kernel_size,\n stride=1,\n output_padding = 0,\n groups=1,\n bias=True,\n dilation=1,\n tf_padding_type = None):\n modules = []\n if tf_padding_type == 'same':\n padding = nn.ZeroPad2d(0)\n hook = hook_factory_tf_inverse_padding_same(kernel_size, stride)\n padding.register_forward_pre_hook(hook)\n modules.append(padding)\n\n # eliminate the effect of the in-build padding (is not capable of asymmeric padding)\n if isinstance(kernel_size, int):\n padding = kernel_size - 1\n else:\n padding = (kernel_size[0] - 1, kernel_size[1] - 1)\n\n modules.append(nn.ConvTranspose2d(in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n output_padding,\n groups,\n bias,\n dilation))\n\n return nn.Sequential(*modules)", "def _conv_op(self, in_obj, channel_axes, spatial_axes):\n\n manual_pad = collections.OrderedDict([(ax.name, (0, 0)) for ax in in_obj.axes])\n 
pad_int, extra_pad = self._get_pad_int(spatial_axes)\n manual_pad.update(extra_pad)\n if any((pad != (0, 0)) for pad in manual_pad.values()):\n in_obj = ng.pad(in_obj, manual_pad.values())\n spatial_axes = in_obj.axes.get_by_names(*ng.make_axes(spatial_axes).names)\n\n output_axes = self._output_axes(in_obj.axes,\n pad_int)\n convparams = utils.make_convparams(self.nout, self.filter_shape,\n self.strides, pad_int, self.dilation)\n return ng.deconvolution(convparams,\n in_obj,\n self.W,\n axes=output_axes)", "def test_conv(self):\n for kernel_type in [lambda x: x, SharedTensor]:\n for matrix_width in range(2, 5):\n for kernel_width in range(1, matrix_width):\n for padding in range(kernel_width // 2 + 1):\n matrix_size = (5, matrix_width)\n matrix = get_random_test_tensor(size=matrix_size)\n\n kernel_size = (kernel_width, kernel_width)\n kernel = get_random_test_tensor(size=kernel_size)\n\n matrix = matrix.unsqueeze(0).unsqueeze(0)\n kernel = kernel.unsqueeze(0).unsqueeze(0)\n\n reference = torch.nn.functional.conv2d(\n matrix, kernel, padding=padding)\n encrypted_matrix = SharedTensor(matrix)\n encrypted_kernel = kernel_type(kernel)\n encrypted_conv = encrypted_matrix.conv2d(\n encrypted_kernel, padding=padding\n )\n\n self._check(encrypted_conv, reference, 'conv2d failed')", "def transformer(U, theta, out_size, name='SpatialTransformer', **kwargs):\n\n def _repeat(x, n_repeats):\n with tf.variable_scope('_repeat'):\n rep = tf.transpose(\n tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])\n rep = tf.cast(rep, 'int32')\n x = tf.matmul(tf.reshape(x, (-1, 1)), rep)\n return tf.reshape(x, [-1])\n\n def _interpolate(im, x, y, out_size):\n with tf.variable_scope('_interpolate'):\n # constants\n num_batch = tf.shape(im)[0]\n height = tf.shape(im)[1]\n width = tf.shape(im)[2]\n channels = tf.shape(im)[3]\n\n x = tf.cast(x, 'float32')\n y = tf.cast(y, 'float32')\n height_f = tf.cast(height, 'float32')\n width_f = tf.cast(width, 'float32')\n out_height = out_size[0]\n out_width = out_size[1]\n zero = tf.zeros([], dtype='int32')\n max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')\n max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')\n\n # scale indices from [-1, 1] to [0, width/height]\n x = (x + 1.0)*(width_f) / 2.0\n y = (y + 1.0)*(height_f) / 2.0\n\n # do sampling\n x0 = tf.cast(tf.floor(x), 'int32')\n x1 = x0 + 1\n y0 = tf.cast(tf.floor(y), 'int32')\n y1 = y0 + 1\n\n x0 = tf.clip_by_value(x0, zero, max_x)\n x1 = tf.clip_by_value(x1, zero, max_x)\n y0 = tf.clip_by_value(y0, zero, max_y)\n y1 = tf.clip_by_value(y1, zero, max_y)\n dim2 = width\n dim1 = width*height\n base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)\n base_y0 = base + y0*dim2\n base_y1 = base + y1*dim2\n idx_a = base_y0 + x0\n idx_b = base_y1 + x0\n idx_c = base_y0 + x1\n idx_d = base_y1 + x1\n\n # use indices to lookup pixels in the flat image and restore\n # channels dim\n im_flat = tf.reshape(im, tf.stack([-1, channels]))\n im_flat = tf.cast(im_flat, 'float32')\n Ia = tf.gather(im_flat, idx_a)\n Ib = tf.gather(im_flat, idx_b)\n Ic = tf.gather(im_flat, idx_c)\n Id = tf.gather(im_flat, idx_d)\n\n # and finally calculate interpolated values\n x0_f = tf.cast(x0, 'float32')\n x1_f = tf.cast(x1, 'float32')\n y0_f = tf.cast(y0, 'float32')\n y1_f = tf.cast(y1, 'float32')\n wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)\n wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)\n wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)\n wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)\n output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])\n return 
output\n\n def _meshgrid(height, width):\n with tf.variable_scope('_meshgrid'):\n # This should be equivalent to:\n # x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),\n # np.linspace(-1, 1, height))\n # ones = np.ones(np.prod(x_t.shape))\n # grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])\n x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),\n tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))\n y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),\n tf.ones(shape=tf.stack([1, width])))\n\n x_t_flat = tf.reshape(x_t, (1, -1))\n y_t_flat = tf.reshape(y_t, (1, -1))\n\n ones = tf.ones_like(x_t_flat)\n grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])\n return grid\n\n def _transform(theta, input_dim, out_size):\n with tf.variable_scope('_transform'):\n num_batch = tf.shape(input_dim)[0]\n height = tf.shape(input_dim)[1]\n width = tf.shape(input_dim)[2]\n num_channels = tf.shape(input_dim)[3]\n theta = tf.reshape(theta, (-1, 2, 3))\n theta = tf.cast(theta, 'float32')\n\n # grid of (x_t, y_t, 1), eq (1) in ref [1]\n height_f = tf.cast(height, 'float32')\n width_f = tf.cast(width, 'float32')\n out_height = out_size[0]\n out_width = out_size[1]\n grid = _meshgrid(out_height, out_width)\n grid = tf.expand_dims(grid, 0)\n grid = tf.reshape(grid, [-1])\n grid = tf.tile(grid, tf.stack([num_batch]))\n grid = tf.reshape(grid, tf.stack([num_batch, 3, -1]))\n\n # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)\n T_g = tf.matmul(theta, grid)\n x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])\n y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])\n x_s_flat = tf.reshape(x_s, [-1])\n y_s_flat = tf.reshape(y_s, [-1])\n\n input_transformed = _interpolate(\n input_dim, x_s_flat, y_s_flat,\n out_size)\n\n output = tf.reshape(\n input_transformed, tf.stack([num_batch, out_height, out_width, num_channels]))\n return output\n\n with tf.variable_scope(name):\n output = _transform(theta, U, out_size)\n return output", "def test_3d_tranpose(): \n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n fdic,fdata = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n\n assert_array_equal(data.transpose()[0,1,2],fdata.transpose()[0,1,2])\n assert_array_equal(data.transpose((2,0,1))[0,1,2],\n fdata.transpose((2,0,1))[0,1,2])\n assert_array_equal(data.swapaxes(0,1)[0,1,2],fdata.swapaxes(0,1)[0,1,2])\n assert_array_equal(data.swapaxes(2,0)[0,1,2],fdata.swapaxes(2,0)[0,1,2])", "def cortex_conv(inp, filters, n_out_w=None, n_out_h=None, \n strides=(1, 1, 1, 1), padding='SAME', bias=None):\n\n\n n_out = filters.get_shape()[3].value\n if n_out is None and (n_out_w is None or n_out_h is None):\n raise Exception(\"Filter shape not inferrable from filter tensor \"\n \"and output shape not inferrable from n_out_w and n_out_h.\")\n elif n_out is None:\n n_out = n_out_w * n_out_h\n\n if n_out_h is None:\n if n_out_w is None:\n sqrt = int(math.sqrt(n_out))\n n_out_w = sqrt\n n_out_h = n_out // n_out_w\n else:\n if n_out_w is None:\n n_out_w = n_out // n_out_h\n\n conv_raw = tf.nn.conv2d(inp, filters, strides=strides, padding=padding)\n if bias is not None:\n conv_raw = tf.nn.bias_add(conv_raw, bias)\n shp = [s.value for s in conv_raw.get_shape()]\n reshaped = tf.reshape(conv_raw[:, :, :, :n_out_w * n_out_h],\n (shp[0], shp[1], shp[2], n_out_h, n_out_w))\n transposed = tf.transpose(reshaped, (0, 1, 3, 2, 4))\n output = tf.reshape(transposed, (shp[0], shp[1] * n_out_h, shp[2] * n_out_w,\n 1))\n return output", "def TCN(input_dim): \r\n # Number of dilations in order to use for 
the temporal blocks.\r\n dilations = np.array([1, 2, 4, 8, 16, 32])\r\n\r\n input_dim.insert(0,1)\r\n print(f\"input_dim: {input_dim}\")\r\n input_layer = Input(shape=input_dim)\r\n cropping = 0\r\n assert (sum(dilations) * block_size + 1) == 127, \"Paper specifies receptive field size should be 127\"\r\n \r\n prev_layer, skip_layer, _ = add_temporal_block(input_layer, None, 1, 1, cropping)\r\n \r\n for dilation in dilations:\r\n prev_layer, skip_layer, cropping = add_temporal_block(prev_layer, skip_layer, 2, dilation, cropping)\r\n\r\n output_layer = PReLU(shared_axes=[2, 3])(skip_layer)\r\n output_layer = SpectralNormalization(Conv1D(fixed_filters, kernel_size=1))(output_layer)\r\n output_layer = PReLU(shared_axes=[2, 3])(output_layer)\r\n output_layer = SpectralNormalization(Conv1D(1, kernel_size=1))(output_layer)\r\n\r\n return Model(input_layer, output_layer)", "def conv_slim_capsule(input_tensor,\n input_dim,\n output_dim,\n layer_name,\n input_atoms=8,\n output_atoms=8,\n stride=2,\n kernel_size=5,\n padding='SAME',\n **routing_args):\n with tf.variable_scope(layer_name):\n # convolution. return [batch_size, 1, 32, 8, 6, 6]\n kernel = variables.weight_variable(shape=[\n kernel_size, kernel_size, input_atoms, output_dim * output_atoms\n ])\n biases = variables.bias_variable([output_dim, output_atoms, 1, 1])\n votes, votes_shape, input_shape = _depthwise_conv3d(\n input_tensor, kernel, input_dim, output_dim, input_atoms, output_atoms,\n stride, padding)\n # convolution End\n\n with tf.name_scope('routing'):\n logit_shape = tf.stack([\n input_shape[0], input_dim, output_dim, votes_shape[2], votes_shape[3]\n ])\n biases_replicated = tf.tile(biases,\n [1, 1, votes_shape[2], votes_shape[3]])\n activations = _update_routing(\n votes=votes,\n biases=biases_replicated,\n logit_shape=logit_shape,\n num_dims=6,\n input_dim=input_dim,\n output_dim=output_dim,\n **routing_args)\n return activations", "def verts_canonization(verts, dim_w, dim_h):\n\n # translate\n verts[:,0] -= dim_w # X, from [0,2W) to [-W,W)\n verts[:,1] -= dim_h # Y, from [0,2H) to [-H,H)\n verts[:,2] -= dim_w # Z, from [0,2D) to [-D,D)\n\n # rescale\n verts[:,0] /= (2.*dim_h) # X, from [-W,W) to (-0.33,0.33)\n verts[:,1] /= (2.*dim_h) # Y, from [-H,H) to (-0.5,0.5)\n verts[:,2] /= (2.*dim_h) # Z, from [-D,D) to (-0.33,0.33)\n\n return verts" ]
[ "0.6420241", "0.63742214", "0.630493", "0.62457377", "0.6166526", "0.61490655", "0.60816073", "0.6013133", "0.59594655", "0.59204423", "0.5848845", "0.5842317", "0.58345294", "0.5829648", "0.58118945", "0.5805117", "0.57954824", "0.5793485", "0.5791456", "0.57831615", "0.57814854", "0.57590306", "0.5734852", "0.5683579", "0.56832623", "0.56548524", "0.5651807", "0.56100535", "0.55967426", "0.5589096", "0.55727893", "0.55623275", "0.5560066", "0.5553191", "0.5550043", "0.55443573", "0.5537838", "0.5500908", "0.5498189", "0.54879296", "0.5476054", "0.54691523", "0.54652864", "0.5451241", "0.5441279", "0.54357123", "0.54350024", "0.5425734", "0.54225993", "0.5421157", "0.54205656", "0.5408382", "0.53965336", "0.53938663", "0.5387418", "0.53727835", "0.5368686", "0.53681403", "0.536041", "0.5354874", "0.5350676", "0.53477883", "0.5325968", "0.53157175", "0.5294313", "0.52839357", "0.52788174", "0.5270046", "0.5262296", "0.52589166", "0.5247236", "0.5246804", "0.5246299", "0.52416086", "0.52367055", "0.52366465", "0.52322537", "0.52293676", "0.52260536", "0.52196676", "0.52188337", "0.52106243", "0.52088845", "0.52043355", "0.5202629", "0.5200203", "0.51963896", "0.5192443", "0.51922244", "0.51830626", "0.5181362", "0.51810926", "0.51787937", "0.51753676", "0.51721513", "0.51696265", "0.5164317", "0.5157374", "0.51570845", "0.5155269", "0.5150676" ]
0.0
-1
This setup was the first big success and a solid base config to work from. Note the lack of causal pooling; I hadn't built that module yet (a sketch of what it might look like follows the config below).
def decoder_setup_2():
    decoder = RetinaDecoder(
        # pre-pooling
        {'op': 'avg', 'kernel': (1, 2, 2), 'causal': False},
        # grouped temporal conv stacks:
        [
            {
                'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),
                'stride': 1, 'groups': 15, 'activation': nn.ReLU,
                'pool': {'op': 'avg', 'kernel': (2, 2, 2), 'causal': False}
            }
        ],
        # spatial conv layers: {in, out, kernel, stride}
        [],
        # for each ConvRNN cell:
        [],
        # temporal convolution stack(s)
        [
            {
                'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3),
                'stride': 1, 'groups': 1, 'activation': nn.ReLU
            }
        ],
        # ConvTranspose layers: {in, out, kernel, stride}
        [
            {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 2, 2)},
            {'in': 64, 'out': 1, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},
        ],
        # post conv layers
        [],
    )
    return decoder
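For context, "causal pooling" here means pooling that only aggregates the current and past frames along the temporal axis, instead of averaging over a window centered on the current frame. Below is a minimal sketch of such a module, assuming a PyTorch-style (N, C, T, H, W) tensor layout; the class name CausalAvgPool3d and the way it maps onto RetinaDecoder's {'op', 'kernel', 'causal'} pool spec are hypothetical, not the author's actual implementation.

import torch.nn as nn
import torch.nn.functional as F

class CausalAvgPool3d(nn.Module):
    """Average pooling over (T, H, W) that is causal in time.

    The temporal axis is left-padded with k_t - 1 frames so each output
    step only sees the current and earlier frames; height and width are
    pooled and downsampled as in an ordinary pool. Hypothetical sketch.
    """

    def __init__(self, kernel=(2, 2, 2)):
        super().__init__()
        self.kernel = kernel

    def forward(self, x):  # x: (N, C, T, H, W)
        k_t, k_h, k_w = self.kernel
        # F.pad pads dims from the last backwards: (W_l, W_r, H_l, H_r, T_l, T_r),
        # so this adds k_t - 1 frames of zeros before the first time step.
        x = F.pad(x, (0, 0, 0, 0, k_t - 1, 0))
        # Temporal stride 1 keeps the output time-aligned with the input;
        # the spatial dims are downsampled by their kernel sizes.
        return F.avg_pool3d(x, kernel_size=(k_t, k_h, k_w),
                            stride=(1, k_h, k_w))

Left-padding with zeros biases the first k_t - 1 averages toward zero; padding with mode='replicate' is a common alternative when that startup transient matters.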
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_configs():", "def config():", "def config():", "def _setup_pipeline_cfg(self):", "def init_config(self):\n pass", "def configure(self):", "def configure(self):", "def configure(self):", "def configure(self):", "def _configure(self):\n pass", "def get_base_config(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcp = self.config_parser\n\t\t# TODO: what happens if a command-line arg was already set?\n\t\t# BEGIN Read from config files\n\t\t# build - details relating to the build\n\t\tself.build['privileged'] = cp.getboolean('build', 'privileged')\n\t\tself.build['base_image'] = cp.get('build', 'base_image')\n\t\tself.build['dotest'] = cp.get('build', 'dotest')\n\t\tself.build['net'] = cp.get('build', 'net')\n\t\t# Take a command-line arg if given, else default.\n\t\tif self.build['conn_module'] is None:\n\t\t\tself.build['conn_module'] = cp.get('build', 'conn_module')\n\t\t# target - the target of the build, ie the container\n\t\tself.target['hostname'] = cp.get('target', 'hostname')\n\t\tself.target['ports'] = cp.get('target', 'ports')\n\t\tself.target['volumes'] = cp.get('target', 'volumes')\n\t\tself.target['volumes_from'] = cp.get('target', 'volumes_from')\n\t\tself.target['name'] = cp.get('target', 'name')\n\t\tself.target['rm'] = cp.getboolean('target', 'rm')\n\t\t# host - the host on which the shutit script is run\n\t\tself.host['add_shutit_to_path'] = cp.getboolean('host', 'add_shutit_to_path')\n\t\tself.host['docker_executable'] = cp.get('host', 'docker_executable')\n\t\tself.host['dns'] = cp.get('host', 'dns')\n\t\tself.host['password'] = cp.get('host', 'password')\n\t\tif isinstance(self.host['password'],str):\n\t\t\tshutit_global.shutit_global_object.secret_words_set.add(self.host['password'])\n\t\tself.logfile = cp.get('host', 'logfile')\n\t\tself.host['shutit_module_path'] = cp.get('host', 'shutit_module_path').split(':')\n\n\t\t# repository - information relating to docker repository/registry\n\t\tself.repository['name'] = cp.get('repository', 'name')\n\t\tself.repository['server'] = cp.get('repository', 'server')\n\t\tself.repository['push'] = cp.getboolean('repository', 'push')\n\t\tself.repository['tag'] = cp.getboolean('repository', 'tag')\n\t\tself.repository['export'] = cp.getboolean('repository', 'export')\n\t\tself.repository['save'] = cp.getboolean('repository', 'save')\n\t\tself.repository['suffix_date'] = cp.getboolean('repository', 'suffix_date')\n\t\tself.repository['suffix_format'] = cp.get('repository', 'suffix_format')\n\t\tself.repository['user'] = cp.get('repository', 'user')\n\t\tself.repository['password'] = cp.get('repository', 'password')\n\t\tif isinstance(self.repository['password'],str):\n\t\t\tshutit_global.shutit_global_object.secret_words_set.add(self.repository['password'])\n\t\tself.repository['email'] = cp.get('repository', 'email')\n\t\tself.repository['tag_name'] = cp.get('repository', 'tag_name')\n\t\t# END Read from config files\n\n\t\t# BEGIN tidy configs up\n\t\tif self.target['docker_image'] == '':\n\t\t\tself.target['docker_image'] = self.build['base_image']\n\t\t# END tidy configs up\n\n\t\t# FAILS begins\n\t\t# rm is incompatible with repository actions\n\t\tif self.target['rm'] and (self.repository['tag'] or self.repository['push'] or self.repository['save'] or self.repository['export']): # pragma: no cover\n\t\t\tshutit_global.shutit_global_object.shutit_print(\"Can't have [target]/rm and [repository]/(push/save/export) set to 
true\")\n\t\t\tshutit_global.shutit_global_object.handle_exit(exit_code=1)\n\t\tif self.target['hostname'] != '' and self.build['net'] != '' and self.build['net'] != 'bridge': # pragma: no cover\n\t\t\tshutit_global.shutit_global_object.shutit_print('\\n\\ntarget/hostname or build/net configs must be blank\\n\\n')\n\t\t\tshutit_global.shutit_global_object.handle_exit(exit_code=1)\n\t\t# FAILS ends", "def config():\n\n compared_algorithms_type: AlgorithmsType = AlgorithmsType.LinearRegression\n compared_methods: List = [] # Leave empty for using all solvers.\n numpy_distribution: NumpyDistribution = NumpyDistribution.IntelDistribution\n used_database: DatabaseType = DatabaseType.Synthetic\n experiment_type: ExperimentType = ExperimentType.RunTimeExperiment\n cross_validation_folds: int = 1\n n_alphas: int = 100\n reduction_factor: int = 1\n\n run_time_experiments_config: Dict[str, range] = {\n \"run_time_compared_data_sizes\": range(int(5000 / reduction_factor), int(15000 / reduction_factor),\n int(5000 / reduction_factor)),\n \"calc_transpose_dot_residuals\": compared_algorithms_type == AlgorithmsType.LinearRegression\n }\n number_of_alphas_experiments_config: Dict[str, range] = {\n \"alphas_range\": range(1, 221, 20)\n }\n\n synthetic_data_config: Dict[str, int] = {\n \"data_size\": int(15000 / reduction_factor),\n \"features_num\": 7\n }\n\n sketch_preconditioned_config: Dict[str, float] = {\n \"sampled_rows\": 0.005,\n \"switch_sign_probability\": 0.5,\n \"min_sampled_rows\": 100.0\n }\n resources_path: str = r'Resources'\n results_path: str = r'Results'\n clusters_count: int = _choose_clusters_num(used_database, synthetic_data_config[\"features_num\"])\n elastic_net_factor: float = 0.5 # Rho factor in Elastic-Net regularization.\n is_positive_definite: bool = True", "def initialize_from_config(self):", "def config( **kwargs ):", "def configuration():", "def configure(self) -> None:", "def __init__(self, pool_size):\n \n self.pool_size=pool_size;", "def config():\n config_django()\n config_svisor()", "def _augment_pipeline_cfg(self):", "def __init__(self, pool_size: float = 10):\n self.pool_size = pool_size", "def __init__(self, config):\n super().__init__(config)\n self.collector_host = config.get(\"collector_host\")\n self.schedds = config.get(\"schedds\", [None])\n self.condor_config = config.get(\"condor_config\")\n self.constraint = config.get(\"constraint\", True)\n self.classad_attrs = config.get(\"classad_attrs\")\n self.correction_map = config.get(\"correction_map\")", "def get_config():\n config = ml_collections.ConfigDict()\n config.seed = 42\n\n config.eval_num = 30000\n config.eval_avg_num = 3\n config.num_train_steps = -1\n config.log_loss_every_steps = 1000\n config.eval_every_steps = 1000\n config.checkpoint_every_steps = 5000\n\n config.dataset = \"mscoco\"\n config.coco_version = \"2014\"\n config.data_dir = \"data/\"\n config.return_text = False\n config.return_filename = False\n\n config.trial = 0 # dummy for repeated runs.\n config.beta1 = 0.5\n config.beta2 = 0.999\n config.d_lr = 0.0004\n config.g_lr = 0.0001\n config.polyak_decay = 0.999\n config.show_num = 64\n config.shuffle_buffer_size = 1000\n config.batch_norm_group_size = -1\n config.dtype = \"bfloat16\"\n config.train_shuffle = True\n\n config.image_size = 128\n config.batch_size = 56\n config.eval_batch_size = 7\n\n config.df_dim = 96\n config.gf_dim = 96\n config.z_dim = 128\n config.num_epochs = 500\n config.model_name = \"xmc\"\n config.d_step_per_g_step = 2\n config.g_spectral_norm = False\n 
config.d_spectral_norm = True\n config.architecture = \"xmc_net\"\n config.gamma_for_g = 15\n config.word_contrastive = True\n config.sentence_contrastive = True\n config.image_contrastive = True\n config.pretrained_image_contrastive = True\n config.cond_size = 16\n\n return config", "def config(ctx):\n return", "def configure(self):\n pass", "def configure(self):\n pass", "def init_config() -> Config:\n ...", "def configure(self):\r\n pass", "def build(config):", "def test_construct_3_default_bootsraps(self):\n configerus.new_config()", "def configure(self):\n\n pass", "def config(self):\n pass", "def config(self):\n pass", "def test_minimal_configuration(self):\n args = argparse.Namespace(cfg=os.path.join(TEST_DATA_DIR, 'minimal-cfg-file.ini'))\n self.cfg = configure(args)\n cfg = ElasticBlastConfig(self.cfg, task = ElbCommand.SUBMIT)\n\n self.assertTrue(cfg.blast.db_source)\n self.assertEqual(cfg.blast.db_source, DBSource.GCP)\n\n self.assertTrue(cfg.blast.batch_len)\n self.assertEqual(cfg.blast.batch_len, 10000)\n\n self.assertTrue(cfg.blast.mem_request)\n self.assertEqual(cfg.blast.mem_request, '0.5G')\n\n self.assertTrue(cfg.blast.mem_limit)\n expected_mem_limit = f'{get_machine_properties(cfg.cluster.machine_type).memory - SYSTEM_MEMORY_RESERVE}G'\n self.assertEqual(cfg.blast.mem_limit, expected_mem_limit)\n\n self.assertTrue(cfg.timeouts.init_pv > 0)\n self.assertTrue(cfg.timeouts.blast_k8s > 0)\n\n ElasticBlastConfig(self.cfg, task = ElbCommand.SUBMIT)", "def configs(self):\n raise NotImplementedError()", "def _init_pool(self, cfg: dict):\n pool = PyMysqlPoolBase(**cfg)\n return pool", "def setUpConfig(self):\n pass", "def __init__(self):\n\n # Primary configuration of the module is via the container environment.\n # We need to recognise that some or all of these may not be defined.\n # All run-time config that's required is given a __CFG prefix to\n # simplify checking whether all that's required has been defined.\n #\n # The SQUONK2_SLUG is limited to 10 characters, when combined with\n # \"Fragalysis {SLUG} \", this leaves (80-22) 58 characters for the\n # use with the target-access-string and session project strings\n # to form Squonk2 Unit and Project names.\n self.__CFG_SQUONK2_ASAPI_URL: Optional[str] =\\\n os.environ.get('SQUONK2_ASAPI_URL')\n self.__CFG_SQUONK2_DMAPI_URL: Optional[str] =\\\n os.environ.get('SQUONK2_DMAPI_URL')\n self.__CFG_SQUONK2_UI_URL: Optional[str] =\\\n os.environ.get('SQUONK2_UI_URL')\n self.__CFG_SQUONK2_ORG_UUID: Optional[str] =\\\n os.environ.get('SQUONK2_ORG_UUID')\n self.__CFG_SQUONK2_UNIT_BILLING_DAY: Optional[str] =\\\n os.environ.get('SQUONK2_UNIT_BILLING_DAY')\n self.__CFG_SQUONK2_PRODUCT_FLAVOUR: Optional[str] =\\\n os.environ.get('SQUONK2_PRODUCT_FLAVOUR')\n self.__CFG_SQUONK2_SLUG: Optional[str] =\\\n os.environ.get('SQUONK2_SLUG', '')[:_MAX_SLUG_LENGTH]\n self.__CFG_SQUONK2_ORG_OWNER: Optional[str] =\\\n os.environ.get('SQUONK2_ORG_OWNER')\n self.__CFG_SQUONK2_ORG_OWNER_PASSWORD: Optional[str] =\\\n os.environ.get('SQUONK2_ORG_OWNER_PASSWORD')\n self.__CFG_OIDC_AS_CLIENT_ID: Optional[str] = \\\n os.environ.get('OIDC_AS_CLIENT_ID')\n self.__CFG_OIDC_DM_CLIENT_ID: Optional[str] = \\\n os.environ.get('OIDC_DM_CLIENT_ID')\n self.__CFG_OIDC_KEYCLOAK_REALM: Optional[str] = \\\n os.environ.get('OIDC_KEYCLOAK_REALM')\n\n # Optional config (no '__CFG_' prefix)\n self.__DUMMY_TARGET_TITLE: Optional[str] =\\\n os.environ.get('DUMMY_TARGET_TITLE')\n self.__DUMMY_USER: Optional[str] =\\\n os.environ.get('DUMMY_USER')\n self.__DUMMY_TAS: 
Optional[str] =\\\n os.environ.get('DUMMY_TAS')\n self.__SQUONK2_VERIFY_CERTIFICATES: Optional[str] = \\\n os.environ.get('SQUONK2_VERIFY_CERTIFICATES')\n\n # The integer billing day, valid if greater than zero\n self.__unit_billing_day: int = 0\n # True if configured...\n self.__configuration_checked: bool = False\n self.__configured: bool = False\n # Ignore cert errors? (no)\n self.__verify_certificates: bool = True\n\n # The record ID of the Squonk2Org for this deployment.\n # Set on successful 'pre-flight-check'\n self.__org_record: Optional[Squonk2Org] = None\n\n self.__org_owner_as_token: str = ''\n self.__org_owner_dm_token: str = ''\n self.__keycloak_hostname: str = ''\n self.__keycloak_realm: str = ''\n\n # The Safe QuerySet from the security module.\n # Used when we are given a tas (target access string).\n # It allows us to check that a user is permitted to use the access ID\n # and relies on ISPyB credentials present in the environment.\n self.__ispyb_safe_query_set: ISpyBSafeQuerySet = ISpyBSafeQuerySet()", "def base_config():\n return deepcopy(__CONFIG)", "def configure(self, options, conf):", "def __init__(self, config):\n backends = {}\n for k in [x for x in config.keys() if x != 'DEFAULT']:\n backends[k] = config[k]['backend']\n\n # Create base WebNip object\n self.webnip = WebNip(\n modules_path=config['DEFAULT']['modules_path']\n )\n\n # Create backends\n self.backends = []\n for name, module in backends.items():\n try:\n LOGGER.info('Building backend for %s...', name)\n self.backends.append(\n self.webnip.load_backend(\n module,\n name,\n params={\n # Get params, calling the subcommands if necessary\n k: eventually_call_command(v)\n for k, v in config[name].items()\n }\n )\n )\n except Exception as exc:\n LOGGER.error(\n 'An error occured while building backend %s: %s',\n name,\n str(exc)\n )\n if DEBUG:\n raise", "async def init_config(ns: Namespace) -> None:\n async with lock:\n # track targets being actively scanned\n db['active-targets'] = set()\n\n # --brute-pass-list\n if ns.brute_pass_list is None:\n db['brute-pass-list'] = '/usr/share/wordlists/fasttrack.txt'\n else:\n db['brute-pass-list'] = ns.brute_pass_list\n if not ns.no_file_check and not file_exists(db['brute-pass-list']):\n raise BscanConfigError(\n '`--brute-pass-list` file ' + db['brute-pass-list'] +\n ' does not exist')\n\n # --brute-user-list\n if ns.brute_user_list is None:\n db['brute-user-list'] = (\n '/usr/share/wordlists/metasploit/namelist.txt')\n else:\n db['brute-user-list'] = ns.brute_user_list\n if not ns.no_file_check and not file_exists(db['brute-user-list']):\n raise BscanConfigError(\n '`--brute-user-list` file ' + db['brute-user-list'] +\n ' does not exist')\n\n # --cmd-print-width\n try:\n cmd_print_width = (80 if ns.cmd_print_width is None\n else int(ns.cmd_print_width))\n if cmd_print_width < 5:\n raise ValueError\n except ValueError:\n raise BscanConfigError(\n 'Invalid `--cmd-print-width` value specified; must be an '\n 'integer greater than or equal to 5')\n db['cmd-print-width'] = cmd_print_width\n\n # --output-dir\n if ns.output_dir is None:\n db['output-dir'] = os.getcwd()\n else:\n db['output-dir'] = ns.output_dir\n if not dir_exists(db['output-dir']):\n raise BscanConfigError(\n '`--output-dir` directory ' + db['output-dir'] +\n ' does not exist')\n\n # --patterns; also loads from `configuration/patterns.txt`\n patterns = load_config_file(\n 'patterns.txt',\n ns.config_dir).splitlines()\n if ns.patterns is not None:\n if not ns.patterns:\n raise BscanConfigError(\n 
'`--patterns` requires at least one regex pattern')\n else:\n patterns.extend(ns.patterns)\n db['patterns'] = re.compile('|'.join(patterns))\n\n # --no-program-check\n if not ns.no_program_check:\n not_found_progs = []\n progs = load_config_file(\n 'required-programs.txt',\n ns.config_dir).splitlines()\n for prog in progs:\n if shutil.which(prog) is None:\n not_found_progs.append(prog)\n\n if not_found_progs:\n raise BscanConfigError(\n 'required programs ' + ', '.join(not_found_progs) +\n ' could not be found on this system')\n\n # --no-service-scans\n db['no-service-scans'] = ns.no_service_scans\n\n # load service information from `configuration/service-scans.toml`\n db['services'] = toml.loads(\n load_config_file('service-scans.toml', ns.config_dir))\n\n # load quick scan method configuration\n # derived from `--qs-method` + `configuration/port-scans.toml`\n port_scan_config = toml.loads(\n load_config_file('port-scans.toml', ns.config_dir))\n qs_config = port_scan_config['quick']\n qs_method_name = (ns.qs_method if ns.qs_method is not None else\n qs_config['default'])\n if qs_method_name not in qs_config or qs_method_name == 'default':\n raise BscanConfigError(\n 'Invalid `--qs-method` specified: ' + str(qs_method_name))\n qs_attrs = qs_config[qs_method_name]\n db['quick-scan'] = PortScanConfig(\n qs_method_name,\n re.compile(qs_attrs['pattern']),\n qs_attrs['scan'])\n\n # load thorough scan method configuration\n # derived from `--ts-method` + `configuration/port-scans.toml`\n ts_config = port_scan_config['thorough']\n ts_method_name = (ns.ts_method if ns.ts_method is not None else\n ts_config['default'])\n if ts_method_name not in ts_config or ts_method_name == 'default':\n raise BscanConfigError(\n 'Invalid `--ts-method` specified: ' + str(ts_method_name))\n ts_attrs = ts_config[ts_method_name]\n db['thorough-scan'] = PortScanConfig(\n ts_method_name,\n re.compile(ts_attrs['pattern']),\n ts_attrs['scan'])\n\n # load udp scan method configuration\n # derived from `--udp-method` + `configuration/port-scans.toml`\n udp_config = port_scan_config['udp']\n udp_method_name = (ns.udp_method if ns.udp_method is not None else\n udp_config['default'])\n if udp_method_name not in udp_config or udp_method_name == 'default':\n raise BscanConfigError(\n 'Invalid `--udp-method` specified: ' + str(udp_method_name))\n udp_attrs = udp_config[udp_method_name]\n db['udp-scan'] = PortScanConfig(\n udp_method_name,\n re.compile(udp_attrs['pattern']),\n udp_attrs['scan'])\n\n # --status-interval\n try:\n db['status-interval'] = (30 if ns.status_interval is None\n else int(ns.status_interval))\n except ValueError:\n raise BscanConfigError(\n 'Invalid `--status-interval` integer specified: ' +\n str(ns.status_interval))\n\n # runtime tracking of active subprocesses\n db['subprocesses'] = dict()\n\n # --web-word-list\n if ns.web_word_list is None:\n db['web-word-list'] = '/usr/share/dirb/wordlists/big.txt'\n else:\n db['web-word-list'] = ns.web_word_list\n if not ns.no_file_check and not file_exists(db['web-word-list']):\n raise BscanConfigError(\n '`--web-word-list` file ' + db['web-word-list'] +\n ' does not exist')\n\n # --quick-only\n db['quick-only'] = ns.quick_only\n\n # --hard\n db['hard'] = ns.hard\n\n # --ping-sweep\n if ns.ping_sweep:\n raise BscanConfigError(\n '`--ping-sweep` option not yet implemented')\n db['ping-sweep'] = ns.ping_sweep\n\n # --udp\n db['udp'] = ns.udp\n\n # --verbose-status\n db['verbose-status'] = ns.verbose_status", "def host_bootstrap(args):\n name = args.name\n host = 
args.host\n port = args.port\n user = args.user\n protocol = args.protocol\n url = args.url\n pool = args.pool\n poolpath = args.poolpath\n baseconfig = Kbaseconfig(client=args.client, debug=args.debug)\n baseconfig.bootstrap(name, host, port, user, protocol, url, pool, poolpath)", "def __init__(self):\n self.config = {}", "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # customize reszied parameters\n # cfg['INPUT']['MIN_SIZE_TRAIN'] = (20,)\n # cfg['INPUT']['MAX_SIZE_TRAIN'] = 50\n cfg.freeze()\n default_setup(\n cfg, args\n ) # if you don't like any of the default setup, write your own setup code\n return cfg", "def setup():\n global RBD_POOL, RBD_STORAGE_CLASS, RBD_SECRET, CEPHFS_OBJ, \\\n CEPHFS_STORAGE_CLASS, CEPHFS_SECRET, RBD_PVC, CEPHFS_PVC\n log.info(\"Creating RBD Pool\")\n RBD_POOL = helpers.create_ceph_block_pool()\n\n log.info(\"Creating RBD Secret\")\n RBD_SECRET = helpers.create_secret(constants.CEPHBLOCKPOOL)\n\n log.info(\"Creating RBD StorageClass\")\n RBD_STORAGE_CLASS = helpers.create_storage_class(\n constants.CEPHBLOCKPOOL, RBD_POOL.name, RBD_SECRET.name\n )\n\n log.info(\"Creating CephFilesystem\")\n CEPHFS_OBJ = helpers.create_cephfilesystem()\n\n log.info(\"Creating FS Secret\")\n CEPHFS_SECRET = helpers.create_secret(constants.CEPHFILESYSTEM)\n\n log.info(\"Creating FS StorageClass\")\n CEPHFS_STORAGE_CLASS = helpers.create_storage_class(\n constants.CEPHFILESYSTEM, helpers.get_cephfs_data_pool_name(),\n CEPHFS_SECRET.name\n )\n\n log.info(\"Creating RBC PVC\")\n RBD_PVC = helpers.create_pvc(sc_name=RBD_STORAGE_CLASS.name)\n\n log.info(\"Creating CephFs PVC\")\n CEPHFS_PVC = helpers.create_pvc(sc_name=CEPHFS_STORAGE_CLASS.name)", "def _setup(self):", "def _setup(self):", "def test_construct_1_naked(self):\n config = configerus.new_config(bootstraps=[])\n self.assertIsInstance(config, Config)", "def config(\n data_folder=settings.data_folder,\n logs_folder=settings.logs_folder,\n imgs_folder=settings.imgs_folder,\n cache_folder=settings.cache_folder,\n cache_responses=settings.cache_responses,\n log_file=settings.log_file,\n log_console=settings.log_console,\n log_level=settings.log_level,\n log_name=settings.log_name,\n log_filename=settings.log_filename,\n useful_idf_objects=settings.useful_idf_objects,\n default_weight_factor=\"area\",\n ep_version=settings.ep_version,\n debug=settings.debug,\n):\n # set each global variable to the passed-in parameter value\n settings.cache_responses = cache_responses\n settings.cache_folder = Path(cache_folder).expand().makedirs_p()\n settings.data_folder = Path(data_folder).expand().makedirs_p()\n settings.imgs_folder = Path(imgs_folder).expand().makedirs_p()\n settings.logs_folder = Path(logs_folder).expand().makedirs_p()\n settings.log_console = log_console\n settings.log_file = log_file\n settings.log_level = log_level\n settings.log_name = log_name\n settings.log_filename = log_filename\n settings.useful_idf_objects = useful_idf_objects\n settings.zone_weight.set_weigth_attr(default_weight_factor)\n settings.ep_version = ep_version\n settings.debug = debug\n\n # if logging is turned on, log that we are configured\n if settings.log_file or settings.log_console:\n get_logger(name=\"archetypal\")\n log(\"Configured archetypal\")", "def setup_net(self):\n pass", "def configure(self, config_json):\n log.info(\"Configuring EDD backend for processing\")\n log.debug(\"Configuration string: '{}'\".format(config_json))\n\n yield self.set(config_json)\n\n cfs = 
json.dumps(self._config, indent=4)\n log.info(\"Final configuration:\\n\" + cfs)\n\n\n\n self.__numa_node_pool = []\n # remove numa nodes with missing capabilities\n for node in numa.getInfo():\n if len(numa.getInfo()[node]['gpus']) < 1:\n log.debug(\"Not enough gpus on numa node {} - removing from pool.\".format(node))\n continue\n elif len(numa.getInfo()[node]['net_devices']) < 1:\n log.debug(\"Not enough nics on numa node {} - removing from pool.\".format(node))\n continue\n else:\n self.__numa_node_pool.append(node)\n\n log.debug(\"{} numa nodes remaining in pool after cosntraints.\".format(len(self.__numa_node_pool)))\n\n if len(self._config['input_data_streams']) > len(self.__numa_node_pool):\n raise FailReply(\"Not enough numa nodes to process {} polarizations!\".format(len(self._config['input_data_streams'])))\n\n self._subprocessMonitor = SubprocessMonitor()\n #ToDo: Check that all input data streams have the same format, or allow different formats\n for i, streamid in enumerate(self._config['input_data_streams']):\n # calculate input buffer parameters\n stream_description = self._config['input_data_streams'][streamid]\n stream_description[\"dada_key\"] = DADABUFFERS[i]\n self.add_input_stream_sensor(streamid)\n self.input_heapSize = stream_description[\"samples_per_heap\"] * stream_description['bit_depth'] / 8\n\n nHeaps = self._config[\"samples_per_block\"] / stream_description[\"samples_per_heap\"]\n input_bufferSize = nHeaps * (self.input_heapSize)\n log.info('Input dada parameters created from configuration:\\n\\\n heap size: {} byte\\n\\\n heaps per block: {}\\n\\\n buffer size: {} byte'.format(self.input_heapSize, nHeaps, input_bufferSize))\n\n\n final_payloads, final_fpss, final_framens = EDD_VDIF_Frame_Size(stream_description['sample_rate'])\n\n if self._config['payload_size'] == 'auto':\n payload_size = final_payloads[-1]\n else:\n payload_size = int(self._config['payload_size'])\n\n log.info('Possible frame payload sizes (add 32 for framesize):')\n for k in range(final_payloads.size):\n if payload_size == final_payloads[k]:\n M = \"*\"\n else:\n M = \" \"\n log.info(' {}{:5.0f} byte {:8.0f} frames per sec {:6.3f} nsec/frame'.format(M, final_payloads[k], final_fpss[k], final_framens[k]))\n\n if payload_size not in final_payloads:\n log.warning(\"Payload size {} possibly not conform with VDIF format!\".format(payload_size))\n\n # calculate output buffer parameters\n size_of_samples = ceil(1. * self._config[\"samples_per_block\"] * 2 / 8.) 
# byte for two bit mode\n number_of_packages = ceil(size_of_samples / float(payload_size))\n\n output_buffer_size = number_of_packages * (payload_size + self._config['vdif_header_size'])\n\n integration_time = self._config[\"samples_per_block\"] / float(stream_description[\"sample_rate\"])\n self._integration_time_status.set_value(integration_time)\n\n rate = output_buffer_size/ integration_time # in spead documentation BYTE per second and not bit!\n rate *= self._config[\"output_rate_factor\"] # set rate to (100+X)% of expected rate\n self._output_rate_status.set_value(rate / 1E9)\n\n log.info('Output parameters calculated from configuration:\\n\\\n total size of data samples: {} byte\\n\\\n number_of_packages: {}\\n\\\n size of output buffer: {} byte\\n\\\n rate ({:.0f}%): {} Gbps'.format(size_of_samples,\n number_of_packages, output_buffer_size,\n self._config[\"output_rate_factor\"]*100, rate / 1E9))\n\n numa_node = self.__numa_node_pool[i]\n log.debug(\"Associating {} with numa node {}\".format(streamid, numa_node))\n\n # configure dada buffer\n bufferName = stream_description['dada_key']\n yield self._create_ring_buffer(input_bufferSize, 64, bufferName, numa_node)\n\n ofname = bufferName[::-1]\n # we write nSlice blocks on each go\n yield self._create_ring_buffer(output_buffer_size, 8, ofname, numa_node)\n\n # Configure + launch \n physcpu = numa.getInfo()[numa_node]['cores'][0]\n thread_id = self._config['thread_id'][streamid]\n station_id = self._config['thread_id'][streamid]\n cmd = \"taskset -c {physcpu} VLBI --input_key={dada_key} --speadheap_size={heapSize} --thread_id={thread_id} --station_id={station_id} --payload_size={payload_size} --sample_rate={sample_rate} --nbits={bit_depth} -o {ofname} --log_level={log_level} --output_type=dada\".format(ofname=ofname, heapSize=self.input_heapSize, numa_node=numa_node, physcpu=physcpu, thread_id=thread_id, station_id=station_id, payload_size=payload_size, log_level=self._config['log_level'], **stream_description)\n log.debug(\"Command to run: {}\".format(cmd))\n\n cudaDevice = numa.getInfo()[numa_node]['gpus'][0]\n cli = ManagedProcess(cmd, env={\"CUDA_VISIBLE_DEVICES\": cudaDevice})\n self._subprocessMonitor.add(cli, self._subprocess_error)\n self._subprocesses.append(cli)\n\n cfg = self._config.copy()\n cfg.update(stream_description)\n\n ip_range = []\n port = set()\n for key in self._config[\"output_data_streams\"]:\n if streamid in key:\n ip_range.append(self._config[\"output_data_streams\"][key]['ip'])\n port.add(self._config[\"output_data_streams\"][key]['port'])\n if len(port)!=1:\n raise FailReply(\"Output data for one plarization has to be on the same port! 
\")\n\n if self._config[\"output_type\"] == 'network':\n physcpu = \",\".join(numa.getInfo()[numa_node]['cores'][1:2])\n fastest_nic, nic_params = numa.getFastestNic(numa_node)\n log.info(\"Sending data for {} on NIC {} [ {} ] @ {} Mbit/s\".format(streamid, fastest_nic, nic_params['ip'], nic_params['speed']))\n\n cmd = \"taskset -c {physcpu} vdif_send --input_key {ofname} --if_ip {ibv_if} --dest_ip {mcast_dest} --port {port_tx} --max_rate {rate}\".format(ofname=ofname, \n physcpu=physcpu, ibv_if=nic_params['ip'], mcast_dest=\" \".join(ip_range), port_tx=port.pop(), rate=rate)\n log.debug(\"Command to run: {}\".format(cmd))\n\n elif self._config[\"output_type\"] == 'disk':\n ofpath = os.path.join(cfg[\"output_directory\"], ofname)\n log.debug(\"Writing output to {}\".format(ofpath))\n if not os.path.isdir(ofpath):\n os.makedirs(ofpath)\n cmd = \"dada_dbdisk -k {ofname} -D {ofpath} -W\".format(ofname=ofname, ofpath=ofpath, **cfg)\n else:\n log.warning(\"Selected null output. Not sending data!\")\n cmd = \"dada_dbnull -z -k {}\".format(ofname)\n\n log.debug(\"Command to run: {}\".format(cmd))\n mks = ManagedProcess(cmd, env={\"CUDA_VISIBLE_DEVICES\": cudaDevice})\n self._subprocessMonitor.add(mks, self._subprocess_error)\n self._subprocesses.append(mks)\n\n self._subprocessMonitor.start()", "def config(self):\n raise NotImplementedError", "def __init__(self, config):\n self.config = config", "def __init__(self, config):\n self.config = config", "def __init__(self, config):\n self.config = config", "def setup_config():\n global config\n config = modConfig.Config(cmdline.config)", "def testLoadConfigs(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n self.assertIsNotNone(pool.GetLabConfig())\n self.assertIsNotNone(pool.GetHostConfigs('postsubmit'))\n self.assertIsNotNone(pool.GetHostConfigs('crystalball'))\n self.assertIsNotNone(pool.GetHostConfigs('crystalball-power'))", "def _config_classes(self):\n pass", "def _init_config_(self):\n self._config= {}", "def init_config(self, conf_map):\n pass", "def init():\n # make sure pool is initialized\n global pool\n if not pool:\n pool = aiohttp.ClientSession(\n connector=aiohttp.TCPConnector(limit=config.MAX_PARALLEL_REQUESTS),\n raise_for_status=False,\n trust_env=True,\n auth=aiohttp.BasicAuth( config.CACHE_USERNAME, config.CACHE_PASSWORD ),\n )", "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def _load_common_config(self, config: Dict[str, Any]) -> Dict[str, Any] :\n # Log level\n if 'loglevel' in self.args.loglevel:\n config.update({'verbosity': self.args.loglevel})\n else:\n config.update({'verbosity': 0})\n logging.basicConfig(\n level=logging.INFO if config['verbosity'] < 1 else logging.DEBUG,\n format= '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n set_loggers(config['verbosity'])\n logger.info('Verbosity set to %s', config['verbosity'])\n\n # Add dynamic whitelist if found\n if 'dynamic_whitelist' in self.args and self.args.dynamic_whitelist:\n config['pairlist'] = {\n 'method': 'VolumePairList',\n 'config': {'number_assets': self.args.dynamic_whitelist}\n }\n logger.warning(\n 'Parameter --dynamic-whitelist has been deprecated, '\n 'and will be completely replaced by the whitelist dict in the future. '\n 'For now: using dynamically generated whitelist based on VolumePairList. 
'\n '(not applicable with Backtesting and Hyperopt)'\n )\n if self.args.db_url and self.args.db_url != constant.DEFAULT_DB_PROD_URL:\n config.update({'db_url': self.args.db_url})\n logger.info('Parameter --db-url detected ...')\n\n if config.get('dry_run', False):\n logger.info('Dry run is enabled')\n if config.get('db_url') in [None, constant.DEFAULT_DB_PROD_URL]:\n # Default to in-memory db for dry_run if not specified\n config['db_url'] = constant.DEFAULT_DB_DRYRUN_URL\n else:\n if not config.get('db_url', None):\n config['db_url'] = constant.DEFAULT_DB_PROD_URL\n logger.info('Dry run is disabled')\n\n if config.get('forcebuy_enable', False):\n logger.warning('`forcebuy` RPC message enabled.')\n\n # Setting max_open_trades to infinite if -1\n if config.get('max_open_trades') == -1:\n config['max_open_trades'] = float('inf')\n\n logger.info(f'Using DB: \"{config[\"db_url\"]}\"')\n\n # Check if the exchange set by the user is supported\n self.check_exchange(config)\n\n return config", "def test_create_namespaced_build_config(self):\n pass", "def configure(self, args):\n pass", "def __init__(self, config, cfg):\n self.config = config\n self.cfg = cfg", "def configure(self, conf):\n return", "def _configure(self):\n FaultCohesive._configure(self)\n self.eqsrcs = self.inventory.eqsrcs\n self.output = self.inventory.output\n return", "def cg_config():\n return {}", "def config(self) -> InstrumentConfig:\n ...", "def config_db():", "def _setupPools(self):\n reactor = MemoryReactorWithClock()\n cph = SteppablePoolHelper(jobSchema + schemaText)\n then = datetime.datetime(2012, 12, 12, 12, 12, 12)\n reactor.advance(astimestamp(then))\n cph.setUp(self)\n qpool = ControllerQueue(reactor, cph.pool.connection, useWorkerPool=False)\n\n realChoosePerformer = qpool.choosePerformer\n performerChosen = []\n\n def catchPerformerChoice(onlyLocally=False):\n result = realChoosePerformer(onlyLocally=onlyLocally)\n performerChosen.append(True)\n return result\n\n qpool.choosePerformer = catchPerformerChoice\n reactor.callLater(0, qpool._workCheck)\n\n qpool.startService()\n cph.flushHolders()\n\n return cph, qpool, reactor, performerChosen", "def read_configfile():\n configtp = namedtuple(\"Config\", [\"lb_user\", \"lb_pwd\", \"lb1\", \"lb2\", \"lb_dg\", \"lb_dg_partition\",\n \"ca\", \"ca_proxy\", \"cm_chain\", \"cm_key\", \"cm_renewal_days\",\n \"cm_delayed_days\", \"plugin\"])\n config = ConfigParser.ConfigParser()\n config.read(CONFIG_FILE)\n if config.getboolean(\"Certificate Authority\", \"use proxy\"):\n ca_proxy = config.get(\"Certificate Authority\", \"proxy\")\n else:\n ca_proxy = False\n\n if config.getboolean(\"Load Balancer\", \"cluster\"):\n bigip1 = config.get(\"Load Balancer\", \"host 1\")\n bigip2 = config.get(\"Load Balancer\", \"host 2\")\n else:\n bigip1 = config.get(\"Load Balancer\", \"host 1\")\n bigip2 = None\n\n try:\n plugin_section = config.items('Plugin')\n except ConfigParser.NoSectionError:\n plugin_section = None\n\n the_config = configtp(\n lb1=bigip1,\n lb2=bigip2,\n lb_user=config.get(\"Load Balancer\", \"username\"),\n lb_pwd=config.get(\"Load Balancer\", \"password\"),\n lb_dg=config.get(\"Load Balancer\", \"datagroup\"),\n lb_dg_partition=config.get(\"Load Balancer\", \"datagroup partition\"),\n ca=config.get(\"Certificate Authority\", \"directory url\"),\n ca_proxy=ca_proxy,\n cm_chain=config.getboolean(\"Common\", \"include chain\"),\n cm_key=config.get(\"Common\", \"account key\"),\n cm_renewal_days=int(config.get(\"Common\", \"renewal days\")),\n 
cm_delayed_days=int(config.get(\"Common\", \"delayed installation days\")),\n plugin=plugin_section)\n return the_config", "def load_config(self):\n pass", "def __init__(\n self, config: SimpleGCNConfig = SimpleGCNConfig(name=\"simplegcn\")\n ):\n super().__init__()\n self.edge_lengthscale = config.edge_lengthscale\n self.weight_edges = config.weight_edges\n\n self.atom_embedding = nn.Linear(\n config.atom_input_features, config.width\n )\n\n self.layer1 = GraphConv(config.width, config.width)\n self.layer2 = GraphConv(config.width, config.output_features)\n self.readout = AvgPooling()", "def pooling(lconf, poolsize=10):\n pool = Pool(poolsize)\n pool.map(worker, lconf)", "def build_ssl_augmentor(cfg):\n aug_list = []\n\n #1. rotate\n if cfg.AUGMENTOR.ROTATE.ENABLED:\n aug_list.append(\n Rotate(rot90=True,\n p=cfg.AUGMENTOR.ROTATE.P))\n\n #2. flip\n if cfg.AUGMENTOR.FLIP.ENABLED:\n aug_list.append(\n Flip(do_ztrans=cfg.AUGMENTOR.FLIP.DO_ZTRANS,\n p=cfg.AUGMENTOR.FLIP.P))\n\n #3. grayscale\n if cfg.AUGMENTOR.GRAYSCALE.ENABLED:\n aug_list.append(\n Grayscale(p=cfg.AUGMENTOR.GRAYSCALE.P))\n\n #4. missingparts\n if cfg.AUGMENTOR.MISSINGPARTS.ENABLED:\n aug_list.append(\n MissingParts(p=cfg.AUGMENTOR.MISSINGPARTS.P))\n\n #5. motion-blur\n if cfg.AUGMENTOR.MOTIONBLUR.ENABLED:\n aug_list.append(\n MotionBlur( \n sections=cfg.AUGMENTOR.MOTIONBLUR.SECTIONS, \n kernel_size=cfg.AUGMENTOR.MOTIONBLUR.KERNEL_SIZE,\n p=cfg.AUGMENTOR.MOTIONBLUR.P))\n\n #6. cut-blur\n if cfg.AUGMENTOR.CUTBLUR.ENABLED:\n aug_list.append(\n CutBlur(length_ratio=cfg.AUGMENTOR.CUTBLUR.LENGTH_RATIO, \n down_ratio_min=cfg.AUGMENTOR.CUTBLUR.DOWN_RATIO_MIN,\n down_ratio_max=cfg.AUGMENTOR.CUTBLUR.DOWN_RATIO_MAX,\n downsample_z=cfg.AUGMENTOR.CUTBLUR.DOWNSAMPLE_Z,\n p=cfg.AUGMENTOR.CUTBLUR.P))\n\n #7. 
cut-noise\n if cfg.AUGMENTOR.CUTNOISE.ENABLED:\n aug_list.append(\n CutNoise(length_ratio=cfg.AUGMENTOR.CUTNOISE.LENGTH_RATIO, \n scale=cfg.AUGMENTOR.CUTNOISE.SCALE,\n p=cfg.AUGMENTOR.CUTNOISE.P))\n\n return Compose(transforms=aug_list, \n input_size=cfg.MODEL.INPUT_SIZE, \n smooth=cfg.AUGMENTOR.SMOOTH,\n additional_targets=None)", "def __init__(self,MODULE_NAME='auto_amqp',config=None):\n newConfig = { MODULE_NAME : DEFAULT_CONFIG}\n Configurable.__init__(self,newConfig)\n self.load_conf(config)\n self.MODULE_NAME = MODULE_NAME", "def initialConfig(self):\r\r\n\r\r\n loggerCmw = logging.getLogger('initialConfig')\r\r\n\r\r\n self.set_scenario()\r\r\n\r\r\n self.set_default_rf_settings()\r\r\n\r\r\n self.physical_downlink_settings()\r\r\n\r\r\n self.physical_uplink_settings()\r\r\n\r\r\n self.connection_config()\r\r\n\r\r\n self.network_settings()\r\r\n\r\r\n self.set_conn_type(conn= self.connTypeEnum.CS)\r\r\n\r\r\n self.waitForCompletion()", "def configure(self):\n # Defaults\n self.db_type = DB_TYPE.POSTGRES\n self.db_name = \"ambari\"\n self.db_user = \"ambari\"\n self.db_password = \"bigdata\"\n self.db_host = \"localhost\"\n self.db_url = None\n\n if os.path.exists(AMBARI_PROPERTIES_LOCATION):\n self.ambari_props = self.read_conf_file(AMBARI_PROPERTIES_LOCATION)\n\n if \"server.jdbc.database\" in self.ambari_props:\n self.db_type = self.ambari_props[\"server.jdbc.database\"].upper()\n if \"server.jdbc.database_name\" in self.ambari_props:\n self.db_name = self.ambari_props[\"server.jdbc.database_name\"]\n if \"server.jdbc.user.name\" in self.ambari_props:\n self.db_user = self.ambari_props[\"server.jdbc.user.name\"]\n if \"server.jdbc.user.passwd\" in self.ambari_props:\n self.db_password = self.read_file(self.ambari_props[\"server.jdbc.user.passwd\"])\n if \"server.jdbc.hostname\" in self.ambari_props:\n self.db_host = self.ambari_props[\"server.jdbc.hostname\"]\n if \"server.jdbc.url\" in self.ambari_props:\n self.db_url = self.ambari_props[\"server.jdbc.url\"]\n if \"ambari-server.user\" in self.ambari_props:\n self.ambari_server_user = self.ambari_props[\"ambari-server.user\"]\n\n #Logger.info(\"Using database type: {0}, name: {1}, host: {2}\".format(self.db_type, self.db_name, self.db_host))\n connection_string = \"dbname='{0}' user='{1}' host='{2}' password='{3}'\".format(self.db_name, self.db_user, self.db_host, self.db_password)\n\n if self.db_type == DB_TYPE.POSTGRES:\n try:\n import psycopg2 # covered by GNU Lesser General Public License\n except Exception, e:\n Logger.error(\"Need to install python-psycopg2 package for Postgres DB. E.g., yum install python-psycopg2\\n\")\n self.terminate()\n elif self.db_type == DB_TYPE.MYSQL:\n try:\n import pymysql # covered by MIT License\n except Exception, e:\n Logger.error(\"Need to install PyMySQL package for Python. 
E.g., yum install python-setuptools && easy_install pip && pip install PyMySQL\\n\")\n self.terminate()\n else:\n Logger.error(\"Unknown database type: {0}.\".format(self.db_type))\n self.terminate()\n\n self.conn = None\n self.cursor = None\n try:\n Logger.debug(\"Initializing database connection and cursor.\")\n if self.db_type == DB_TYPE.POSTGRES:\n self.conn = psycopg2.connect(connection_string)\n self.cursor = self.conn.cursor()\n elif self.db_type == DB_TYPE.MYSQL:\n self.conn = pymysql.connect(self.db_host, self.db_user, self.db_password, self.db_name)\n self.cursor = self.conn.cursor()\n\n Logger.debug(\"Created database connection and cursor.\")\n self.cursor.execute(\"SELECT metainfo_key, metainfo_value FROM metainfo WHERE metainfo_key='version';\")\n rows = self.cursor.fetchall()\n if rows and len(rows) == 1:\n self.ambari_version = rows[0][1]\n # Logger.info(\"Connected to database!!! Ambari version is {0}\\n\".format(self.ambari_version))\n\n # Must be Ambari 2.0.0 or higher\n if self.compare_versions(self.ambari_version, MIN_AMBARI_VERSION) < 0:\n Logger.error(\"Must be running Ambari Version {0} or higher.\\n\".format(MIN_AMBARI_VERSION))\n self.terminate()\n else:\n Logger.error(\"Unable to determine Ambari version.\")\n self.terminate()\n\n self.set_cluster()\n except Exception, e:\n Logger.error(\"I am unable to connect to the database. Error: {0}\\n\".format(e))\n self.terminate()\n else:\n raise Exception(\"Could not find file {0}\".format(AMBARI_PROPERTIES_LOCATION))", "def setup_confighelper(self):\n self.cfghelper = cfgmodule.MCfgModule()\n self.cfghelper.load_configfiles(self.configname, self.get_pkgdirimp_config())", "def test_read_namespaced_build_config(self):\n pass", "def __init__(self, config, maxCores, maxMemory, maxDisk):\n self.config = config\n self.maxCores = maxCores\n self.maxMemory = maxMemory\n self.maxDisk = maxDisk\n self.environment = {}\n \"\"\"\n :type dict[str,str]\n \"\"\"\n self.workerCleanupInfo = WorkerCleanupInfo(workDir=self.config.workDir,\n workflowID=self.config.workflowID,\n cleanWorkDir=self.config.cleanWorkDir)", "def config():\n return _config", "def configure(self, **cfg: Any) -> 'Self | None':", "def __init__(self, config):\n\n self.config = config", "def __init__(self, namespace_model_instance=None, nexus=None,\n remote_user=None, remote_pass=None, private_key_file=None,\n delegate=None, default_task_role=None, default_run_from=None):\n \n super(ConfigModel, self).__init__(nexus=nexus)\n self.namespace_model_instance = namespace_model_instance\n self.remote_user = remote_user\n self.remote_pass = remote_pass\n self.private_key_file = private_key_file\n self.default_task_role = default_task_role\n self.default_run_from = default_run_from\n self.delegate = delegate\n clone_dict = {}\n #NOTE! 
_node_dict is an inverted dictionary (the string keys are\n #stored as values\n for v, k in self._node_dict.items():\n if not isinstance(v, _ConfigTask):\n raise ConfigException(\"'%s' is not a task\" % k)\n clone = v.clone()\n clone._set_delegate(self)\n clone._set_model_instance(self)\n clone_dict[v] = clone\n for etan in v._embedded_exittask_attrnames():\n clone_dict[getattr(v, etan)] = getattr(clone, etan)\n setattr(self, k, clone)\n _ = getattr(self, k) #this primes the reference machinery\n self.dependencies = [d.clone(clone_dict)\n for d in self.get_class_dependencies()]\n #default option values\n opts = object.__getattribute__(self, _config_options)\n for k, v in opts.items():\n if k == _default_task_role and self.default_task_role is None:\n self.default_task_role = v\n elif k == _remote_user and self.remote_user is None:\n self.remote_user = v\n elif k == _private_key_file and self.private_key_file is None:\n self.private_key_file = v\n elif k == _default_run_from and self.default_run_from is None:\n self.default_run_from = v", "def setUp(self):\n\n self._hash_bins = 10\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_bins\": self._hash_bins,\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim\n }", "def __init__(__self__, *,\n fs_aio_max_nr: Optional[pulumi.Input[int]] = None,\n fs_file_max: Optional[pulumi.Input[int]] = None,\n fs_inotify_max_user_watches: Optional[pulumi.Input[int]] = None,\n fs_nr_open: Optional[pulumi.Input[int]] = None,\n kernel_threads_max: Optional[pulumi.Input[int]] = None,\n net_core_netdev_max_backlog: Optional[pulumi.Input[int]] = None,\n net_core_optmem_max: Optional[pulumi.Input[int]] = None,\n net_core_rmem_default: Optional[pulumi.Input[int]] = None,\n net_core_rmem_max: Optional[pulumi.Input[int]] = None,\n net_core_somaxconn: Optional[pulumi.Input[int]] = None,\n net_core_wmem_default: Optional[pulumi.Input[int]] = None,\n net_core_wmem_max: Optional[pulumi.Input[int]] = None,\n net_ipv4_ip_local_port_range: Optional[pulumi.Input[str]] = None,\n net_ipv4_neigh_default_gc_thresh1: Optional[pulumi.Input[int]] = None,\n net_ipv4_neigh_default_gc_thresh2: Optional[pulumi.Input[int]] = None,\n net_ipv4_neigh_default_gc_thresh3: Optional[pulumi.Input[int]] = None,\n net_ipv4_tcp_fin_timeout: Optional[pulumi.Input[int]] = None,\n net_ipv4_tcp_keepalive_probes: Optional[pulumi.Input[int]] = None,\n net_ipv4_tcp_keepalive_time: Optional[pulumi.Input[int]] = None,\n net_ipv4_tcp_max_syn_backlog: Optional[pulumi.Input[int]] = None,\n net_ipv4_tcp_max_tw_buckets: Optional[pulumi.Input[int]] = None,\n net_ipv4_tcp_tw_reuse: Optional[pulumi.Input[bool]] = None,\n net_ipv4_tcpkeepalive_intvl: Optional[pulumi.Input[int]] = None,\n net_netfilter_nf_conntrack_buckets: Optional[pulumi.Input[int]] = None,\n net_netfilter_nf_conntrack_max: Optional[pulumi.Input[int]] = None,\n vm_max_map_count: Optional[pulumi.Input[int]] = None,\n vm_swappiness: Optional[pulumi.Input[int]] = None,\n vm_vfs_cache_pressure: Optional[pulumi.Input[int]] = None):\n if fs_aio_max_nr is not None:\n pulumi.set(__self__, \"fs_aio_max_nr\", fs_aio_max_nr)\n if fs_file_max is not None:\n pulumi.set(__self__, \"fs_file_max\", fs_file_max)\n if fs_inotify_max_user_watches is not None:\n pulumi.set(__self__, \"fs_inotify_max_user_watches\", fs_inotify_max_user_watches)\n if fs_nr_open is not None:\n pulumi.set(__self__, \"fs_nr_open\", fs_nr_open)\n if kernel_threads_max is not None:\n pulumi.set(__self__, 
\"kernel_threads_max\", kernel_threads_max)\n if net_core_netdev_max_backlog is not None:\n pulumi.set(__self__, \"net_core_netdev_max_backlog\", net_core_netdev_max_backlog)\n if net_core_optmem_max is not None:\n pulumi.set(__self__, \"net_core_optmem_max\", net_core_optmem_max)\n if net_core_rmem_default is not None:\n pulumi.set(__self__, \"net_core_rmem_default\", net_core_rmem_default)\n if net_core_rmem_max is not None:\n pulumi.set(__self__, \"net_core_rmem_max\", net_core_rmem_max)\n if net_core_somaxconn is not None:\n pulumi.set(__self__, \"net_core_somaxconn\", net_core_somaxconn)\n if net_core_wmem_default is not None:\n pulumi.set(__self__, \"net_core_wmem_default\", net_core_wmem_default)\n if net_core_wmem_max is not None:\n pulumi.set(__self__, \"net_core_wmem_max\", net_core_wmem_max)\n if net_ipv4_ip_local_port_range is not None:\n pulumi.set(__self__, \"net_ipv4_ip_local_port_range\", net_ipv4_ip_local_port_range)\n if net_ipv4_neigh_default_gc_thresh1 is not None:\n pulumi.set(__self__, \"net_ipv4_neigh_default_gc_thresh1\", net_ipv4_neigh_default_gc_thresh1)\n if net_ipv4_neigh_default_gc_thresh2 is not None:\n pulumi.set(__self__, \"net_ipv4_neigh_default_gc_thresh2\", net_ipv4_neigh_default_gc_thresh2)\n if net_ipv4_neigh_default_gc_thresh3 is not None:\n pulumi.set(__self__, \"net_ipv4_neigh_default_gc_thresh3\", net_ipv4_neigh_default_gc_thresh3)\n if net_ipv4_tcp_fin_timeout is not None:\n pulumi.set(__self__, \"net_ipv4_tcp_fin_timeout\", net_ipv4_tcp_fin_timeout)\n if net_ipv4_tcp_keepalive_probes is not None:\n pulumi.set(__self__, \"net_ipv4_tcp_keepalive_probes\", net_ipv4_tcp_keepalive_probes)\n if net_ipv4_tcp_keepalive_time is not None:\n pulumi.set(__self__, \"net_ipv4_tcp_keepalive_time\", net_ipv4_tcp_keepalive_time)\n if net_ipv4_tcp_max_syn_backlog is not None:\n pulumi.set(__self__, \"net_ipv4_tcp_max_syn_backlog\", net_ipv4_tcp_max_syn_backlog)\n if net_ipv4_tcp_max_tw_buckets is not None:\n pulumi.set(__self__, \"net_ipv4_tcp_max_tw_buckets\", net_ipv4_tcp_max_tw_buckets)\n if net_ipv4_tcp_tw_reuse is not None:\n pulumi.set(__self__, \"net_ipv4_tcp_tw_reuse\", net_ipv4_tcp_tw_reuse)\n if net_ipv4_tcpkeepalive_intvl is not None:\n pulumi.set(__self__, \"net_ipv4_tcpkeepalive_intvl\", net_ipv4_tcpkeepalive_intvl)\n if net_netfilter_nf_conntrack_buckets is not None:\n pulumi.set(__self__, \"net_netfilter_nf_conntrack_buckets\", net_netfilter_nf_conntrack_buckets)\n if net_netfilter_nf_conntrack_max is not None:\n pulumi.set(__self__, \"net_netfilter_nf_conntrack_max\", net_netfilter_nf_conntrack_max)\n if vm_max_map_count is not None:\n pulumi.set(__self__, \"vm_max_map_count\", vm_max_map_count)\n if vm_swappiness is not None:\n pulumi.set(__self__, \"vm_swappiness\", vm_swappiness)\n if vm_vfs_cache_pressure is not None:\n pulumi.set(__self__, \"vm_vfs_cache_pressure\", vm_vfs_cache_pressure)", "def manage_config() -> dict:\n required_args = {\"embedding_size\", \"hidden_size\", \"num_layers\", \"corpus_dir\"}\n arg_groups = {\n \"general\": {\"recoding_type\"},\n \"model\": {\"embedding_size\", \"hidden_size\", \"num_layers\", \"dropout\"},\n \"train\": {\"weight_decay\", \"learning_rate\", \"batch_size\", \"num_epochs\", \"clip\", \"print_every\", \"eval_every\",\n \"model_save_path\", \"device\", \"model_name\"},\n \"logging\": {\"log_dir\"},\n \"corpus\": {\"corpus_dir\", \"max_seq_len\"},\n \"recoding\": {\"step_type\", \"num_samples\", \"mc_dropout\", \"prior_scale\", \"hidden_size\", \"weight_decay\",\n \"data_noise\", \"share_anchor\", 
\"use_cross_entropy\"},\n \"step\": {\"predictor_layers\", \"window_size\", \"step_size\", \"hidden_size\"}\n }\n argparser = init_argparser()\n config_object = ConfigSetup(argparser, required_args, arg_groups)\n config_dict = config_object.config_dict\n\n return config_dict", "def _get_MindtPy_config():\n CONFIG = ConfigBlock('MindtPy')\n\n _add_common_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_fp_configs(CONFIG)\n _add_bound_configs(CONFIG)\n _add_roa_configs(CONFIG)\n return CONFIG", "def __init__(self, configure, base_path, bin_path, settings):\r\n # Variables setup\r\n self.base_path = base_path\r\n self.bin_path = bin_path\r\n self.settings = settings\r\n self.ssh_key = bin_path.joinpath('brick_id_rsa')\r\n self.ssh_pub = bin_path.joinpath('brick_id_rsa.pub')\r\n\r\n # Path setup\r\n self.src_path = self.base_path.joinpath(self.base_path.parent, 'src')\r\n self.log_path = self.base_path.joinpath(self.base_path.parent, 'logs')\r\n\r\n # Start re-configuration or create new one\r\n if configure or not self.ssh_key.exists():\r\n self.__setup_deploy()", "def setup(self, num_qubit, fusion_enable, use_cu1):", "def mock_config():\n from .. import config\n\n _old_fs = os.getenv('FREESURFER_HOME')\n if not _old_fs:\n os.environ['FREESURFER_HOME'] = mkdtemp()\n\n filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))\n settings = loads(filename.read_text())\n for sectionname, configs in settings.items():\n if sectionname != 'environment':\n section = getattr(config, sectionname)\n section.load(configs, init=False)\n config.nipype.omp_nthreads = 1\n config.nipype.init()\n config.loggers.init()\n config.init_spaces()\n\n config.execution.work_dir = Path(mkdtemp())\n config.execution.bids_dir = Path(pkgrf('fmriprep', 'data/tests/ds000005')).absolute()\n config.execution.fmriprep_dir = Path(mkdtemp())\n config.execution.init()\n\n yield\n\n shutil.rmtree(config.execution.work_dir)\n shutil.rmtree(config.execution.fmriprep_dir)\n\n if not _old_fs:\n del os.environ[\"FREESURFER_HOME\"]", "def configure(self):\n # Every single node produces node stats\n self._init_local_node_stats_publisher()\n\n if self._track_processes:\n # Processes stats are optional\n self._init_local_processes_stats_publisher()\n else:\n self._stub_processes_stats_routes()\n\n if self._is_lb:\n # Load balancer node also provides proxies stats\n self._init_local_proxies_stats_publisher()\n else:\n self._stub_proxies_stats_routes()\n\n if self._is_master:\n # Master collects stats from all nodes and provides API for access\n self._init_cluster_node_stats_publisher()\n if self._track_processes:\n self._init_cluster_processes_stats_publisher()\n self._init_cluster_proxies_stats_publisher()\n else:\n self._stub_cluster_stats_routes()", "def setup_aws():\n setup_queues()\n setup_buckets()\n setup_domains()", "def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n cfg.merge_from_list(args.opts)\n\n # configs for training\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TRAIN = (\"vidor_small_train\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TRAIN = (\"vidor_small_10imgs_train\",)\n else:\n cfg.DATASETS.TRAIN = (\"vidor_large_train\",)\n # cfg.DATALOADER.NUM_WORKERS = 2\n if not args.eval_only:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\") # Let training 
initialize from model zoo\n factor = 4\n cfg.SOLVER.IMS_PER_BATCH = 16 * factor\n cfg.SOLVER.BASE_LR = 0.0001 * factor # finetune using 10x smaller base_lr\n cfg.SOLVER.MAX_ITER = 270000 // factor \n cfg.SOLVER.STEPS = [210000 // factor, 250000 // factor]\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # default: 512\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 78\n\n # configs for testing\n # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TEST = (\"vidor_small_val\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TEST = (\"vidor_small_10imgs_val\",)\n else:\n cfg.DATASETS.TEST = (\"vidor_large_val\",)\n # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n # cfg.OUTPUT_DIR = './output/train_vidor_with_pseudo_labels'\n \n \n if not args.eval_only:\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "def pibooth_configure(cfg):", "def configure_nccl():\n os.environ[\"NCCL_DEBUG\"] = \"INFO\"\n os.environ[\"NCCL_SOCKET_IFNAME\"] = \"ib0\"\n os.environ[\"NCCL_IB_DISABLE\"] = \"1\"\n\n os.environ[\"NCCL_LAUNCH_MODE\"] = \"PARALLEL\"\n os.environ[\"NCCL_IB_HCA\"] = subprocess.getoutput(\n \"cd /sys/class/infiniband/ > /dev/null; for i in mlx5_*; \"\n \"do cat $i/ports/1/gid_attrs/types/* 2>/dev/null \"\n \"| grep v >/dev/null && echo $i ; done; > /dev/null\"\n )\n os.environ[\"NCCL_IB_GID_INDEX\"] = \"3\"\n os.environ[\"NCCL_IB_TC\"] = \"106\"", "def __init__(self):\n self.database = None\n self.dataset = None\n \n self._database_temp_cache_dir = None\n self.path_of_pickle_file=None\n # open default config\n self.default_config = YamlConfig(DEXNET_API_DEFAULTS_FILE)\n # Resolve gripper_dir and cache_dir relative to dex-net root\n self.default_config['cache_dir'] = None\n for key in ['gripper_dir']:\n if not os.path.isabs(self.default_config[key]):\n self.default_config[key] = os.path.realpath(DEXNET_DIR + self.default_config[key])" ]
[ "0.6253901", "0.62495774", "0.62495774", "0.61576325", "0.6040787", "0.60355586", "0.60355586", "0.60355586", "0.60355586", "0.59917283", "0.5979285", "0.59686154", "0.593352", "0.59138405", "0.5896729", "0.58624494", "0.58605725", "0.58148235", "0.5778351", "0.57762766", "0.57652366", "0.5738404", "0.5734233", "0.5732025", "0.5732025", "0.57314175", "0.571138", "0.5701344", "0.5679329", "0.56379074", "0.55903506", "0.55903506", "0.5584635", "0.55837303", "0.55694336", "0.55328596", "0.5528231", "0.5528137", "0.5505465", "0.54892796", "0.54887843", "0.54884595", "0.5479118", "0.5460506", "0.54571706", "0.5446277", "0.5446277", "0.54368466", "0.543486", "0.54272604", "0.54202473", "0.54198474", "0.5414647", "0.5414647", "0.5414647", "0.5408048", "0.54028285", "0.53976274", "0.5393748", "0.53905815", "0.53902954", "0.5390134", "0.5390134", "0.5385415", "0.53853285", "0.5384289", "0.53729546", "0.53618336", "0.53578466", "0.53499234", "0.5344167", "0.53417534", "0.53358155", "0.53285086", "0.5327071", "0.53222215", "0.53107315", "0.52990735", "0.52986366", "0.5295334", "0.52944607", "0.5290745", "0.52868235", "0.52858293", "0.5284968", "0.5281274", "0.52785206", "0.5277393", "0.52770036", "0.5276997", "0.52756053", "0.52712554", "0.5265846", "0.5265744", "0.5262863", "0.526276", "0.5258565", "0.52584374", "0.52575725", "0.5256954", "0.52554095" ]
0.0
-1
This is the same as setup_2, which was the first breakthrough network, except here the pooling operations have been set to causal mode. On Colab, 2x 20 epochs with lr=1e1 and batch_sz=8 have produced strong decoding results.
def decoder_setup_5():
    decoder = RetinaDecoder(
        # pre-pooling
        {'op': 'avg', 'kernel': (1, 2, 2), 'causal': True},
        # grouped temporal conv stacks:
        [
            {
                'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1), 'stride': 1,
                'groups': 15, 'activation': nn.ReLU,
                'pool': {'op': 'avg', 'kernel': (2, 2, 2), 'causal': True}
            }
        ],
        # spatial conv layers: {in, out, kernel, stride}
        [
            # {'in': 15, 'out': 64, 'kernel': (1, 3, 3), 'stride': 1}
        ],
        # for each ConvRNN cell:
        [
        ],
        # temporal convolution stack(s)
        [
            {
                'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3), 'stride': 1,
                'groups': 1, 'activation': nn.ReLU
            }
        ],
        # ConvTranspose layers: {in, out, kernel, stride}
        [
            {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 2, 2)},
            {'in': 64, 'out': 1, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},
        ],
        # post conv layers
        [
        ],
    )
    return decoder
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main_train(lr, bs, cuda_id, not_distrib, fp16, loss_scale):\r\n torch.backends.cudnn.benchmark = True\r\n if fp16: assert torch.backends.cudnn.enabled, \"missing cudnn\"\r\n stats = (np.array([ 0.4914 , 0.48216, 0.44653]), np.array([ 0.24703, 0.24349, 0.26159]))\r\n sz=32\r\n PATH = Path(\"../../data/cifar10/\")\r\n tfms = tfms_from_stats(stats, sz, aug_tfms=[RandomCrop(sz), RandomFlip()], pad=sz//8)\r\n data1 = ImageClassifierData.from_paths(PATH, val_name='test', tfms=tfms, bs=bs)\r\n m = wrn_22().cuda()\r\n if not not_distrib: m = nn.parallel.DistributedDataParallel(m, device_ids=[cuda_id], output_device=cuda_id)\r\n learn = ConvLearner.from_model_data(m, data1)\r\n learn.crit = nn.CrossEntropyLoss()\r\n learn.metrics = [accuracy]\r\n trn_tfms = CustomTfm(0.5, 4, 32, 1)\r\n val_tfms = None\r\n data = DataBunch.from_files(PATH, trn_tfms, val_tfms, stats, torch.device('cuda', cuda_id), distrib=not not_distrib, val_name='test', bs=bs)\r\n learn.data.trn_dl, learn.data.val_dl = data.trn_dl, data.val_dl\r\n if fp16: learn.half()\r\n x,y = next(iter(data.trn_dl))\r\n opt_fn = get_opt_fn('Adam', 0.95, 0.99, False)\r\n learn.opt_fn = opt_fn\r\n cyc_len, pct = 30, 0.075\r\n nbs = [cyc_len * (1-pct) / 2, cyc_len * (1-pct) / 2, cyc_len * pct]\r\n phases = get_phases(lr, (0.95,0.85), opt_fn, 10, nbs, 0.1, True, False)\r\n #print_lr = PrintLR(learn)\r\n learn.fit_opt_sched(phases, loss_scale=loss_scale)", "def train_segmentation():\n\n start = time.time()\n\n model_base = load_pretrained(get_base(), PRETRAINED)\n cut, lr = model_meta[arch]\n m = to_gpu(Unet34(model_base))\n model = UnetModel(m)\n\n sz = 256\n bs = 64\n\n md = get_data(sz, bs)\n\n learn = ConvLearner(md, model)\n learn.opt_fn = optim.Adam()\n learn.crit = LossBinary(jaccard_weight=5)\n learn.metrics = [accuracy_thresh(0.5), dice, IoU]\n wd = 1e-7\n lr = 1e-2\n\n learn.freeze_to(1)\n learn.fit(lr, 1, wds=wd, cycle_len=1, use_clr=(5,8))\n learn.unfreeze() # unfreeze encoder\n learn.bn_freeze(True)\n\n lrs = np.array([lr/100, lr/10, lr])\n learn.fit(lrs/3, 2, wds=wd, cycle_len=2, use_clr=(20,8))\n\n learn.save('./models/weighted_unet_256_p1')\n\n sz = 384\n bs = 32\n\n md = get_data(sz, bs)\n learn.set_data(md)\n learn.unfreeze()\n learn.bn_freeze(True)\n\n learn.fit(lrs/5, 1, wds=wd, cycle_len=2, use_clr(10,8)) # first increase in image size with decreased bs\n learn.save('./models/weighted_unet_384_p1')\n\n sz = 512\n bs = 16\n\n md = get_data(sz, bs)\n learn.set_data(md)\n learn.unfreeze()\n learn.bn_freeze(True)\n\n learn.fit(lrs/10, 2, wds=wd, cycle_len=1, use_clr=(10,8), best_save_name='./models/weighted_unet_512_p1') # second increase in image size with further decreased bs\n\n sz = 768\n bs = 8\n\n md = get_data(sz, bs)\n learn.set_data(md)\n learn.unfreeze()\n learn.bn_freeze(True)\n\n learn.fit(lrs/50, 10, wds=5e-8, cycle_len=1, use_clr=(10,10), best_save_name='./models/weighted_unet_768_p1') # full image size with further decreased bs\n\n learn.crit = MixedLoss(10., 2.)\n learn.fit(lrs/50, 10, wds=5e-8, cycle_len=1, use_clr=(10,10), best_save_name='./models/weighted_unet_768_p2') # full image size with further decreased bs (final run)\n\n learn.save('./models/weighted_unet_768_final')\n\n print(f'Training finished in {time.time() - start) / 60 :.3} minutes.')", "def __init__(self):\n #conv1\n n = inp_width*inp_height\n #poczatkowe wagi sieci sa ustalane losowo z rozkladu normalnego. 
Umieszczane sa one na liscie matryc wag\n self.Weights = [np.random.randn(layers[0][1],inp_channels,layers[0][2],layers[0][2])/np.sqrt(n)]\n out_Size = inp_width - layers[0][2] + 1 #zmienna zawiera rozmiar wyjscia danej warstwy\n #inicjalizacja progow \n self.Biases = [initBias*np.ones( layers[0][1] )]\n #przypisanie parametrow warstwie poolingu\n self.poolParams = [(layers[1][1], layers[1][2])]\n out_Size = out_Size/2 \n #conv 2\n n = out_Size*out_Size*layers[0][1]\n self.Weights.append(np.random.randn(layers[2][1],layers[0][1],layers[2][2],layers[2][2])/np.sqrt(n))\n out_Size = out_Size - layers[2][2]+1\n self.Biases.append(initBias*np.ones(layers[2][1]))\n #pool 2\n self.poolParams.append((layers[3][1],layers[3][2]))\n out_Size = out_Size/2 \n #conv 3\n n = out_Size*out_Size*layers[2][1]\n self.Weights.append(np.random.randn(layers[4][1],layers[2][1],out_Size,out_Size)/np.sqrt(n))\n out_Size = 1\n self.Biases.append(initBias*np.ones(layers[4][1]))\n #fully connected 1\n n = layers[4][1]\n self.Weights.append(np.random.randn(layers[5][1],layers[4][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[5][1]))\n #fully connected 2\n n = layers[5][1]\n self.Weights.append(np.random.randn(layers[6][1],layers[5][1])/np.sqrt(n))\n self.Biases.append(initBias*np.ones(layers[6][1]))\n\n self.Weights = np.asarray(self.Weights)\n self.Biases = np.asarray(self.Biases)\n \n delta_W = []\n delta_B = []\n for i in range(5):\n delta_W.append(np.zeros(self.Weights[i].shape))\n delta_B.append(np.zeros(self.Biases[i].shape))\n self.delta_W = np.asarray(delta_W)\n self.delta_B = np.asarray(delta_B)", "def example():\n base_path = Path(TMPDIR)\n\n discriminator = Model(num_input=28 * 28)\n discriminator.add(Layer(512, activation=af.RELU))\n discriminator.add(Layer(1, activation=af.SIGMOID))\n\n generator_discriminator = Model(num_input=100)\n generator_discriminator.add(Layer(512, activation=af.LEAKY_RELU))\n generator_discriminator.add(Layer(28 * 28, activation=af.SIGMOID))\n generator_discriminator.add(Layer(512, activation=af.RELU)) # Needs to match discriminator\n generator_discriminator.add(Layer(1, activation=af.SIGMOID)) # Needs to match discriminator\n\n nn_discriminator = NeuralNetwork(discriminator, learning_rate=0.0002, cost_function=cf.CROSS_ENTROPY,\n\n optimizer=opt.ADAM,\n optimizer_settings=AdamOptimizer(beta1=0.5, beta2=0.999, epsilon=1e-8),\n batch_size=32)\n\n discriminator_weight_path = Path(DISCRIMINATOR_WEIGHTS_FILE_PATH)\n if discriminator_weight_path.exists():\n log.info(\"Discriminator weight file detected. Loading.\")\n nn_discriminator.load(discriminator_weight_path)\n\n nn_generator_discriminator = NeuralNetwork(generator_discriminator,\n use_layer_from=[{\"model\": nn_discriminator,\n \"layer_map\": [{\"from\": 1, \"to\": 3},\n {\"from\": 2, \"to\": 4}]}],\n\n learning_rate=0.0002, cost_function=cf.CROSS_ENTROPY, # Slower than D\n optimizer=opt.ADAM,\n optimizer_settings=AdamOptimizer(beta1=0.5, beta2=0.999, epsilon=1e-8),\n batch_size=32,\n weight_parameter=wparam(init_type=wparam.NORMAL, stddev=0.02))\n\n generator_weight_path = Path(GENERATOR_WEIGHTS_FILE_PATH)\n if generator_weight_path.exists():\n log.info(\"Generator weight file detected. 
Loading.\")\n nn_generator_discriminator.load(generator_weight_path)\n\n noise = np.random.normal(size=(NUM_IMAGES_TO_GENERATE, 100))\n\n print(\"Generating...\")\n test_images = nn_generator_discriminator.predict_intermediate(noise, 2)\n\n for p in range(test_images.shape[0]):\n img = test_images[p].reshape((28, 28)).copy()\n img *= 255.0\n img_pil = Image.fromarray(np.uint8(img))\n image_path = base_path / Path(\"%d.jpg\" % (p))\n img_pil.save(image_path)", "def __init__(\n self,\n include_top=True,\n weights='imagenet',\n input_tensor=None,\n input_shape=None,\n pooling=None,\n classes=1000,\n **kwargs):\n backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)\n self.include_top = include_top\n self.pooling = pooling\n self.weights = weights\n self.backend = backend\n self.layers = layers\n self.classes = classes\n\n if not (weights in {'imagenet', None} or os.path.exists(weights)):\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization), `imagenet` '\n '(pre-training on ImageNet), '\n 'or the path to the weights file to be loaded.')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as `\"imagenet\"` with `include_top`'\n ' as true, `classes` should be 1000')\n self.block1_conv1 = []\n self.block1_conv2 = []\n self.block1_pool = []\n\n self.block2_conv1 = []\n self.block2_conv2 = []\n self.block2_pool = []\n\n self.block3_conv1 = []\n self.block3_conv2 = []\n self.block3_conv3 = []\n self.block3_conv4 = []\n self.block3_pool = []\n\n self.block4_conv1 = []\n self.block4_conv2 = []\n self.block4_conv3 = []\n self.block4_conv4 = []\n self.block4_pool = []\n\n self.block5_conv1 = []\n self.block5_conv2 = []\n self.block5_conv3 = []\n self.block5_conv4 = []\n self.block5_pool = []\n\n for i in xrange(FLAGS.num_replica):\n # Block 1\n self.block1_conv1.append(layers.Conv2D(64, (3, 3),\n activation='relu',\n padding='same',\n name='block1_conv1'))\n self.block1_conv2.append(layers.Conv2D(64, (3, 3),\n activation='relu',\n padding='same',\n name='block1_conv2'))\n self.block1_pool.append(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))\n\n # Block 2\n self.block2_conv1.append(layers.Conv2D(128, (3, 3),\n activation='relu',\n padding='same',\n name='block2_conv1'))\n self.block2_conv2.append(layers.Conv2D(128, (3, 3),\n activation='relu',\n padding='same',\n name='block2_conv2'))\n self.block2_pool.append(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))\n\n # Block 3\n self.block3_conv1.append(layers.Conv2D(256, (3, 3),\n activation='relu',\n padding='same',\n name='block3_conv1'))\n self.block3_conv2.append(layers.Conv2D(256, (3, 3),\n activation='relu',\n padding='same',\n name='block3_conv2'))\n self.block3_conv3.append(layers.Conv2D(256, (3, 3),\n activation='relu',\n padding='same',\n name='block3_conv3'))\n self.block3_conv4.append(layers.Conv2D(256, (3, 3),\n activation='relu',\n padding='same',\n name='block3_conv4'))\n self.block3_pool.append(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))\n\n # Block 4\n self.block4_conv1.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block4_conv1'))\n self.block4_conv2.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block4_conv2'))\n self.block4_conv3.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block4_conv3'))\n self.block4_conv4.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n 
padding='same',\n name='block4_conv4'))\n self.block4_pool.append(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))\n\n # Block 5\n self.block5_conv1.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block5_conv1'))\n self.block5_conv2.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block5_conv2'))\n self.block5_conv3.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block5_conv3'))\n self.block5_conv4.append(layers.Conv2D(512, (3, 3),\n activation='relu',\n padding='same',\n name='block5_conv4'))\n self.block5_pool.append(layers.MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))\n\n if include_top:\n # Classification block\n self.flatten = layers.Flatten(name='flatten')\n self.fc1 = layers.Dense(4096, activation='relu', name='fc1')\n self.fc2 = layers.Dense(4096, activation='relu', name='fc2')\n self.predict = layers.Dense(classes, activation='softmax', name='predictions')\n else:\n if pooling == 'avg':\n self.pool = layers.GlobalAveragePooling2D()\n elif pooling == 'max':\n self.pool = layers.GlobalMaxPooling2D()", "def train():\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n model = MLP(n_hidden=dnn_hidden_units,n_classes=10,batch_size=FLAGS.batch_size, input_dim=32*32*3, \n weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale)\n\n Datasets = utils.get_cifar10(data_dir = DATA_DIR_DEFAULT, one_hot = True, validation_size = 0)\n \n for i in range(1500): #(FLAGS.max_steps):\n train_batch = Datasets.train.next_batch(batch_size = FLAGS.batch_size)\n #Get the model output\n logits = model.inference(x=train_batch[0].reshape([FLAGS.batch_size,32*32*3]))\n #Get the loss and let the model set the loss derivative.\n loss = model.loss(logits=logits, labels=train_batch[1])\n #Perform training step\n model.train_step(loss=loss, flags=FLAGS)\n\n #Every 100th iteration, print accuracy on the whole test set.\n if i % 100 == 0:\n # for layer in model.layers:\n test_batch = Datasets.test.next_batch(batch_size = 200) #Datasets.test.num_examples\n logits = model.inference(x=test_batch[0].reshape([200,32*32*3]))\n print('-- Step: ', i, \" accuracy: \",model.accuracy(logits=logits,labels=test_batch[1]),'loss', loss )\n\n ########################\n # END OF YOUR CODE #\n #######################", "def _make_conv_layers(self):\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=1), # padding=3 so the output is 224.\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, padding=1), ## kernel size = 1, so padding = 0 (default)\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, padding=1),\n 
nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1), \n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, padding=1),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv", "def train():\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n # Get negative slope parameter for LeakyReLU\n neg_slope = FLAGS.neg_slope\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n import matplotlib.pyplot as plt\n\n data = cifar10_utils.get_cifar10(FLAGS.data_dir)\n train = data['train']\n test = data['test']\n dim_x = train.images.shape[1]*train.images.shape[2]*train.images.shape[3]\n\n mlp = MLP(dim_x, dnn_hidden_units, train.labels.shape[1], neg_slope)\n loss_module = CrossEntropyModule()\n\n loss_train = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n loss_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n accuracy_test = np.zeros((int(np.floor(FLAGS.max_steps/FLAGS.eval_freq), )))\n\n images_test = test.images\n labels_test = test.labels\n images_test = np.reshape(images_test, (images_test.shape[0], dim_x))\n\n for i in range(0, FLAGS.max_steps):\n if PRINTS:\n print('iter', i+1, end='\\r')\n images, labels = train.next_batch(FLAGS.batch_size) \n images = np.reshape(images, (images.shape[0], dim_x))\n\n pred = mlp.forward(images)\n loss = loss_module.forward(pred, labels)\n loss_grad = loss_module.backward(pred, labels)\n mlp.backward(loss_grad)\n\n for module in reversed(mlp.modules):\n if isinstance(module, LinearModule):\n module.params['weight'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['weight']\n module.params['bias'] -= 1/FLAGS.batch_size*FLAGS.learning_rate*module.grads['bias']\n if (i+1) % FLAGS.eval_freq == 0:\n pred_test = mlp.forward(images_test)\n loss_train[i // FLAGS.eval_freq] = loss\n accuracy_test[i // FLAGS.eval_freq] = accuracy(pred_test, labels_test)\n loss_test[i // FLAGS.eval_freq] = loss_module.forward(pred_test, labels_test)\n if PRINTS:\n print()\n print('test_loss:', loss_test[i // FLAGS.eval_freq])\n print('test_accuracy:', accuracy_test[i // FLAGS.eval_freq])\n print('train_loss:', loss_train[i // FLAGS.eval_freq])\n\n if PLOTS:\n fig, ax = plt.subplots(1, 2, figsize=(10,5))\n fig.suptitle('Training curves for Numpy MLP\\nFinal test accuracy: {:0.4f}, default 
configuration'.format(accuracy_test[i // FLAGS.eval_freq]))\n\n ax[0].set_title('Loss')\n ax[0].set_ylabel('Loss value')\n ax[0].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[0].plot(loss_train, label='Train')\n ax[0].plot(loss_test, label='Test')\n ax[0].legend()\n\n ax[1].set_title('Accuracy')\n ax[1].set_ylabel('Accuracy value')\n ax[1].set_xlabel('No of batches seen x{}'.format(FLAGS.eval_freq))\n ax[1].plot(accuracy_test, label='Test')\n ax[1].legend()\n plt.show()\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def __init__(self):\n super(AlexNet, self).__init__()\n\n initializer = tf.keras.initializers.RandomNormal(mean=0., stddev=0.01)\n bias = tf.keras.initializers.Ones()\n bias0 = tf.keras.initializers.Zeros()\n self.drop = tf.keras.layers.Dropout(.5)\n\n # Input is 227 and not 224 as stated on the doc.\n # See issue: https://stackoverflow.com/questions/36733636/the-number-of-neurons-in-alexnet\n self.conv1_1 = tf.keras.layers.Conv2D(48, 11, strides=4, activation=\"relu\", input_shape=[227, 227, 3],\n kernel_initializer=initializer, bias_initializer=bias0)\n self.conv1_2 = tf.keras.layers.Conv2D(48, 11, strides=4, activation=\"relu\", input_shape=[227, 227, 3],\n kernel_initializer=initializer, bias_initializer=bias0)\n # Output: (227 - 11) / 4 + 1 = 55\n # Maxpool (3x3, stride 2): (55 - 3) / 2 + 1 = 27\n\n self.conv2_1 = tf.keras.layers.Conv2D(128, 5, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n self.conv2_2 = tf.keras.layers.Conv2D(128, 5, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n # Output: 27\n # Maxpool (3x3, stride 2): (27 - 3) / 2 + 1 = 13\n\n self.conv3_1 = tf.keras.layers.Conv2D(192, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n self.conv3_2 = tf.keras.layers.Conv2D(192, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n # Output: 13\n\n self.conv4_1 = tf.keras.layers.Conv2D(192, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n self.conv4_2 = tf.keras.layers.Conv2D(192, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n # Output: 13\n\n self.conv5_1 = tf.keras.layers.Conv2D(128, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n self.conv5_2 = tf.keras.layers.Conv2D(128, 3, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias, padding=\"SAME\")\n # Output: 13\n\n self.max_pool = tf.keras.layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2))\n # Output: (13 - 3) / 2 + 1 = 6\n\n self.flatten = tf.keras.layers.Flatten()\n\n # Input: 6 * 6 * 128 * 2 = 9216\n self.fc1_1 = tf.keras.layers.Dense(2048, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias)\n self.fc1_2 = tf.keras.layers.Dense(2048, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias)\n\n self.fc2_1 = tf.keras.layers.Dense(2048, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias)\n self.fc2_2 = tf.keras.layers.Dense(2048, activation=\"relu\", kernel_initializer=initializer,\n bias_initializer=bias)\n\n self.fc3 = tf.keras.layers.Dense(1000, activation=\"softmax\")", "def __init__(self, momentum: float = .5):\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n 
self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)", "def build_dc_classifier():\n # return nn.Sequential(\n # Unflatten(Batch_size, 1, 28, 28),\n # nn.Conv2d(1, 32, kernel_size=5, stride=1),\n # nn.LeakyReLU(negative_slope=0.01),\n # nn.MaxPool2d(2, stride=2),\n # nn.Conv2d(32, 64, kernel_size=5, stride=1),\n # nn.LeakyReLU(negative_slope=0.01),\n # nn.MaxPool2d(kernel_size=2, stride=2),\n # Flatten(),\n # nn.Linear(4 * 4 * 64, 4 * 4 * 64),\n # nn.LeakyReLU(negative_slope=0.01),\n # nn.Linear(4 * 4 * 64, 1)\n # )\n\n return nn.Sequential(\n Unflatten(Batch_size, 1, 128, 128), #28,28,32 #128,128,16\n nn.Conv2d(1, 16,kernel_size=5, stride=1), #24,24,32 #124,124,16\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(2, stride=2), #12,12,32 #62,62,16\n nn.Conv2d(16, 32,kernel_size=5, stride=1), # 8, 8,64 #58,58,32\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(kernel_size=2, stride=2), # 4, 4,64 #29,29,32\n nn.Conv2d(32, 64, kernel_size=5, stride=1), #25,25,64\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(kernel_size=2, stride=2), #12,12,64\n nn.Conv2d(64, 128, kernel_size=5, stride=1), # 8, 8,128\n nn.LeakyReLU(negative_slope=0.01),\n nn.MaxPool2d(kernel_size=2, stride=2), # 4, 4,128\n Flatten(),\n nn.Linear(4*4*128, 4*4*128), # 4*4*64 # 4*4*128\n nn.LeakyReLU(negative_slope=0.01),\n nn.Linear(4*4*128,1) # 4*4*64 # 4*4*128\n )", "def train_one_epoch(self):\n print('Training......')\n\n # set mode train\n self.network.train()\n\n # prepare data\n train_loss = 0\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n RandomHorizontalFlip(),\n ToTensor()\n ])\n\n\n\n dataset = Cityscapes(params.dataset_root, mode='train', transforms = transform)\n\n train_loader = DataLoader(dataset,\n batch_size=params.train_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n \n train_size = 1896\n if train_size % self.params.train_batch != 0:\n total_batch = train_size // self.params.train_batch + 1\n else:\n total_batch = train_size // self.params.train_batch\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0 \n # train through dataset\n for batch_idx, batch in enumerate(train_loader):\n self.pb.click(batch_idx, total_batch)\n image, label = batch['image'], batch['label']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n\n # checkpoint split\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n\n\n loss = self.loss_fn(out, label_cuda)\n \n #display_image(out, label_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one + TN\n IOU = IOU+ FN \n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n\n # optimize\n self.opt.zero_grad()\n loss.backward()\n self.opt.step()\n\n # accumulate\n train_loss += loss.item()\n\n # record first loss\n if self.train_loss == []:\n self.train_loss.append(train_loss)\n self.summary_writer.add_scalar('loss/train_loss', train_loss, 0)\n \n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(accuracy_new/total_batch)\n \n self.pb.close()\n train_loss /= total_batch\n self.train_loss.append(train_loss)\n\n # add to summary\n self.summary_writer.add_scalar('loss/train_loss', train_loss, self.epoch)", "def 
__init__(self):\n super(Encoder2, self).__init__()\n self.lblocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n ]\n )\n\n self.blocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n ]\n )", "def __init__(self, num_1d=None):\n super(Net, self).__init__()\n\n self.lconv1 = nn.Sequential(\n nn.Conv1d(4, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n )\n\n self.conv1 = nn.Sequential(\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n )\n\n self.lconv2 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(64, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n )\n\n self.conv2 = nn.Sequential(\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n )\n\n self.lconv3 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(96, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv3 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, 
padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv4 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv4 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv5 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv5 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv6 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv6 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv7 = nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv7 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconvtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Dropout(p=0.1),\n nn.Conv2d(128, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, 
dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n ]\n )\n\n self.convtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n 
nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ]\n )\n self.final = nn.Sequential(\n nn.Conv2d(64, 5, kernel_size=(1, 1), padding=0),\n nn.BatchNorm2d(5),\n nn.ReLU(inplace=True),\n nn.Conv2d(5, 1, kernel_size=(1, 1), padding=0),\n )\n if num_1d is not None:\n self.final_1d = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=1, padding=0),\n 
nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, num_1d, kernel_size=1, padding=0),\n nn.Sigmoid(),\n )\n self.num_1d = num_1d", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.rpn_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.rpn_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.rpn_cls = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.rpn_reg = nn.Conv2d(\n self.feat_channels, self.num_anchors * 4 * (self.reg_max + 1), 3, padding=1)\n self.rpn_iou = nn.Conv2d(\n self.feat_channels, self.num_anchors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.anchor_generator.strides])\n\n ##############V2################\n conf_vector = [nn.Conv2d(self.num_anchors * 4 * self.total_dim, self.num_anchors * self.reg_channels, 1)]\n conf_vector += [self.relu]\n conf_vector += [nn.Conv2d(self.num_anchors * self.reg_channels, self.num_anchors, 1), nn.Sigmoid()]\n\n self.reg_conf = nn.Sequential(*conf_vector)\n ##############V2################", "def __init__(\n self, \n dim_feat_raw, \n dim_feat_smooth, \n dim_label_raw, \n dim_label_smooth, \n arch_gnn, \n aug_feat,\n num_ensemble, \n train_params\n ):\n super().__init__()\n self.mulhead = 1\n self.num_layers = arch_gnn[\"num_layers\"]\n self.dropout, self.dropedge = train_params[\"dropout\"], train_params['dropedge']\n self.mulhead = int(arch_gnn[\"heads\"]) # only useful for GAT\n\n self.branch_sharing = arch_gnn['branch_sharing'] # only for ensemble\n\n self.type_feature_augment = aug_feat\n assert dim_feat_raw <= dim_feat_smooth, \"smoothened feature cannot have smaller shape than the original one\"\n # NOTE: dim_label_raw may be larger than dim_label_smooth ==> label is not used as input\n self.num_classes = dim_label_raw\n self.dim_label_in = dim_label_smooth\n self.dim_feat_in = dim_feat_smooth\n self.dim_hidden = arch_gnn['dim']\n # build the model below\n dim, act = arch_gnn['dim'], arch_gnn['act']\n self.aug_layers, self.conv_layers, self.res_pool_layers = [], [], []\n for i in range(num_ensemble):\n # feat aug\n if len(self.type_feature_augment) > 0:\n self.aug_layers.append(nn.ModuleList(\n nn.Linear(_dim, self.dim_feat_in) for _, _dim in self.type_feature_augment\n ))\n # graph convs\n convs = []\n if i == 0 or not self.branch_sharing:\n for j in range(arch_gnn['num_layers']):\n cls_gconv = DeepGNN.NAME2CLS[arch_gnn['aggr']]\n dim_in = (self.dim_feat_in + self.dim_label_in) if j == 0 else dim\n convs.append(cls_gconv(dim_in, dim, dropout=self.dropout, act=act, mulhead=self.mulhead))\n self.conv_layers.append(nn.Sequential(*convs))\n else: # i > 0 and branch_sharing\n self.conv_layers.append(self.conv_layers[-1])\n # skip-pooling layer\n type_res = arch_gnn['residue'].lower()\n type_pool = arch_gnn['pooling'].split('-')[0].lower()\n cls_res_pool = layers.ResPool\n args_pool = {}\n if type_pool == 'sort':\n args_pool['k'] = int(arch_gnn['pooling'].split('-')[1])\n self.res_pool_layers.append(\n cls_res_pool(dim, dim, arch_gnn['num_layers'], type_res, type_pool,\n dropout=self.dropout, act=act, args_pool=args_pool\n ))\n if len(self.aug_layers) > 0:\n self.aug_layers = nn.ModuleList(self.aug_layers)\n self.conv_layers = nn.ModuleList(self.conv_layers)\n self.res_pool_layers = nn.ModuleList(self.res_pool_layers)\n # ------- ensembler + classifier -------\n if num_ensemble == 1:\n 
self.ensembler = layers.EnsembleDummy()\n else:\n self.ensembler = layers.EnsembleAggregator(dim, dim, num_ensemble, dropout=self.dropout, \n type_dropout=train_params[\"ensemble_dropout\"], act=arch_gnn[\"ensemble_act\"])\n self.classifier = DeepGNN.NAME2CLS['mlp'](dim, self.num_classes, act='I', dropout=0.)\n # ---- optimizer, etc. ----\n self.lr = train_params[\"lr\"]\n self.sigmoid_loss = arch_gnn[\"loss\"] == \"sigmoid\"\n self.loss, self.opt_op = 0, None\n self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)\n\n self.num_ensemble = num_ensemble", "def train_conv_net(datasets,datasets_weights,\n U, U_Topical,\n img_w=300, \n filter_hs=[3,4,5],\n hidden_units=[100,2], \n dropout_rate=[0.5],\n shuffle_batch=True,\n n_epochs=25, \n batch_size=50, \n lr_decay = 0.95,\n conv_non_linear=\"relu\",\n use_valid_set=True,\n show_states=False,\n activations=[Iden],\n sqr_norm_lim=9,\n non_static=True): \n rng = np.random.RandomState(3435)\n img_h = len(datasets[0][0])-1 \n U_Topical = U_Topical.astype(\"float32\") #cast to float32 (assigning .dtype would reinterpret the buffer)\n (num_topics,topic_dim) = U_Topical.shape\n word_w = img_w\n img_w = int(img_w + num_topics*topic_dim)\n filter_w = img_w \n feature_maps = hidden_units[0]\n filter_shapes = []\n pool_sizes = []\n for filter_h in filter_hs: \n filter_shapes.append((feature_maps, 1, filter_h, filter_w)) # 100 1 3 300\n pool_sizes.append((img_h-filter_h+1, img_w-filter_w+1)) # pool window spans the whole feature map: one value per filter\n parameters = [(\"image shape\",img_h,img_w),(\"filter shape\",filter_shapes), (\"hidden_units\",hidden_units),\n (\"dropout\", dropout_rate), (\"batch_size\",batch_size),(\"non_static\", non_static),\n (\"learn_decay\",lr_decay), (\"conv_non_linear\", conv_non_linear)\n ,(\"sqr_norm_lim\",sqr_norm_lim),(\"shuffle_batch\",shuffle_batch)]\n #print parameters \n \n #define model architecture\n index = T.lscalar()\n x = T.matrix('x') \n y = T.ivector('y')\n x_topic = T.tensor3('x_topic')\n Words = theano.shared(value = U, name = \"Words\")\n Topics = theano.shared(value=U_Topical,name=\"Topics\")\n zero_vec_tensor = T.vector()\n zero_vec = np.zeros(word_w, dtype='float32')\n set_zero = theano.function([zero_vec_tensor], updates=[(Words, T.set_subtensor(Words[0,:], zero_vec_tensor))])\n layer0_input_words = Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((x.shape[0],1,x.shape[1],Words.shape[1])) \n layer0_inputs_topics = []\n for i in range(num_topics):\n sin_topic = x_topic[:,:,i]\n Topic = Topics[i].reshape((1,Topics[i].shape[0]))\n weights = sin_topic.flatten()\n weights = weights.reshape((weights.shape[0],1))\n layer0_inputs_topics.append(T.dot(weights, Topic))\n layer0_input_topics = T.concatenate(layer0_inputs_topics,1)\n layer0_input_topics = layer0_input_topics.reshape((x_topic.shape[0],1,x_topic.shape[1],num_topics*topic_dim))\n layer0_input = T.concatenate([layer0_input_words,layer0_input_topics],3) \n conv_layers = []\n layer1_inputs = []\n for i in xrange(len(filter_hs)):\n filter_shape = filter_shapes[i]\n pool_size = pool_sizes[i]\n conv_layer = LeNetConvPoolLayer(rng, input=layer0_input,image_shape=(batch_size, 1, img_h, img_w),\n filter_shape=filter_shape, poolsize=pool_size, non_linear=conv_non_linear)\n layer1_input = conv_layer.output.flatten(2)\n conv_layers.append(conv_layer)\n layer1_inputs.append(layer1_input)\n layer1_input = T.concatenate(layer1_inputs,1)\n hidden_units[0] = feature_maps*len(filter_hs) \n classifier = MLPDropout(rng, input=layer1_input, layer_sizes=hidden_units, activations=activations, dropout_rates=dropout_rate)\n \n #define 
parameters of the model and update functions using adadelta\n params = classifier.params \n for conv_layer in conv_layers:\n params += conv_layer.params\n \n if non_static:\n #if word vectors are allowed to change, add them as model parameters\n params += [Words] #params are model parameters\n params += [Topics] #Topics embedding are adjusted\n cost = classifier.negative_log_likelihood(y) \n dropout_cost = classifier.dropout_negative_log_likelihood(y) \n grad_updates = sgd_updates_adadelta(params, dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n \n #shuffle dataset and assign to mini batches. if dataset size is not a multiple of mini batches, replicate \n #extra data (at random)\n np.random.seed(3435)\n if datasets[0].shape[0] % batch_size > 0:\n extra_data_num = batch_size - datasets[0].shape[0] % batch_size\n random_index = np.random.permutation(np.arange(datasets[0].shape[0])) \n random_index = random_index.astype('int32') #astype returns a new array; keep the result\n train_set = datasets[0][random_index,:]\n train_set_weights = datasets_weights[0][random_index,:,:]\n extra_data = train_set[:extra_data_num]\n extra_data_weights = train_set_weights[:extra_data_num]\n new_data=np.append(datasets[0],extra_data,axis=0)\n new_data_weights = np.append(datasets_weights[0],extra_data_weights,axis = 0)\n else:\n new_data = datasets[0]\n new_data_weights = datasets_weights[0]\n random_index = np.random.permutation(np.arange(new_data.shape[0])) \n random_index = random_index.astype('int32')\n new_data = new_data[random_index]\n new_data_weights = new_data_weights[random_index]\n n_batches = new_data.shape[0]/batch_size\n n_train_batches = int(np.round(n_batches*0.9))\n \n test_set_x = np.asarray(datasets[1][:,:img_h] ,\"float32\")\n test_set_x_topic = np.asarray(datasets_weights[1][:,:img_h,:] ,\"float32\")\n test_set_y = np.asarray(datasets[1][:,-1],\"int32\")\n if use_valid_set:\n train_set = new_data[:n_train_batches*batch_size,:]\n train_set_weights = new_data_weights[:n_train_batches*batch_size,:,:]\n val_set = new_data[n_train_batches*batch_size:,:]\n val_set_weights = new_data_weights[n_train_batches*batch_size:,:,:] \n train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h],train_set_weights,train_set[:,-1]))\n val_set_x, val_set_x_topic, val_set_y = shared_dataset((val_set[:,:img_h],val_set_weights,val_set[:,-1]))\n n_val_batches = n_batches - n_train_batches\n val_model = theano.function([index], classifier.errors(y),\n givens={\n x: val_set_x[index * batch_size: (index + 1) * batch_size],\n x_topic: val_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: val_set_y[index * batch_size: (index + 1) * batch_size]})\n else:\n train_set = new_data[:,:] \n train_set_x, train_set_x_topic, train_set_y = shared_dataset((train_set[:,:img_h],train_set_weights,train_set[:,-1])) \n \n #make theano functions to get train/val/test errors\n test_model = theano.function([index], classifier.errors(y),\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size]}) \n train_model = theano.function([index], cost, updates=grad_updates,\n givens={\n x: train_set_x[index*batch_size:(index+1)*batch_size],\n x_topic: train_set_x_topic[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index*batch_size:(index+1)*batch_size]}) \n test_pred_layers = []\n test_size = test_set_x.shape[0]\n \n \n\n test_layer0_input_words = 
Words[T.cast(x.flatten(),dtype=\"int32\")].reshape((test_size,1,img_h,Words.shape[1])) \n test_layer0_inputs_topics = []\n for i in range(num_topics):\n sin_topic = x_topic[:,:,i]\n Topic = Topics[i].reshape((1,Topics[i].shape[0]))\n weights = sin_topic.flatten()\n weights = weights.reshape((weights.shape[0],1))\n test_layer0_inputs_topics.append(T.dot(weights, Topic))\n test_layer0_input_topics = T.concatenate(test_layer0_inputs_topics,1)\n test_layer0_input_topics = test_layer0_input_topics.reshape((test_size,1,img_h,num_topics*topic_dim))\n test_layer0_input = T.concatenate([test_layer0_input_words,test_layer0_input_topics],3) \n\n\n\n for conv_layer in conv_layers:\n test_layer0_output = conv_layer.predict(test_layer0_input, test_size)\n test_pred_layers.append(test_layer0_output.flatten(2))\n test_layer1_input = T.concatenate(test_pred_layers, 1)\n test_y_pred = classifier.predict(test_layer1_input)\n\n test_error = T.mean(T.neq(test_y_pred, y))\n test_model_all = theano.function([x,x_topic,y], test_error) \n \n #start training over mini-batches\n print '... training'\n epoch = 0\n best_val_perf = 0\n val_perf = 0\n test_perf = 0 \n cost_epoch = 0 \n while (epoch < n_epochs): \n epoch = epoch + 1\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n set_zero(zero_vec)\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n set_zero(zero_vec)\n train_losses = [test_model(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n if use_valid_set:\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_perf = 1- np.mean(val_losses)\n\n if val_perf >= best_val_perf:\n params_conv = [] \n params_output = {}\n test_loss = test_model_all(test_set_x,test_set_x_topic, test_set_y) \n test_perf = 1- test_loss \n best_val_perf = val_perf \n for conv_layer in conv_layers:\n params_conv.append(conv_layer.get_params())\n params_output = classifier.get_params()\n word_vec = Words.get_value()\n Topic_vec = Topics.get_value()\n else :\n val_perf = 0 \n if show_states:\n print('epoch %i, train perf %f %%, val perf %f' % (epoch, train_perf * 100., val_perf*100.))\n \n if not use_valid_set:\n params_conv = [] \n params_output = {}\n test_loss = test_model_all(test_set_x,test_set_x_topic, test_set_y) \n test_perf = 1- test_loss \n \n for conv_layer in conv_layers:\n params_conv.append(conv_layer.get_params())\n params_output = classifier.get_params()\n word_vec = Words.get_value()\n Topic_vec = Topics.get_value() \n \n return test_perf, [params_conv, params_output, word_vec,Topic_vec]", "def __init__(self, n_filters = 64,\n n_kernels = 3,\n n_outputs = 10,\n inp_shape = (28,28),\n residual=True,\n regularizer = None,\n intializer = None,\n use_pool= False,\n use_dropout = False,\n use_batchnorm = False\n ):\n super(CNNModel, self).__init__()\n self.conv_dim = len(inp_shape)-1\n self.n_filters = n_filters\n self.initializer = intializer\n self.n_kernels = n_kernels\n self.projection = 3\n self.n_outputs = n_outputs\n self.num_layers = 1\n self.inp_shape = inp_shape\n self.regularizer = regularizer\n self.use_pool = use_pool\n self.residual = residual\n self.use_dropout = use_dropout\n self.use_batchnorm = use_batchnorm\n\n kernel_initializer = initializers.RandomNormal(mean=0.0, stddev=0.05)\n\n if self.conv_dim == 1:\n self.input_layer = layers.Conv1D(self.n_filters, (self.projection),\n activation = \"linear\",\n input_shape = 
self.inp_shape,\n name ='cnn_input',\n padding = 'same',\n kernel_regularizer = self.regularizer,\n bias_regularizer = self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n self.output_layer = layers.Conv1D(self.n_kernels, (self.projection),\n activation=\"linear\",\n input_shape=(None, self.inp_shape[0], self.n_filters),\n name='cnn_output',\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n if self.use_pool:\n self.pool = layers.MaxPool1D()\n elif self.conv_dim == 2:\n self.input_layer = layers.Conv2D(self.n_filters, (self.projection,self.projection),\n activation=\"linear\",\n input_shape=self.inp_shape,\n name='cnn_input',\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n self.output_layer = layers.Conv2D(self.n_kernels, (self.projection, self.projection),\n activation= \"linear\",\n input_shape=(None, self.inp_shape[0],self.inp_shape[1], self.n_filters),\n name=\"cnn_output\",\n padding = 'same',\n kernel_regularizer=self.regularizer,\n bias_regularizer=self.regularizer,\n kernel_initializer=kernel_initializer,\n bias_initializer=initializers.get(\"zeros\")\n )\n if self.use_pool:\n self.pool = layers.MaxPool2D()\n self.list_cnn = [self.input_layer]\n self.flatten = layers.Flatten()\n\n #compute input shape after flatten for the dense layer\n if not self.use_pool:\n self.class_inp = np.prod(self.inp_shape[:-1])*self.n_kernels\n else:\n self.class_inp = np.prod(self.inp_shape[:-1])*self.n_kernels//(2**self.conv_dim)\n # self.classify = MyDenseLayer(\n # self.n_outputs,shape = (None,self.class_inp),\n # layer_name = 'classify',\n # initializer = \"RandomNormal\")\n self.classify = layers.Dense(units = self.n_outputs,\n activation = 'softmax', use_bias = True,\n input_shape = self.class_inp,\n kernel_initializer = kernel_initializer, bias_initializer=initializers.get(\"zeros\"),\n name = 'classification_layer')", "def __init__(self, depth=7, latent_size=512, use_eql=True, gpu_parallelize=False):\r\n from torch.nn import ModuleList, Conv2d\r\n from CustomLayers import GenGeneralConvBlock, GenInitialBlock, _equalized_conv2d\r\n\r\n super().__init__()\r\n\r\n assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \\\r\n \"latent size not a power of 2\"\r\n if depth >= 4:\r\n assert latent_size >= np.power(2, depth - 4), \"latent size will diminish to zero\"\r\n\r\n # state of the generator:\r\n self.use_eql = use_eql\r\n self.depth = depth\r\n self.latent_size = latent_size\r\n\r\n # register the modules required for the Generator Below ...\r\n # create the ToRGB layers for various outputs:\r\n if self.use_eql:\r\n def to_rgb(in_channels):\r\n return _equalized_conv2d(in_channels, 1, (1, 1), bias=True)\r\n else:\r\n def to_rgb(in_channels):\r\n return Conv2d(in_channels, 1, (1, 1), bias=True)\r\n\r\n # create a module list of the other required general convolution blocks\r\n self.layers = ModuleList([GenInitialBlock(self.latent_size, use_eql=self.use_eql)])\r\n self.rgb_converters = ModuleList([to_rgb(self.latent_size)])\r\n\r\n # create the remaining layers\r\n for i in range(self.depth - 1):\r\n if i <= 2:\r\n layer = GenGeneralConvBlock(self.latent_size, self.latent_size,\r\n use_eql=self.use_eql)\r\n rgb = to_rgb(self.latent_size)\r\n 
else:\r\n layer = GenGeneralConvBlock(\r\n int(self.latent_size // np.power(2, i - 3)),\r\n int(self.latent_size // np.power(2, i - 2)),\r\n use_eql=self.use_eql\r\n )\r\n rgb = to_rgb(int(self.latent_size // np.power(2, i - 2)))\r\n self.layers.append(layer)\r\n self.rgb_converters.append(rgb)", "def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))", "def __init__(self):\n super(DLStudio.ExperimentsWithCIFAR.Net2, self).__init__()\n self.relu = nn.ReLU()\n strides = []\n patch_size = 2\n ## conv1:\n out_ch, ker_size, conv_stride, pool_stride = 128,5,1,2\n self.conv1 = nn.Conv2d(3, out_ch, (ker_size,ker_size), padding=(ker_size-1)//2) \n self.pool1 = nn.MaxPool2d(patch_size, pool_stride) \n strides += (conv_stride, pool_stride)\n ## conv2:\n in_ch = out_ch\n out_ch, ker_size, conv_stride, pool_stride = 128,3,1,2\n self.conv2 = nn.Conv2d(in_ch, out_ch, ker_size, padding=(ker_size-1)//2)\n self.pool2 = nn.MaxPool2d(patch_size, pool_stride) \n strides += (conv_stride, pool_stride)\n ## conv3: \n ## meant for repeated invocation, must have same in_ch, out_ch and strides of 1\n in_ch = out_ch\n out_ch, ker_size, conv_stride, pool_stride = in_ch,2,1,1\n self.conv3 = nn.Conv2d(in_ch, out_ch, ker_size, padding=1)\n self.pool3 = nn.MaxPool2d(patch_size, pool_stride) \n# strides += (conv_stride, pool_stride)\n ## figure out the number of nodes needed for entry into fc:\n in_size_for_fc = out_ch * (32 // np.prod(strides)) ** 2 ## (A)\n self.in_size_for_fc = in_size_for_fc\n self.fc1 = nn.Linear(in_size_for_fc, 150)\n 
self.fc2 = nn.Linear(150, 100)\n self.fc3 = nn.Linear(100, 10)", "def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net", "def build_resnet101(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = 
nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 4):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b3_feats = temp\n \n res4a_feats = self.basic_block(res3b3_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 23):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b22_feats = temp\n\n res5a_feats = self.basic_block(res4b22_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n cfg.merge_from_list(args.opts)\n\n # configs for training\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TRAIN = (\"vidor_small_train\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TRAIN = (\"vidor_small_10imgs_train\",)\n else:\n cfg.DATASETS.TRAIN = (\"vidor_large_train\",)\n # cfg.DATALOADER.NUM_WORKERS = 2\n if not args.eval_only:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\") # Let training initialize from model zoo\n factor = 4\n cfg.SOLVER.IMS_PER_BATCH = 16 * factor\n cfg.SOLVER.BASE_LR = 0.0001 * factor # finetune using 10x smaller base_lr\n cfg.SOLVER.MAX_ITER = 270000 // factor \n cfg.SOLVER.STEPS = [210000 // factor, 250000 // factor]\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # default: 512\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 78\n\n # configs for testing\n # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TEST = (\"vidor_small_val\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TEST = (\"vidor_small_10imgs_val\",)\n else:\n cfg.DATASETS.TEST = (\"vidor_large_val\",)\n # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n # cfg.OUTPUT_DIR = './output/train_vidor_with_pseudo_labels'\n \n \n if not args.eval_only:\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "def define(self, optimizer = Adam(lr=1e-5)): \n \n self.optimizer = optimizer\n\n model = Sequential()\n\n #Layer 1\n model.add(Conv2D( filters = 96, \n kernel_size = (11,11), \n strides = 4, \n padding = 'same', \n activation = 'relu', \n input_shape = (224, 224, 3), \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) # overlapping pooling\n #Layer 
2\n model.add(Conv2D( filters = 256, \n kernel_size = (5,5), \n strides = 1, \n padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None)) \n #Layer 3\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', kernel_initializer = 'he_normal'))\n #Layer 4\n model.add(Conv2D( filters = 384, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 5\n model.add(Conv2D( filters = 256, \n kernel_size = (3,3), \n strides = 1, padding = 'same', \n activation = 'relu', \n kernel_initializer = 'he_normal'))\n #Layer 6\n model.add(MaxPooling2D( pool_size = (3,3), \n strides = (2,2), \n padding= 'same', \n data_format = None))\n \n #Layer 7\n model.add(Flatten())\n \n #Layer 8\n model.add(Dense( units = 4096, activation = 'relu'))\n model.add(Dense( units = 1024, activation = 'relu'))\n model.add(Dense( units = 512, activation = 'relu'))\n model.add(Dense( units = 256, activation = 'relu'))\n model.add(Dense( units = 128, activation = 'relu'))\n \n #Layer end\n model.add(Dense( units = 3, activation = 'softmax'))\n model.summary()\n \n self.model = model", "def __init__(self,\n num_class=2,\n layer_nums=(3, 5, 5),\n layer_strides=(2, 2, 2),\n num_filters=(128, 128, 256),\n upsample_strides=(1, 2, 4),\n num_upsample_filters=(256, 256, 256),\n num_input_features=128,\n num_anchor_per_loc=2,\n use_groupnorm=False,\n num_groups=32,\n box_code_size=7,\n num_direction_bins=2):\n super(RPN, self).__init__()\n self._num_anchor_per_loc = num_anchor_per_loc\n self._box_code_size=box_code_size\n self._num_class=num_class\n self._num_direction_bins=num_direction_bins\n assert len(layer_nums) == 3\n assert len(layer_strides) == len(layer_nums)\n assert len(num_filters) == len(layer_nums)\n assert len(upsample_strides) == len(layer_nums)\n assert len(num_upsample_filters) == len(layer_nums)\n upsample_strides=[int(i) for i in upsample_strides]\n\n factors = []\n for i in range(len(layer_nums)):\n assert int(np.prod(\n layer_strides[:i + 1])) % upsample_strides[i] == 0\n factors.append(\n np.prod(layer_strides[:i + 1]) // upsample_strides[i])\n assert all([x == factors[0] for x in factors])\n\n # note that when stride > 1, conv2d with same padding isn't\n # equal to pad-conv2d. 
we should use pad-conv2d.\n block2_input_filters = num_filters[0]\n if use_groupnorm:\n BatchNorm2d = change_default_args(\n num_groups=num_groups, eps=1e-3)(GroupNorm)\n else:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n\n self.block1 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_input_features, num_filters[0], 3,\n stride=layer_strides[0],bias=False),\n BatchNorm2d(num_filters[0]),\n nn.ReLU(),)\n for i in range(layer_nums[0]):\n self.block1.add(\n nn.Conv2d(num_filters[0], num_filters[0], 3,padding=1,bias=False))\n self.block1.add(BatchNorm2d(num_filters[0]))\n self.block1.add(nn.ReLU())\n self.deconv1 = Sequential(\n nn.ConvTranspose2d(num_filters[0],num_upsample_filters[0],\n upsample_strides[0],stride=upsample_strides[0],bias=False),\n BatchNorm2d(num_upsample_filters[0]),\n nn.ReLU(),)\n self.block2 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(block2_input_filters,num_filters[1],3,\n stride=layer_strides[1],bias=False),\n BatchNorm2d(num_filters[1]),\n nn.ReLU(),)\n for i in range(layer_nums[1]):\n self.block2.add(\n nn.Conv2d(num_filters[1], num_filters[1], 3, padding=1,bias=False))\n self.block2.add(BatchNorm2d(num_filters[1]))\n self.block2.add(nn.ReLU())\n self.deconv2 = Sequential(\n nn.ConvTranspose2d(num_filters[1],num_upsample_filters[1],\n upsample_strides[1],stride=upsample_strides[1],bias=False),\n BatchNorm2d(num_upsample_filters[1]),\n nn.ReLU(),)\n self.block3 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_filters[1], num_filters[2], 3, stride=layer_strides[2],bias=False),\n BatchNorm2d(num_filters[2]),\n nn.ReLU(),)\n for i in range(layer_nums[2]):\n self.block3.add(nn.Conv2d(num_filters[2], num_filters[2], 3, padding=1,bias=False))\n self.block3.add(BatchNorm2d(num_filters[2]))\n self.block3.add(nn.ReLU())\n self.deconv3 = Sequential(\n nn.ConvTranspose2d(\n num_filters[2],num_upsample_filters[2],\n upsample_strides[2],stride=upsample_strides[2],bias=False),\n BatchNorm2d(num_upsample_filters[2]),\n nn.ReLU(),)\n\n num_cls = num_anchor_per_loc * num_class\n self.conv_cls = nn.Conv2d(sum(num_upsample_filters), num_cls, 1)\n self.conv_box = nn.Conv2d(sum(num_upsample_filters), num_anchor_per_loc * box_code_size, 1)\n self.conv_dir_cls = nn.Conv2d(sum(num_upsample_filters),num_anchor_per_loc * num_direction_bins, 1)", "def runBestNet(train_dl, test_dl, best_test, outputPath , nfold , class_len , net, feature_name , test_len ):\n class_test_correct = np.zeros(class_len)\n class_test_total = np.zeros(class_len)\n class_train_correct = np.zeros(class_len)\n class_train_total = np.zeros(class_len)\n correct_pred = []\n Y_predit= []\n Y_true = []\n L = []#[np.array([\"Name\", \"Labels\"] + [\"Proba class \"+ str(i) for i in range(class_len)])]\n best_value = np.zeros((1,1))\n net.load_state_dict(torch.load(str(outputPath)+\"best_net\"))\n net.eval()\n First_train = True\n for i,batch in enumerate(tqdm(train_dl)):\n x = batch[0]\n labels = batch[1]\n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda()\n encoder_out, decoder_out = net(x)\n with torch.no_grad():\n c = (encoder_out.max(1)[1]==labels).squeeze()\n for i in range(len(x)):\n \n label = int(labels[i].item())\n if c.dim() ==0 : \n \n class_train_correct[label] += int(c.item())\n else : \n class_train_correct[label] += int(c[i].item())\n class_train_total[label] += 1\n if First_train:\n data_decoded_train = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_encoder_train = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n \n First_train=False\n 
else:\n \n tmp1 = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_decoded_train = torch.cat((data_decoded_train,tmp1),dim= 0)\n \n tmp2 = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n data_encoder_train = torch.cat((data_encoder_train ,tmp2 ),dim= 0)\n \n First = True\n for i,batch in enumerate(tqdm(test_dl)):\n with torch.no_grad():\n x = batch[0]\n labels = batch[1]\n index = batch[2]\n if torch.cuda.is_available():\n x = x.cuda()\n labels = labels.cuda() \n encoder_out, decoder_out = net(x)\n m = nn.Softmax(dim=1)\n L.append( [index[0], labels.item()] + m(encoder_out).detach().cpu().numpy().tolist()[0])\n Y_predit.append(encoder_out.max(1)[1].item())\n Y_true.append(labels.item())\n c = (encoder_out.max(1)[1]==labels).squeeze()\n for i in range(len(x)):\n label = int(labels[i].item())\n if c.dim() == 0 : \n class_test_correct[label] += int(c.item())\n if c.item() : \n correct_pred.append(index[0])\n else : \n class_test_correct[label] += int(c[i].item())\n if c[i].item() : \n correct_pred.append(index[i][0])\n class_test_total[label] += 1 \n \n \n\n if First:\n data_decoded = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n \n First=False\n else:\n \n tmp1 = torch.cat((decoder_out,labels.view(-1,1)), dim = 1)\n data_decoded = torch.cat((data_decoded,tmp1),dim= 0)\n \n tmp2 = torch.cat((encoder_out,labels.view(-1,1)), dim = 1)\n data_encoder = torch.cat((data_encoder,tmp2 ),dim= 0)\n print(best_test , class_test_correct)\n \n if best_test != sum(class_test_correct):\n print(\"!!!!!!! Problem !!!!!!!\")\n class_train = (class_train_correct/class_train_total).reshape(1,-1) \n best_value[0] = sum(class_train_correct)/sum(class_train_total)\n class_train = np.hstack((best_value,class_train))\n class_test = (class_test_correct/class_test_total).reshape(1,-1)\n best_value[0] = sum(class_test_correct)/sum(class_test_total)\n class_test = np.hstack((best_value ,class_test)) \n\n \n try : \n if nfold !=0 : \n df = pd.read_csv('{}Labelspred_softmax.csv'.format(outputPath),sep=';', header = 0 )\n \n soft = pd.DataFrame(L)\n soft = pd.DataFrame(np.concatenate((df.values, soft.values[: , : ])), columns = [\"Name\", \"Labels\"] + [\"Proba class \"+ str(i) for i in range(class_len)])\n soft.to_csv('{}Labelspred_softmax.csv'.format(outputPath),sep=';',index=0)\n else : \n soft = pd.DataFrame(L, columns = [\"Name\", \"Labels\"] + [\"Proba class \"+ str(i) for i in range(class_len)])\n soft.to_csv('{}Labelspred_softmax.csv'.format(outputPath),sep=';',index=0)\n except FileNotFoundError:\n soft = pd.DataFrame(L, columns = [\"Name\", \"Labels\"] + [\"Proba class \"+ str(i) for i in range(class_len)])\n soft.to_csv('{}Labelspred_softmax.csv'.format(outputPath),sep=';',index=0)\n \n\n \n \n \n \n Lung_decoded = data_decoded.cpu().detach().numpy()\n Label = ['Label']+list(Lung_decoded[:,-1].astype(int)+1)\n Name = ['Name'] + [x+2 for x in range(test_len)]\n Label = np.vstack( (np.array(Name),np.array(Label)) )\n Lung = np.delete(Lung_decoded, -1, axis =1 )\n Lung = np.hstack( (feature_name.reshape(-1,1), Lung.T) )\n #Lung = np.vstack((Label, np.array( L).T, Lung))\n #res = pd.DataFrame(Lung)\n #res.to_csv('{}recondecoded_BestNet_{}.csv'.format(outputPath, str(run_model)),sep=';',index=0, header=0) \n print(\"-----------------------\")\n print(\"Saved file to \",str(outputPath))\n print(\"-----------------------\")\n normGenes = selectf(net.state_dict()['encoder.0.weight'] , feature_name)\n \n #cm = 
confusion_matrix(np.array(Lung_decoded[:,-1].astype(int)), Y_predit)\n    #print(cm)\n    #sn.heatmap(cm , annot=True)\n    return data_encoder, data_decoded, class_train , class_test , normGenes, correct_pred, soft, Y_true, Y_predit, data_encoder_train, data_decoded_train", "def __init__(self, embed_size=256, finetune=False, cnn_type='resnet50',\n                 use_abs=False, no_imgnorm=False):\n        super(EncoderImageFull, self).__init__()\n        self.embed_size = embed_size\n        self.no_imgnorm = no_imgnorm\n        self.use_abs = use_abs\n\n        # Load a pre-trained model\n        model = get_model(name=cnn_type, num_classes=5607)\n        model = torch.nn.DataParallel(model)\n        model.to(\"cuda\")\n        checkpoint = torch.load(\"/mnt/data2/betty/webvision_train/results/resnet50/5000classes_onemonth/model_best.tar\")\n        model.load_state_dict(checkpoint['state_dict'])\n        \n        print(\"Successfully loaded the saved model at model_best.tar\") \n\n        self.cnn = model\n\n\n        # For efficient memory usage.\n        for param in self.cnn.parameters():\n            param.requires_grad = False\n\n        # Replace the last fully connected layer of CNN with a new one\n        \n        if cnn_type.startswith('resnet'):\n            self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)\n            self.cnn.module.fc = nn.Sequential()\n        else:\n            print(\"error in choosing the architecture\")\n            return\n\n        self.init_weights()", "def run_experiments() :\n    #%%\n    target_size=(32,32)\n    g_specs = {\n        \"batch_size\" : [ 30 , 60, 100 ],\n        \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n        \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n        \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n    }\n\n    model_traits = MODEL_TRAITS[\"model2\"].copy()\n    tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n    del model_traits[\"trainer_tester_class\"]\n\n    cnt = 0\n    for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n                                                      g_specs[\"learning_rate\"],\n                                                      g_specs[\"drop_out_rate\"],\n                                                      g_specs[\"rescale_mode\"] ) :\n\n        tt_obj.model_traits.update( {\"batch_size\" : batchs,\n                                     \"learning_rate\" : lrate,\n                                     \"rescale_mode\" : resc_mode,\n                                     \"drop_out_rate\" : do_rate } )\n\n        train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n                                                target_size=target_size )\n\n        test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n                                              target_size=target_size )\n\n        data = {\"train_4d\" : train_4d,\n                \"test_4d\" : test_4d,\n                \"train_y\" : train_gt,\n                \"test_y\" : test_gt}\n\n        valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n                                                       logl=100 )\n        idx_v = int(np.argmax( valid_accu_log))\n        idx_t = int(np.argmax( train_accu_log))\n\n        model_traits.update({\"valid_accu_log\" : valid_accu_log,\n                             \"train_accu_log\" : train_accu_log,\n                             \"best_valid\" : max(valid_accu_log),\n                             \"best_valid_at\" : idx_v,\n                             \"train_at_best_valid\" : train_accu_log[idx_v],\n                             \"best_train\" : max(train_accu_log),\n                             \"best_train_at\": idx_t })\n\n        #print(cnt, pformat(model_traits) )\n        print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n               (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n        with open( \"exp_results_%d.json\" % cnt,\n                   \"wt\" , encoding=\"utf8\" ) as f_out :\n            print( json.dumps( model_traits ), file=f_out)\n\n\n        cnt += 1\n    #%%", "def IMPALA_resnet_head(input_shape, l2_weight=0.0):\n    # Total number of layers in this resnet. 
Used to approximately get the\n    # FixUp initialization right\n    TOTAL_RESIDUAL_BLOCKS = 6\n\n    model = layers.Input(shape=input_shape)\n    input_layer = model\n\n    for i, (num_channels, num_blocks) in enumerate([[16, 2], [32, 2], [32, 2]]):\n        model = layers.Conv2D(\n            num_channels, kernel_size=(3, 3), strides=(1, 1), padding=\"same\", activation=None,\n            kernel_regularizer=regularizers.l2(l2_weight)\n        )(model)\n        model = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2))(model)\n\n        for j in range(num_blocks):\n            block_input = model\n            model = layers.ReLU()(model)\n            model = layers.Conv2D(\n                num_channels, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None,\n                kernel_regularizer=regularizers.l2(l2_weight),\n                kernel_initializer=keras.initializers.VarianceScaling(\n                    # Scaling is L^(-1/(2m - 2)) . In our case m = 2 (two layers in branch),\n                    # so our rescaling is L^(-1/2) = 1 / sqrt(L)\n                    scale=1 / np.sqrt(TOTAL_RESIDUAL_BLOCKS)\n                )\n            )(model)\n            model = layers.ReLU()(model)\n            model = layers.Conv2D(\n                num_channels, kernel_size=(3, 3), strides=(1, 1), padding='same', activation=None,\n                kernel_initializer=\"zero\", bias_initializer=\"zero\",\n                kernel_regularizer=regularizers.l2(l2_weight)\n            )(model)\n            model = layers.add([model, block_input])\n\n    model = layers.ReLU()(model)\n    model = layers.Flatten()(model)\n    model = layers.Dense(256, activation=\"relu\")(model)\n\n    return input_layer, model", "def _make_conv_layers_bn_padding(self): ## 20 Convs, used for pretraining on the ImageNet 1000-class task\n        conv = nn.Sequential(\n            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3, bias=False), # padding=3, so output is 224.\n            nn.BatchNorm2d(64),\n            nn.LeakyReLU(0.1, inplace=True),\n            nn.MaxPool2d(kernel_size=2, stride=2),\n\n            nn.Conv2d(64, 192, 3, padding=1, bias=False),\n            nn.BatchNorm2d(192),\n            nn.LeakyReLU(0.1, inplace=True),\n            nn.MaxPool2d(2,2),\n\n            nn.Conv2d(192, 128, 1, bias=False), ## kernel size = 1, so padding = 0 (default)\n            nn.BatchNorm2d(128),\n            nn.LeakyReLU(0.1, inplace=True),\n\n            nn.Conv2d(128, 256, 3, padding=1, bias=False), \n            nn.BatchNorm2d(256),\n            nn.LeakyReLU(0.1, inplace=True),\n\n            nn.Conv2d(256, 256, 1, bias=False),\n            nn.BatchNorm2d(256),\n            nn.LeakyReLU(0.1, inplace=True),\n\n            nn.Conv2d(256, 512, 3, padding=1, bias=False), \n            nn.BatchNorm2d(512),\n            nn.LeakyReLU(0.1, inplace=True),\n            nn.MaxPool2d(2,2),\n\n            nn.Conv2d(512, 256, 1, bias=False),\n            nn.BatchNorm2d(256),\n            nn.LeakyReLU(0.1, inplace=True),\n\n            nn.Conv2d(256, 512, 3, padding=1, bias=False),\n            nn.BatchNorm2d(512),\n            nn.LeakyReLU(0.1, inplace=True),\n\n            nn.Conv2d(512, 256, 1, bias=False),\n            nn.BatchNorm2d(256),\n            nn.LeakyReLU(0.1, inplace=True),\n\n            nn.Conv2d(256, 512, 3, padding=1, bias=False),\n            nn.BatchNorm2d(512),\n            nn.LeakyReLU(0.1, inplace=True),\n\n            nn.Conv2d(512, 256, 1, bias=False),\n            nn.BatchNorm2d(256),\n            nn.LeakyReLU(0.1, inplace=True),\n            nn.Conv2d(256, 512, 3, padding=1, bias=False),\n            nn.BatchNorm2d(512),\n            nn.LeakyReLU(0.1, inplace=True),\n\n            nn.Conv2d(512, 256, 1, bias=False),\n            nn.BatchNorm2d(256),\n            nn.LeakyReLU(0.1, inplace=True),\n            nn.Conv2d(256, 512, 3, padding=1, bias=False),\n            nn.BatchNorm2d(512), \n            nn.LeakyReLU(0.1, inplace=True),\n\n            nn.Conv2d(512, 512, 1, bias=False),\n            nn.BatchNorm2d(512),\n            nn.LeakyReLU(0.1, inplace=True),\n            nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n            nn.BatchNorm2d(1024),\n            nn.LeakyReLU(0.1, inplace=True),\n            nn.MaxPool2d(2,2),\n\n            nn.Conv2d(1024, 512, 1, bias=False),\n            nn.BatchNorm2d(512),\n            nn.LeakyReLU(0.1, inplace=True),\n            nn.Conv2d(512, 1024, 3, 
padding=1, bias=False),\n            nn.BatchNorm2d(1024),\n            nn.LeakyReLU(0.1, inplace=True),\n\n            nn.Conv2d(1024, 512, 1, bias=False),\n            nn.BatchNorm2d(512),\n            nn.LeakyReLU(0.1, inplace=True),\n            nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n            nn.BatchNorm2d(1024),\n            nn.LeakyReLU(0.1, inplace=True)\n        )\n        return conv", "def __init__(self):\n        super(Encoder3, self).__init__()\n        self.lblocks = nn.ModuleList(\n            [\n                nn.Sequential(\n                    nn.MaxPool1d(kernel_size=2, stride=2),\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                ),\n                nn.Sequential(\n                    nn.MaxPool1d(kernel_size=2, stride=2),\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                ),\n                nn.Sequential(\n                    nn.MaxPool1d(kernel_size=2, stride=2),\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                ),\n            ]\n        )\n\n        self.blocks = nn.ModuleList(\n            [\n                nn.Sequential(\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                    nn.ReLU(inplace=True),\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                    nn.ReLU(inplace=True),\n                ),\n                nn.Sequential(\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                    nn.ReLU(inplace=True),\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                    nn.ReLU(inplace=True),\n                ),\n                nn.Sequential(\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                    nn.ReLU(inplace=True),\n                    nn.Conv1d(128, 128, kernel_size=9, padding=4),\n                    nn.BatchNorm1d(128),\n                    nn.ReLU(inplace=True),\n                ),\n            ]\n        )", "def create(self):\n        \n        \"\"\" A solo preprocessing reduction network in the head \"\"\"\n        print(\"pre_reduction\")\n        with tf.name_scope('pre_reduction'):\n            conv1 = NW.conv(self.X, 7, 7, 64, 2, 2, name='conv1')\n            pool1 = NW.max_pool(conv1, 3, 3, 2, 2, name='pool1')\n            norm1 = NW.lrn(pool1, 2, 2e-05, 0.75, name='norm1')\n            reduction2 = NW.conv(norm1, 1, 1, 64, 1, 1, name='reduction2')\n            conv2 = NW.conv(reduction2, 3, 3, 192, 1, 1,name='conv2')\n            norm2 = NW.lrn(conv2, 2, 2e-05, 0.75, name='norm2')\n            pool2 = NW.max_pool(norm2, 3, 3, 2, 2, name='pool2')\n        \n        \"\"\" 1st inception layer group \"\"\"\n        print(\"icp1\")\n        with tf.name_scope('icp1'):\n            # branch 0\n            icp1_out0 = NW.conv(pool2, 1, 1, 64, 1, 1, name='icp1_out0')\n            # branch 1\n            icp1_reduction1 = NW.conv(pool2, 1, 1, 96, 1, 1, name='icp1_reduction1')\n            icp1_out1 = NW.conv(icp1_reduction1, 3, 3, 128, 1, 1, name='icp1_out1')\n            # branch 2\n            icp1_reduction2 = NW.conv(pool2, 1, 1, 16, 1, 1, name='icp1_reduction2')\n            icp1_out2 = NW.conv(icp1_reduction2, 5, 5, 32, 1, 1, name='icp1_out2')\n            # branch 3\n            icp1_pool = NW.max_pool(pool2, 3, 3, 1, 1, name='icp1_pool')\n            icp1_out3 = NW.conv(icp1_pool, 1, 1, 32, 1, 1, name='icp1_out3')\n            # concat\n            icp2_in = NW.concat([icp1_out0,\n                                 icp1_out1,\n                                 icp1_out2,\n                                 icp1_out3], 3, 'icp2_in')\n\n        \"\"\" 2nd inception layer group \"\"\"\n        print(\"icp2\")\n        with tf.name_scope('icp2'):\n            # branch 0\n            icp2_out0 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_out0')\n            # branch 1\n            icp2_reduction1 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_reduction1')\n            icp2_out1 = NW.conv(icp2_reduction1, 3, 3, 192, 1, 1, name='icp2_out1')\n            # branch 2\n            icp2_reduction2 = NW.conv(icp2_in, 1, 1, 32, 1, 1, name='icp2_reduction2')\n            icp2_out2 = NW.conv(icp2_reduction2, 5, 5, 96, 1, 1, name='icp2_out2')\n            # branch 3\n            icp2_pool = 
NW.max_pool(icp2_in, 3, 3, 1, 1, name='icp2_pool')\n            icp2_out3 = NW.conv(icp2_pool, 1, 1, 64, 1, 1, name='icp2_out3')\n            # concat\n            icp2_out = NW.concat([icp2_out0,\n                                  icp2_out1,\n                                  icp2_out2,\n                                  icp2_out3], 3, 'icp2_out')\n        \n        \"\"\" 3rd inception layer group \"\"\"\n        print(\"icp3\")\n        with tf.name_scope('icp3'):\n            icp3_in = NW.max_pool(icp2_out, 3, 3, 2, 2, name='icp3_in')\n            # branch 0\n            icp3_out0 = NW.conv(icp3_in, 1, 1, 192, 1, 1, name='icp3_out0')\n            # branch 1\n            icp3_reduction1 = NW.conv(icp3_in, 1, 1, 96, 1, 1, name='icp3_reduction1')\n            icp3_out1 = NW.conv(icp3_reduction1, 3, 3, 208, 1, 1, name='icp3_out1')\n            # branch 2\n            icp3_reduction2 = NW.conv(icp3_in, 1, 1, 16, 1, 1, name='icp3_reduction2')\n            icp3_out2 = NW.conv(icp3_reduction2, 5, 5, 48, 1, 1, name='icp3_out2')\n            # branch 3\n            icp3_pool = NW.max_pool(icp3_in, 3, 3, 1, 1, name='icp3_pool')\n            icp3_out3 = NW.conv(icp3_pool, 1, 1, 64, 1, 1, name='icp3_out3')\n            # concat\n            icp3_out = NW.concat([icp3_out0,\n                                  icp3_out1,\n                                  icp3_out2,\n                                  icp3_out3], 3, 'icp3_out')\n        \n        \"\"\" 1st classify branch \"\"\"\n        with tf.name_scope('cls1'):\n            cls1_pool = NW.avg_pool(icp3_out, 5, 5, 3, 3, padding='VALID', name='cls1_pool')\n            cls1_reduction_pose = NW.conv(cls1_pool, 1, 1, 128, 1, 1, name='cls1_reduction_pose')\n            cls1_fc1_pose = NW.fc(cls1_reduction_pose, 1024, name='cls1_fc1_pose')\n            cls1_fc_pose_xy = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_xy')\n            cls1_fc_pose_ab = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_ab')\n            self.layers[\"cls1_fc_pose_xy\"] = cls1_fc_pose_xy\n            self.layers[\"cls1_fc_pose_ab\"] = cls1_fc_pose_ab\n        \n        \"\"\" 4th inception layer group \"\"\"\n        print(\"icp4\")\n        with tf.name_scope('icp4'):\n            # branch 0\n            icp4_out0 = NW.conv(icp3_out, 1, 1, 160, 1, 1, name='icp4_out0')\n            # branch 1\n            icp4_reduction1 = NW.conv(icp3_out, 1, 1, 112, 1, 1, name='icp4_reduction1')\n            icp4_out1 = NW.conv(icp4_reduction1, 3, 3, 224, 1, 1, name='icp4_out1')\n            # branch 2\n            icp4_reduction2 = NW.conv(icp3_out, 1, 1, 24, 1, 1, name='icp4_reduction2')\n            icp4_out2 = NW.conv(icp4_reduction2, 5, 5, 64, 1, 1, name='icp4_out2')\n            # branch 3\n            icp4_pool = NW.max_pool(icp3_out, 3, 3, 1, 1, name='icp4_pool')\n            icp4_out3 = NW.conv(icp4_pool, 1, 1, 64, 1, 1, name='icp4_out3')\n            # concat\n            icp4_out = NW.concat([icp4_out0,\n                                  icp4_out1,\n                                  icp4_out2,\n                                  icp4_out3],3, name='icp4_out')\n\n        \"\"\" 5th inception layer group \"\"\"\n        print(\"icp5\")\n        with tf.name_scope('icp5'):\n            # branch 0\n            icp5_out0 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_out0')\n            # branch 1\n            icp5_reduction1 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_reduction1')\n            icp5_out1 = NW.conv(icp5_reduction1, 3, 3, 256, 1, 1, name='icp5_out1')\n            # branch 2\n            icp5_reduction2 = NW.conv(icp4_out,1, 1, 24, 1, 1, name='icp5_reduction2')\n            icp5_out2 = NW.conv(icp5_reduction2, 5, 5, 64, 1, 1, name='icp5_out2')\n            # branch 3\n            icp5_pool = NW.max_pool(icp4_out,3, 3, 1, 1, name='icp5_pool')\n            icp5_out3 = NW.conv(icp5_pool, 1, 1, 64, 1, 1, name='icp5_out3')\n            # concat\n            icp5_out = NW.concat([icp5_out0, \n                                  icp5_out1, \n                                  icp5_out2, \n                                  icp5_out3], 3, name='icp5_out')\n        \n        \"\"\" 6th inception layer group \"\"\"\n        print(\"icp6\")\n        with tf.name_scope('icp6'):\n            # branch 0\n            icp6_out0 = NW.conv(icp5_out, 1, 1, 112, 1, 1, name='icp6_out0')\n            # branch 1\n            icp6_reduction1 = NW.conv(icp5_out, 1, 1, 144, 1, 1, name='icp6_reduction1')\n            icp6_out1 = NW.conv(icp6_reduction1, 3, 3, 288, 1, 1, name='icp6_out1')\n            # branch 2\n            icp6_reduction2 = NW.conv(icp5_out, 1, 1, 32, 1, 1, name='icp6_reduction2')\n            
icp6_out2 = NW.conv(icp6_reduction2, 5, 5, 64, 1, 1, name='icp6_out2')\n            # branch 3\n            icp6_pool = NW.max_pool(icp5_out,3, 3, 1, 1, name='icp6_pool')\n            icp6_out3 = NW.conv(icp6_pool, 1, 1, 64, 1, 1, name='icp6_out3')\n            # concat\n            icp6_out = NW.concat([icp6_out0,\n                                  icp6_out1,\n                                  icp6_out2,\n                                  icp6_out3], 3, name='icp6_out')\n\n        \"\"\" 2nd classify branch \"\"\"\n        with tf.name_scope('cls2'):\n            cls2_pool = NW.avg_pool(icp6_out, 5, 5, 3, 3, padding='VALID', name='cls2_pool')\n            cls2_reduction_pose = NW.conv(cls2_pool, 1, 1, 128, 1, 1, name='cls2_reduction_pose')\n            cls2_fc1 = NW.fc(cls2_reduction_pose, 1024, name='cls2_fc1')\n            cls2_fc_pose_xy = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_xy')\n            cls2_fc_pose_ab = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_ab')\n            self.layers[\"cls2_fc_pose_xy\"] = cls2_fc_pose_xy\n            self.layers[\"cls2_fc_pose_ab\"] = cls2_fc_pose_ab\n\n        \"\"\" 7th inception layer group \"\"\"\n        print(\"icp7\")\n        with tf.name_scope('icp7'):\n            # branch 0\n            icp7_out0 = NW.conv(icp6_out, 1, 1, 256, 1, 1, name='icp7_out0')\n            # branch 1\n            icp7_reduction1 = NW.conv(icp6_out, 1, 1, 160, 1, 1, name='icp7_reduction1')\n            icp7_out1 = NW.conv(icp7_reduction1, 3, 3, 320, 1, 1, name='icp7_out1')\n            # branch 2\n            icp7_reduction2 = NW.conv(icp6_out, 1, 1, 32, 1, 1, name='icp7_reduction2')\n            icp7_out2 = NW.conv(icp7_reduction2, 5, 5, 128, 1, 1, name='icp7_out2')\n            # branch 3\n            icp7_pool = NW.max_pool(icp6_out, 3, 3, 1, 1, name='icp7_pool')\n            icp7_out3 = NW.conv(icp7_pool, 1, 1, 128, 1, 1, name='icp7_out3')\n            # concat\n            icp7_out = NW.concat([icp7_out0,\n                                  icp7_out1,\n                                  icp7_out2,\n                                  icp7_out3], 3, name='icp7_out')\n\n        \"\"\" 8th inception layer group \"\"\"\n        print(\"icp8\")\n        with tf.name_scope('icp8'):\n            icp8_in = NW.max_pool(icp7_out, 3, 3, 2, 2, name='icp8_in')\n            # branch 0\n            icp8_out0 = NW.conv(icp8_in, 1, 1, 256, 1, 1, name='icp8_out0')\n            # branch 1\n            icp8_reduction1 = NW.conv(icp8_in, 1, 1, 160, 1, 1, name='icp8_reduction1')\n            icp8_out1 = NW.conv(icp8_reduction1, 3, 3, 320, 1, 1, name='icp8_out1')\n            # branch 2\n            icp8_reduction2 = NW.conv(icp8_in, 1, 1, 32, 1, 1, name='icp8_reduction2')\n            icp8_out2 = NW.conv(icp8_reduction2, 5, 5, 128, 1, 1, name='icp8_out2')\n            # branch 3\n            icp8_pool = NW.max_pool(icp8_in, 3, 3, 1, 1, name='icp8_pool')\n            icp8_out3 = NW.conv(icp8_pool, 1, 1, 128, 1, 1, name='icp8_out3')\n            # concat\n            icp8_out = NW.concat([icp8_out0,\n                                  icp8_out1,\n                                  icp8_out2,\n                                  icp8_out3], 3, name='icp8_out')\n        \n        \"\"\" 9th inception layer group \"\"\"\n        print(\"icp9\")\n        with tf.name_scope('icp9'):\n            # branch 0\n            icp9_out0 = NW.conv(icp8_out, 1, 1, 384, 1, 1, name='icp9_out0')\n            # branch 1\n            icp9_reduction1 = NW.conv(icp8_out, 1, 1, 192, 1, 1, name='icp9_reduction1')\n            icp9_out1 = NW.conv(icp9_reduction1, 3, 3, 384, 1, 1, name='icp9_out1')\n            # branch 2\n            icp9_reduction2 = NW.conv(icp8_out, 1, 1, 48, 1, 1, name='icp9_reduction2')\n            icp9_out2 = NW.conv(icp9_reduction2, 5, 5, 128, 1, 1, name='icp9_out2')\n            # branch 3\n            icp9_pool = NW.max_pool(icp8_out, 3, 3, 1, 1, name='icp9_pool')\n            icp9_out3 = NW.conv(icp9_pool, 1, 1, 128, 1, 1, name='icp9_out3')\n            # concat\n            icp9_out = NW.concat([icp9_out0,\n                                  icp9_out1,\n                                  icp9_out2,\n                                  icp9_out3], 3, name='icp9_out')\n\n        \"\"\" 3rd classify branch \"\"\"\n        with tf.name_scope('cls3'):\n            cls3_pool = NW.avg_pool(icp9_out, 7, 7, 1, 1, padding='VALID', name='cls3_pool')\n            cls3_fc1_pose = NW.fc(cls3_pool, 2048, name='cls3_fc1_pose')\n            cls3_fc_pose_xy = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_xy')\n            cls3_fc_pose_ab = NW.fc(cls3_fc1_pose, 2, 
relu=False, name='cls3_fc_pose_ab')\n self.layers[\"cls3_fc_pose_xy\"] = cls3_fc_pose_xy\n self.layers[\"cls3_fc_pose_ab\"] = cls3_fc_pose_ab", "def baseUNet(input_shape,conv_depth,n_classes,init_w,dropout):\n inputs = Input(input_shape)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding='same',\n kernel_initializer=init_w)(inputs)\n\n c1=Conv2D(conv_depth,\n (3,3),\n activation='relu',\n padding=\"same\",\n kernel_initializer=init_w)(c1)\n\n # pool down to next layer\n pool1 = MaxPooling2D((2,2),strides = (2,2))(c1)\n\n conv_depth *= 2\n\n # convolute down again\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool1)\n\n conv2 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv2)\n \n # pool down again\n pool2 = MaxPooling2D((2,2),strides = (2,2))(conv2)\n\n conv_depth *= 2 \n\n # Convolution\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool2)\n\n conv3 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv3)\n \n # pool down\n pool3 = MaxPooling2D((2,2),strides = (2,2))(conv3)\n\n conv_depth *= 2 \n # Convolution\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool3)\n\n conv4 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv4)\n \n # pool down \n pool4 = MaxPooling2D((2,2),strides = (2,2))(conv4)\n\n conv_depth *=2 \n\n # Convolution\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(pool4)\n\n conv5 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv5)\n\n drop = Dropout(dropout)(conv5)\n\n conv_depth /= 2\n conv_depth = int(conv_depth) \n # do upsampling\n up1 = UpSampling2D(size = (2,2))(drop)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up1)\n \n # add in skip info\n cat1 = concatenate([conv4,conv6],axis = 3)\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat1)\n\n conv6 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv6)\n\n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up2 = UpSampling2D(size = (2,2))(conv6)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up2)\n \n # add in skip info\n cat2 = concatenate([conv3,conv7],axis = 3)\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat2)\n\n conv7 = Conv2D(conv_depth,\n activation = 'relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv7)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up3 = UpSampling2D(size = 
(2,2))(conv7)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size=(3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up3)\n \n # add in skip info\n cat3 = concatenate([conv2,conv8],axis = 3)\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat3)\n\n conv8 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv8)\n \n conv_depth /= 2\n conv_depth = int(conv_depth)\n # do upsampling\n up4 = UpSampling2D(size = (2,2))(conv8)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(up4)\n \n # add in skip info\n cat4 = concatenate([c1,conv9],axis = 3)\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(cat4)\n\n conv9 = Conv2D(conv_depth,\n activation ='relu',\n kernel_size = (3,3),\n strides = (1,1),\n padding = \"same\",\n kernel_initializer=init_w)(conv9)\n\n outputs = Conv2D(n_classes, 1, activation = 'softmax')(conv9)\n\n return outputs,inputs", "def mini_imagenet_tasksets(\n train_ways=5,\n train_samples=10,\n test_ways=5,\n test_samples=10,\n root='~/data',\n data_augmentation=None,\n device=None,\n **kwargs,\n):\n if data_augmentation is None:\n train_data_transforms = None\n test_data_transforms = None\n elif data_augmentation == 'normalize':\n train_data_transforms = Compose([\n lambda x: x / 255.0,\n ])\n test_data_transforms = train_data_transforms\n elif data_augmentation == 'lee2019':\n normalize = Normalize(\n mean=[120.39586422/255.0, 115.59361427/255.0, 104.54012653/255.0],\n std=[70.68188272/255.0, 68.27635443/255.0, 72.54505529/255.0],\n )\n train_data_transforms = Compose([\n ToPILImage(),\n RandomCrop(84, padding=8),\n ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n RandomHorizontalFlip(),\n ToTensor(),\n normalize,\n ])\n test_data_transforms = Compose([\n normalize,\n ])\n else:\n raise ValueError('Invalid data_augmentation argument.')\n\n train_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='train',\n download=True,\n )\n valid_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='validation',\n download=True,\n )\n test_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='test',\n download=True,\n )\n if device is None:\n train_dataset.transform = train_data_transforms\n valid_dataset.transform = test_data_transforms\n test_dataset.transform = test_data_transforms\n else:\n train_dataset = l2l.data.OnDeviceDataset(\n dataset=train_dataset,\n transform=train_data_transforms,\n device=device,\n )\n valid_dataset = l2l.data.OnDeviceDataset(\n dataset=valid_dataset,\n transform=test_data_transforms,\n device=device,\n )\n test_dataset = l2l.data.OnDeviceDataset(\n dataset=test_dataset,\n transform=test_data_transforms,\n device=device,\n )\n train_dataset = l2l.data.MetaDataset(train_dataset)\n valid_dataset = l2l.data.MetaDataset(valid_dataset)\n test_dataset = l2l.data.MetaDataset(test_dataset)\n\n train_transforms = [\n NWays(train_dataset, train_ways),\n KShots(train_dataset, train_samples),\n LoadData(train_dataset),\n RemapLabels(train_dataset),\n ConsecutiveLabels(train_dataset),\n ]\n valid_transforms = [\n NWays(valid_dataset, test_ways),\n KShots(valid_dataset, test_samples),\n LoadData(valid_dataset),\n 
ConsecutiveLabels(valid_dataset),\n RemapLabels(valid_dataset),\n ]\n test_transforms = [\n NWays(test_dataset, test_ways),\n KShots(test_dataset, test_samples),\n LoadData(test_dataset),\n RemapLabels(test_dataset),\n ConsecutiveLabels(test_dataset),\n ]\n\n _datasets = (train_dataset, valid_dataset, test_dataset)\n _transforms = (train_transforms, valid_transforms, test_transforms)\n return _datasets, _transforms", "def shallow_CNN(num_bands = None, k_1 = None, k_2 = None, k_3 = None):\n active = 'relu'\n active2 = 'tanh'\n active3 = 'linear'\n inp = Input(shape=(None, None, num_bands))\n# bn = BatchNormalization()(inp)\n l1 = Conv2D(64, kernel_size=k_1, activation= active, padding='same', kernel_initializer='he_normal' )(inp)\n l2 = Conv2D(48, kernel_size=k_2, activation=active, padding='same', kernel_initializer='he_normal')(l1)\n l3 = Conv2D(32, kernel_size=k_3, activation=active, padding='same', kernel_initializer='he_normal')(l2)\n l4 = Conv2D(1, kernel_size=k_3, activation=active2, padding='same', kernel_initializer='he_normal',name=\"details\")(l3)\n# l4= Conv2D(1, kernel_size=k_3, activation=active2, padding='same', kernel_initializer='he_normal')(l3)\n# inp2 = Input(shape=(None, None, 1))\n inp1 = Input(shape=(None, None, 1))\n out = Add(name=\"band\")([l4, inp1])\n out1 = Conv2D(1, kernel_size=k_3, activation=active3, padding='same', kernel_initializer='he_normal',name=\"struct\")(out)\n out2 = Conv2D(1, kernel_size=k_3, activation=active3, padding='same', kernel_initializer='he_normal',name=\"TV\")(out)\n model = Model([inp, inp1], [out, out1, out2], name='shallow_CNN')\n \n# out= Conv2D(1, kernel_size=k_3, activation='relu', padding='same', kernel_initializer='he_normal',name=\"nothing\")(out1)\n# model = Model(inp, l4, name='shallow_CNN')\n return model", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 1, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 1, 1, 1, 0), # Layer 4: Convolution(Layer1)\n (5, 1, 5, 4, 0), # Layer 5: Convolution(Layer4)\n (6, 6, 0, 3, 5), # Layer 6: Concat(Layer3, Layer5)\n (7, 2, 3, 1, 0), # Layer 7: MaxPooling(Layer1)\n (8, 1, 1, 7, 0), # Layer 8: Convolution(Layer7)\n (9, 6, 0, 6, 8), # Layer 9: Concat(Layer6, Layer8)\n (10, 7, 0, 0, 0), # Layer 10: Terminal\n ]\n\n # Load training and eval data\n (train_data, train_labels), (eval_data, eval_labels) = \\\n tf.keras.datasets.mnist.load_data()\n\n # Fix the dataset\n self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n self.train_labels = train_labels.astype(np.int32)\n\n self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n self.eval_labels = eval_labels.astype(np.int32)\n\n # The batch size\n self.batch_size = 256\n\n # Workspace directory\n workspace_dir = \"./workspace\"\n self.training_dir = \"{workspace}/trainer_test\".format(\n workspace=workspace_dir\n )", "def make_model(self, inputs, is_training):\n with tf.variable_scope('ResNet50'):\n x = conv2d(inputs, 64, [7, 7], strides=[1, 2, 2, 1], name='conv1') # size 1/2\n x = bn(x, is_training)\n x = relu(x)\n x = max_pool(x, ksize=[1, 3, 3, 1], name='pool1') # size 1/4\n\n x = self.conv_block(x, [64, 64, 256], '2_1', is_training, s=1)\n x = self.identity_block(x, [64, 64, 256], '2_2', is_training)\n x = self.identity_block(x, [64, 64, 256], '2_3', is_training)\n\n x = self.conv_block(x, [128, 128, 512], '3_1', is_training)\n x = self.identity_block(x, [128, 128, 512], '3_2', 
is_training)\n            x = self.identity_block(x, [128, 128, 512], '3_3', is_training)\n\n            x = self.atrous_conv_block(x, [256, 256, 1024], '4_1', 2, is_training, s=1)\n            x = self.atrous_identity_block(x, [256, 256, 1024], '4_2', 2, is_training)\n            x = self.atrous_identity_block(x, [256, 256, 1024], '4_3', 2, is_training)\n            x = self.atrous_identity_block(x, [256, 256, 1024], '4_4', 2, is_training)\n            x = self.atrous_identity_block(x, [256, 256, 1024], '4_5', 2, is_training)\n            x = self.atrous_identity_block(x, [256, 256, 1024], '4_6', 2, is_training)\n\n            x = self.atrous_conv_block(x, [512, 512, 2048], '5_1', 4, is_training, s=1)\n            x = self.atrous_identity_block(x, [512, 512, 2048], '5_2', 4, is_training)\n            x = self.atrous_identity_block(x, [512, 512, 2048], '5_3', 4, is_training)\n\n\n\n        \"\"\"\n        Atrous Pyramid Pooling. Decoder\n        \"\"\"\n        with tf.variable_scope('ASPP'):\n            feature_map_shape = x.get_shape().as_list()\n\n            # global average pooling\n            # average over the height and width of the feature map\n            feature_map = tf.reduce_mean(x, [1, 2], keepdims=True)\n\n            feature_map = conv2d(feature_map, 256, [1, 1], name='gap_feature_map')\n            feature_map = tf.image.resize_bilinear(feature_map, [feature_map_shape[1], feature_map_shape[2]])\n\n            rate1 = conv2d(x, 256, [1, 1], name='rate1')\n            rate6 = atrous_conv2d(x, 256, [3, 3], rate=6, name='rate6')\n            rate12 = atrous_conv2d(x, 256, [3, 3], rate=12, name='rate12')\n            rate18 = atrous_conv2d(x, 256, [3, 3], rate=18, name='rate18')\n\n            concated = tf.concat([feature_map, rate1, rate6, rate12, rate18], axis=3)\n\n            net = conv2d(concated, 256, [1, 1], name='net')\n\n            logits = conv2d(net, self.N_CLASS, [1, 1], name='logits')\n            logits = tf.image.resize_bilinear(logits, size=[self.RESIZE, self.RESIZE], name='out')\n\n            pred = tf.argmax(logits, axis=3)\n            pred = tf.expand_dims(pred, dim=3)\n\n            return logits, pred", "def setUp(self):\n        # The short NSC used in this example\n        self.net_nsc = [\n            (1, 4, 0, 0, 0),  # Layer 1: Identity(input)\n            (2, 1, 1, 1, 0),  # Layer 2: Convolution(Layer1)\n            (3, 1, 3, 2, 0),  # Layer 3: Convolution(Layer2)\n            (4, 1, 1, 1, 0),  # Layer 4: Convolution(Layer1)\n            (5, 1, 5, 4, 0),  # Layer 5: Convolution(Layer4)\n            (6, 6, 0, 3, 5),  # Layer 6: Concat(Layer3, Layer5)\n            (7, 2, 3, 1, 0),  # Layer 7: MaxPooling(Layer1)\n            (8, 1, 1, 7, 0),  # Layer 8: Convolution(Layer7)\n            (9, 6, 0, 6, 8),  # Layer 9: Concat(Layer6, Layer8)\n            (10, 7, 0, 0, 0),  # Layer 10: Terminal\n        ]\n\n        # Load training and eval data\n        (train_data, train_labels), (eval_data, eval_labels) = \\\n            tf.keras.datasets.mnist.load_data()\n\n        # Fix the dataset\n        self.train_data = normalize_dataset(dataset=train_data, baseline=255)\n        self.train_labels = train_labels.astype(np.int32)\n\n        self.eval_data = normalize_dataset(dataset=eval_data, baseline=255)\n        self.eval_labels = eval_labels.astype(np.int32)\n\n        # The batch size\n        self.batch_size = 256\n\n        # Workspace directory\n        workspace_dir = \"./workspace\"\n        self.training_dir = \"{workspace}/trainer_test_earlystop\".format(\n            workspace=workspace_dir\n        )", "def __init__(self):\n        super(Decoder_1m, self).__init__()\n        self.lconvtwos = nn.ModuleList(\n            [\n                nn.Sequential(\n                    nn.Dropout(p=0.1),\n                    nn.Conv2d(128, 32, kernel_size=(3, 3), padding=1),\n                    nn.BatchNorm2d(32),\n                    nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n                    nn.BatchNorm2d(64),\n                ),\n                nn.Sequential(\n                    nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n                    nn.BatchNorm2d(32),\n                    nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n                    nn.BatchNorm2d(64),\n                ),\n                nn.Sequential(\n                    nn.Conv2d(64, 32, kernel_size=(3, 3), 
padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n ]\n )\n\n self.convtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n 
nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n 
nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ]\n )\n self.final = nn.Sequential(\n nn.Conv2d(64, 5, kernel_size=(1, 1), padding=0),\n nn.BatchNorm2d(5),\n nn.ReLU(inplace=True),\n nn.Conv2d(5, 1, kernel_size=(1, 1), padding=0),\n )", "def __init__(self, depth=7, feature_size=512, use_eql=True, gpu_parallelize=False):\r\n from torch.nn import ModuleList\r\n from CustomLayers import DisGeneralConvBlock, DisFinalBlock, _equalized_conv2d\r\n from torch.nn import Conv2d\r\n\r\n super().__init__()\r\n\r\n assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \\\r\n \"latent size not a power of 2\"\r\n if depth >= 4:\r\n assert feature_size >= np.power(2, depth - 4), \\\r\n \"feature size cannot be produced\"\r\n\r\n # create state of the object\r\n self.gpu_parallelize = gpu_parallelize\r\n self.use_eql = use_eql\r\n self.depth = depth\r\n self.feature_size = feature_size\r\n\r\n # create the fromRGB layers for various inputs:\r\n if self.use_eql:\r\n def from_rgb(out_channels):\r\n return _equalized_conv2d(1, out_channels, (1, 1), bias=True)\r\n else:\r\n def from_rgb(out_channels):\r\n return Conv2d(1, out_channels, (1, 1), bias=True)\r\n\r\n self.rgb_to_features = ModuleList()\r\n self.final_converter = from_rgb(self.feature_size // 2)\r\n\r\n # create a module list of the other required general convolution blocks\r\n self.layers = ModuleList()\r\n self.final_block = DisFinalBlock(self.feature_size, use_eql=self.use_eql)\r\n\r\n # create the remaining layers\r\n for i in range(self.depth - 1):\r\n if i > 2:\r\n layer = DisGeneralConvBlock(\r\n int(self.feature_size // np.power(2, i - 2)),\r\n int(self.feature_size // np.power(2, i - 2)),\r\n use_eql=self.use_eql\r\n )\r\n rgb = from_rgb(int(self.feature_size // np.power(2, i - 1)))\r\n else:\r\n layer = DisGeneralConvBlock(self.feature_size, self.feature_size // 2,\r\n use_eql=self.use_eql)\r\n rgb = from_rgb(self.feature_size // 2)\r\n\r\n self.layers.append(layer)\r\n self.rgb_to_features.append(rgb)\r\n\r\n # just replace the last converter\r\n self.rgb_to_features[self.depth - 2] = \\\r\n from_rgb(self.feature_size // np.power(2, i - 2))\r\n\r\n # parallelize the modules from the module-lists if asked to:\r\n if self.gpu_parallelize:\r\n for i in range(len(self.layers)):\r\n self.layers[i] = torch.nn.DataParallel(self.layers[i])\r\n self.rgb_to_features[i] = torch.nn.DataParallel(\r\n self.rgb_to_features[i])\r\n\r\n # Note that since the FinalBlock contains the StdDev layer,\r\n # it cannot be parallelized so easily. It will have to be parallelized\r\n # from the Lower level (from CustomLayers). 
This much parallelism\r\n        # seems enough for me.\r", "def computeNN(train, test):\n    \n    shallow_NN = test[['user_id', 'movie_id']].copy()\n    deep_NN = test[['user_id', 'movie_id']].copy()\n    \n    categorical_train_y = np.zeros([train.shape[0], 5])\n    categorical_train_y[np.arange(train.shape[0]), train.rating - 1] = 1\n\n\n    categorical_test_y = np.zeros([test.shape[0], 5])\n    categorical_test_y[np.arange(test.shape[0]), test.rating - 1] = 1\n    \n    n_items = 1000\n    n_users = 10000\n    \n    \n    def shallow_net():\n        features = 48\n\n        input_i = layers.Input(shape=[1])\n        i = layers.Embedding(n_items + 1, features)(input_i)\n        i = layers.Flatten()(i)\n        i = layers.normalization.BatchNormalization()(i)\n\n        input_u = layers.Input(shape=[1])\n        u = layers.Embedding(n_users + 1, features)(input_u)\n        u = layers.Flatten()(u)\n        u = layers.normalization.BatchNormalization()(u)\n\n        nn = layers.concatenate([i, u])\n\n        nn = layers.Dense(512, activation='relu')(nn)\n        nn = layers.Dropout(0.5)(nn)\n        nn = layers.normalization.BatchNormalization()(nn)\n\n        nn = layers.Dense(128, activation='relu')(nn)\n\n        output = layers.Dense(5, activation='softmax')(nn)\n\n        model = models.Model([input_i, input_u], output)\n        model.compile(optimizer='adamax', loss='categorical_crossentropy')\n        return model\n    \n    def deep_net():\n        features = 48\n\n        input_i = layers.Input(shape=[1])\n        i = layers.Embedding(n_items + 1, features)(input_i)\n        i = layers.Flatten()(i)\n        i = layers.normalization.BatchNormalization()(i)\n\n        input_u = layers.Input(shape=[1])\n        u = layers.Embedding(n_users + 1, features)(input_u)\n        u = layers.Flatten()(u)\n        u = layers.normalization.BatchNormalization()(u)\n\n        nn = layers.concatenate([i, u])\n\n        nn = layers.Dense(1024, activation='relu')(nn)\n        nn = layers.Dropout(0.5)(nn)\n        nn = layers.normalization.BatchNormalization()(nn)\n        nn = layers.Dense(512, activation='relu')(nn)\n        nn = layers.Dropout(0.5)(nn)\n        nn = layers.normalization.BatchNormalization()(nn)\n        nn = layers.Dense(256, activation='relu')(nn)\n        nn = layers.Dropout(0.5)(nn)\n        nn = layers.normalization.BatchNormalization()(nn)\n        nn = layers.Dense(128, activation='relu')(nn)\n\n        output = layers.Dense(5, activation='softmax')(nn)\n\n        model = models.Model([input_i, input_u], output)\n        model.compile(optimizer='adamax', loss='categorical_crossentropy')\n\n        return model\n\n    model_deep = deep_net()\n    model_shallow = shallow_net()\n    print (\"Starting to compute shallow neural network...\")\n    model_shallow.fit([train.movie_id, train.user_id], y=categorical_train_y, batch_size=20480, epochs=20)\n    pred_shallow = model_shallow.predict([test.movie_id, test.user_id])\n    print (\"... Finished successfully\")\n    \n    print (\"Starting to compute deep neural network...\")\n    model_deep.fit([train.movie_id, train.user_id], y=categorical_train_y, batch_size=20480, epochs=20)\n    pred_deep = model_deep.predict([test.movie_id, test.user_id])\n    print (\"... 
Finished sucessfully\")\n \n \n shallow_NN['NN_shallow_rating'] = np.dot(pred_shallow,[1,2, 3, 4, 5])\n deep_NN['NN_deep_rating'] = np.dot(pred_deep,[1,2, 3, 4, 5])\n \n NN_rating = shallow_NN\\\n .merge(deep_NN, on=['user_id', 'movie_id'])\n \n return NN_rating", "def _init_layers(self) -> None:\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg))\n pred_pad_size = self.pred_kernel_size // 2\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_anchors * self.cls_out_channels,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_reg = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 4,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.atss_centerness = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * 1,\n self.pred_kernel_size,\n padding=pred_pad_size)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])", "def __init__(self, gpu_ids='0', isTrain=False, checkpoints_dir='./checkpoints', name='experiment_name', continue_train=False, model='cycle_gan'):\n \n assert(not isTrain)\n BaseModel.__init__(self, gpu_ids=gpu_ids, isTrain=isTrain, checkpoints_dir=checkpoints_dir, name=name, continue_train=continue_train, verbose=False)\n\n self.input_nc = 3\n self.output_nc = 3\n self.ngf = 64 # num of gen filters in the last conv layer\n self.ndf = 64 # num of discriminator filters in the first conv layer'\n self.netG = 'resnet_9blocks' # specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]\n self.norm = 'instance' # instance normalization or batch normalization [instance | batch | none]\n self.no_dropout = True\n self.init_type = 'normal' # network initialization [normal | xavier | kaiming | orthogonal]\n self.init_gain = 0.02\n self.netD = 'basic' # specify discriminator architecture [basic | n_layers | pixel]\n self.n_layers_D = 3 # only used if netD==n_layers\n self.pool_size = 50 # the size of image buffer that stores previously generated images\n self.lr = 0.0002\n self.beta1 = 0.5 # momentum term of adam\n self.gan_mode = 'lsgan' # the type of GAN objective. 
[vanilla| lsgan | wgangp]\n self.model_suffix = ''\n\n self.loss_names = []\n self.visual_names = ['real', 'fake']\n self.model_names = ['G' + self.model_suffix] # only generator is needed.\n self.netG = networks.define_G(self.input_nc, self.output_nc, self.ngf, self.netG,\n self.norm, not self.no_dropout, self.init_type, self.init_gain, self.gpu_ids)\n\n setattr(self, 'netG' + self.model_suffix, self.netG) # store netG in self.", "def __init__(self):\n super(Encoder, self).__init__()\n\n self.lconv1 = nn.Sequential(\n nn.Conv1d(4, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n )\n\n self.conv1 = nn.Sequential(\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n nn.Conv1d(64, 64, kernel_size=9, padding=4),\n nn.BatchNorm1d(64),\n nn.ReLU(inplace=True),\n )\n\n self.lconv2 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(64, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n )\n\n self.conv2 = nn.Sequential(\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n nn.Conv1d(96, 96, kernel_size=9, padding=4),\n nn.BatchNorm1d(96),\n nn.ReLU(inplace=True),\n )\n\n self.lconv3 = nn.Sequential(\n nn.MaxPool1d(kernel_size=4, stride=4),\n nn.Conv1d(96, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv3 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv4 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv4 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv5 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv5 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv6 = nn.Sequential(\n nn.MaxPool1d(kernel_size=5, stride=5),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv6 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )\n\n self.lconv7 = nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n )\n\n self.conv7 = nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n 
nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n )", "def build_resnet152(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 8):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b7_feats = temp\n \n res4a_feats = self.basic_block(res3b7_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 36):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b35_feats = temp\n\n res5a_feats = self.basic_block(res4b35_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def train(**kwargs):\n\n # Roll out the parameters\n batch_size = kwargs[\"batch_size\"]\n n_batch_per_epoch = kwargs[\"n_batch_per_epoch\"]\n nb_epoch = kwargs[\"nb_epoch\"]\n model_name = kwargs[\"model_name\"]\n generator = kwargs[\"generator\"]\n image_dim_ordering = kwargs[\"image_dim_ordering\"]\n img_dim = kwargs[\"img_dim\"]\n patch_size = kwargs[\"patch_size\"]\n bn_mode = kwargs[\"bn_mode\"]\n label_smoothing = kwargs[\"use_label_smoothing\"]\n label_flipping = kwargs[\"label_flipping\"]\n dset = kwargs[\"dset\"]\n use_mbd = kwargs[\"use_mbd\"]\n\n epoch_size = n_batch_per_epoch * batch_size\n\n # Setup environment (logging directory etc)\n general_utils.setup_logging(model_name)\n\n # Load and rescale data\n X_full_train, X_sketch_train, X_full_val, X_sketch_val = data_utils.load_data(dset, image_dim_ordering)\n img_dim = X_full_train.shape[-3:]\n\n # Get the number of non overlapping patch and the size of input image to the discriminator\n nb_patch, img_dim_disc = data_utils.get_nb_patch(img_dim, patch_size, image_dim_ordering)\n\n try:\n\n # Create optimizers\n opt_dcgan = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n # opt_discriminator = SGD(lr=1E-3, momentum=0.9, nesterov=True)\n opt_discriminator = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n # Load generator model\n generator_model = models.load(\"generator_unet_%s\" % generator,\n img_dim,\n nb_patch,\n bn_mode,\n use_mbd,\n batch_size)\n # Load discriminator model\n discriminator_model = models.load(\"DCGAN_discriminator\",\n img_dim_disc,\n nb_patch,\n bn_mode,\n use_mbd,\n batch_size)\n\n generator_model.compile(loss='mae', optimizer=opt_discriminator)\n discriminator_model.trainable = False\n\n DCGAN_model = 
models.DCGAN(generator_model,\n discriminator_model,\n img_dim,\n patch_size,\n image_dim_ordering)\n\n loss = ['mae', 'binary_crossentropy']\n loss_weights = [1E2, 1]\n DCGAN_model.compile(loss=loss, loss_weights=loss_weights, optimizer=opt_dcgan)\n\n discriminator_model.trainable = True\n discriminator_model.compile(loss='binary_crossentropy', optimizer=opt_discriminator)\n\n gen_loss = 100\n disc_loss = 100\n\n # Start training\n print(\"Start training\")\n for e in range(nb_epoch):\n # Initialize progbar and batch counter\n progbar = generic_utils.Progbar(epoch_size)\n batch_counter = 1\n start = time.time()\n\n for X_full_batch, X_sketch_batch in data_utils.gen_batch(X_full_train, X_sketch_train, batch_size):\n\n # Create a batch to feed the discriminator model\n X_disc, y_disc = data_utils.get_disc_batch(X_full_batch,\n X_sketch_batch,\n generator_model,\n batch_counter,\n patch_size,\n image_dim_ordering,\n label_smoothing=label_smoothing,\n label_flipping=label_flipping)\n\n # Update the discriminator\n disc_loss = discriminator_model.train_on_batch(X_disc, y_disc)\n\n # Create a batch to feed the generator model\n X_gen_target, X_gen = next(data_utils.gen_batch(X_full_train, X_sketch_train, batch_size))\n y_gen = np.zeros((X_gen.shape[0], 2), dtype=np.uint8)\n y_gen[:, 1] = 1\n\n # Freeze the discriminator\n discriminator_model.trainable = False\n gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])\n # Unfreeze the discriminator\n discriminator_model.trainable = True\n\n batch_counter += 1\n progbar.add(batch_size, values=[(\"D logloss\", disc_loss),\n (\"G tot\", gen_loss[0]),\n (\"G mae\", gen_loss[1]),\n (\"G logloss\", gen_loss[2])])\n\n # Save images for visualization\n if batch_counter % (n_batch_per_epoch / 2) == 0:\n # Get new images from validation\n data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model,\n batch_size, image_dim_ordering, \"training\")\n X_full_batch, X_sketch_batch = next(data_utils.gen_batch(X_full_val, X_sketch_val, batch_size))\n data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model,\n batch_size, image_dim_ordering, \"validation\")\n\n if batch_counter >= n_batch_per_epoch:\n break\n\n print(\"\")\n print('Epoch %s/%s, Time: %s' % (e + 1, nb_epoch, time.time() - start))\n\n if e % 5 == 0:\n gen_weights_path = os.path.join('../../models/%s/gen_weights_epoch%s.h5' % (model_name, e))\n generator_model.save_weights(gen_weights_path, overwrite=True)\n\n disc_weights_path = os.path.join('../../models/%s/disc_weights_epoch%s.h5' % (model_name, e))\n discriminator_model.save_weights(disc_weights_path, overwrite=True)\n\n DCGAN_weights_path = os.path.join('../../models/%s/DCGAN_weights_epoch%s.h5' % (model_name, e))\n DCGAN_model.save_weights(DCGAN_weights_path, overwrite=True)\n\n except KeyboardInterrupt:\n pass", "def UNet(input_size=(256, 256, 1)):\n inputs = Input(input_size)\n c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(inputs)\n c1 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c1)\n p1 = MaxPooling2D((2, 2))(c1)\n\n c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p1)\n c2 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c2)\n p2 = MaxPooling2D((2, 2))(c2)\n\n c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p2)\n c3 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', 
padding='same')(c3)\n p3 = MaxPooling2D((2, 2))(c3)\n\n c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p3)\n c4 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c4)\n p4 = MaxPooling2D(pool_size=(2, 2))(c4)\n\n c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(p4)\n c5 = Conv2D(256, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c5)\n\n u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)\n u6 = concatenate([u6, c4])\n c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u6)\n c6 = Conv2D(128, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c6)\n\n u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)\n u7 = concatenate([u7, c3])\n c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u7)\n c7 = Conv2D(64, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c7)\n\n u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)\n u8 = concatenate([u8, c2])\n c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u8)\n c8 = Conv2D(32, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c8)\n\n u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)\n u9 = concatenate([u9, c1], axis=3)\n c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(u9)\n c9 = Conv2D(16, (3, 3), activation='elu', kernel_initializer='he_normal', padding='same')(c9)\n\n outputs = Conv2D(1, (1, 1), activation='sigmoid')(c9)\n model = Model(inputs=[inputs], outputs=[outputs])\n model.compile(optimizer=Adam(lr=1e-4), loss=dice_coef_loss, metrics=['accuracy', dice_coef])\n return model", "def __init__(self):\n\n super(ConvModule, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=[1, 2])\n self.conv1_bn = nn.BatchNorm2d(64)\n self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=[1, 2])\n self.conv2_bn = nn.BatchNorm2d(128)\n self.pool1 = nn.MaxPool2d(kernel_size=4, stride=2)\n self.dropout0 = nn.Dropout(p=0.4)\n\n self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=[1, 2])\n self.conv3_bn = nn.BatchNorm2d(256)\n self.conv4 = nn.Conv2d(in_channels=256, out_channels=64, kernel_size=3, stride=[1, 2])\n self.conv4_bn = nn.BatchNorm2d(64)\n self.pool2 = nn.MaxPool2d(kernel_size=4, stride=2)\n #\n # self.conv5 = nn.Conv2d(in_channels=128, out_channels=64, kernel_size=3, stride=[1, 2])\n # self.conv5_bn = nn.BatchNorm2d(64)\n # self.pool3 = nn.MaxPool2d(kernel_size=3, stride=[1, 2])", "def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n 
self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )", "def __init__(self, dropout=0, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\r\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0, \r\n use_batch_norm=False, dtype=np.float32):\r\n self.use_dropout = dropout > 0\r\n self.use_batch_norm = use_batch_norm\r\n self.params = {}\r\n self.reg = reg\r\n self.num_layers = 3\r\n self.dtype = dtype\r\n self.pool_height = 2\r\n self.pool_width = 2\r\n self.pool_stride = 2\r\n\r\n ############################################################################\r\n # TODO: Initialize weights and biases for the three-layer convolutional #\r\n # network. Weights should be initialized from a Gaussian with standard #\r\n # deviation equal to weight_scale; biases should be initialized to zero. #\r\n # All weights and biases should be stored in the dictionary self.params. #\r\n # Store weights and biases for the convolutional layer using the keys 'W1' #\r\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\r\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\r\n # of the output affine layer. #\r\n ############################################################################\r\n # Number of channels\r\n C, H, W = input_dim\r\n self.params['W1'] = np.random.randn(num_filters, C, filter_size, filter_size) * weight_scale\r\n self.params['b1'] = np.zeros(num_filters)\r\n H_pool = (H - self.pool_height) // 2 + 1\r\n W_pool = (W - self.pool_width) // 2 + 1\r\n self.params['W2'] = np.random.randn(np.prod((num_filters, H_pool, W_pool)), hidden_dim) * weight_scale\r\n self.params['b2'] = np.zeros(hidden_dim)\r\n self.params['W3'] = np.random.randn(hidden_dim, num_classes) * weight_scale\r\n self.params['b3'] = np.zeros(num_classes)\r\n\r\n # Initialize the parameters for batch normalization if necessary\r\n if self.use_batch_norm:\r\n self.params['gamma1'] = np.ones(num_filters) \r\n self.params['beta1'] = np.zeros(num_filters)\r\n self.params['gamma2'] = np.ones(hidden_dim)\r\n self.params['beta2'] = np.zeros(hidden_dim)\r\n\r\n # Set dropout parameters if necessary\r\n self.dropout_param = {}\r\n if self.use_dropout:\r\n self.dropout_param = {'mode': 'train', 'p': dropout}\r\n\r\n self.bn_params = []\r\n if self.use_batch_norm:\r\n self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\r\n\r\n ############################################################################\r\n # END OF YOUR CODE #\r\n ############################################################################\r\n\r\n for k, v in self.params.items():\r\n self.params[k] = v.astype(dtype)", "def build_resnet50(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 
'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128)\n res3b_feats = self.basic_block2(res3a_feats, 'res3b', 'bn3b', is_train, use_batch_norm, 128)\n res3c_feats = self.basic_block2(res3b_feats, 'res3c', 'bn3c', is_train, use_batch_norm, 128)\n res3d_feats = self.basic_block2(res3c_feats, 'res3d', 'bn3d', is_train, use_batch_norm, 128)\n\n res4a_feats = self.basic_block(res3d_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n res4b_feats = self.basic_block2(res4a_feats, 'res4b', 'bn4b', is_train, use_batch_norm, 256)\n res4c_feats = self.basic_block2(res4b_feats, 'res4c', 'bn4c', is_train, use_batch_norm, 256)\n res4d_feats = self.basic_block2(res4c_feats, 'res4d', 'bn4d', is_train, use_batch_norm, 256)\n res4e_feats = self.basic_block2(res4d_feats, 'res4e', 'bn4e', is_train, use_batch_norm, 256)\n res4f_feats = self.basic_block2(res4e_feats, 'res4f', 'bn4f', is_train, use_batch_norm, 256)\n\n res5a_feats = self.basic_block(res4f_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train", "def setUp(self):\n # The short NSC used in this example\n self.net_nsc = [\n (1, 4, 0, 0, 0), # Layer 1: Identity(input)\n (2, 1, 3, 1, 0), # Layer 2: Convolution(Layer1)\n (3, 1, 3, 2, 0), # Layer 3: Convolution(Layer2)\n (4, 5, 0, 1, 3), # Layer 4: Convolution(Layer1)\n (5, 7, 0, 0, 0), # Layer 5: Convolution(Layer4)\n ]", "def __init__(self, n_latent_features=1024, reduced_size=64, activation=ReLU()):\n\n super().__init__()\n self.logger = logging.getLogger(AutoEncoderConvolutional.__name__)\n self.n_latent_features = n_latent_features\n self.reduced_size = reduced_size\n self.middle_layer_size = int(16 * self.reduced_size / 4 * self.reduced_size / 4)\n self.activation = activation\n\n self.logger.info(\"Construct model..\")\n self.encode_conv1 = nn.Conv2d(3, 6, kernel_size=3, padding=1)\n self.encode_pool1 = nn.MaxPool2d(2, stride=2)\n self.encode_conv2 = nn.Conv2d(6, 16, kernel_size=3, padding=1)\n self.encode_pool2 = nn.MaxPool2d(2, stride=2)\n self.encode_fc = nn.Linear(self.middle_layer_size, self.n_latent_features)\n\n self.decode_fc = nn.Linear(self.n_latent_features, self.middle_layer_size)\n self.decode_conv1 = nn.ConvTranspose2d(16, 6, kernel_size=2, stride=2)\n self.decode_conv2 = nn.ConvTranspose2d(6, 3, kernel_size=2, stride=2)\n\n self.reset_weights()\n\n self.logger.info(\"Finished instantiation\")", "def _pspnet_builder(xs,\n name,\n cnn_fn,\n num_classes,\n is_training,\n use_global_status,\n reuse=False):\n # Ensure that the size of input data is valid (should be multiple of 6x8=48).\n h, w = xs[0].get_shape().as_list()[1:3] # NxHxWxC\n assert(h%48 == 0 and w%48 == 0 and h == w)\n\n # Build the base network.\n xs = cnn_fn(xs, name, is_training, use_global_status, reuse)\n\n with tf.variable_scope(name, reuse=reuse) as scope:\n 
# Build the PSP module\n pool_k = int(h/8) # the base network is stride 8 by default.\n\n # Build pooling layer results in 1x1 output.\n pool1s = avg_pools(xs,\n 'block5/pool1',\n pool_k,\n pool_k,\n 'VALID')\n pool1s = nn_mgpu.conv(pool1s,\n 'block5/pool1/conv1',\n 512,\n 1,\n 1,\n padding='SAME',\n biased=False,\n bn=True,\n relu=True,\n is_training=is_training,\n decay=0.99,\n use_global_status=use_global_status)\n pool1s = upsample_bilinears(pool1s, pool_k, pool_k)\n\n # Build pooling layer results in 2x2 output.\n pool2s = avg_pools(xs,\n 'block5/pool2',\n pool_k//2,\n pool_k//2,\n 'VALID')\n pool2s = nn_mgpu.conv(pool2s,\n 'block5/pool2/conv1',\n 512,\n 1,\n 1,\n padding='SAME',\n biased=False,\n bn=True,\n relu=True,\n is_training=is_training,\n decay=0.99,\n use_global_status=use_global_status)\n pool2s = upsample_bilinears(pool2s, pool_k, pool_k)\n\n # Build pooling layer results in 3x3 output.\n pool3s = avg_pools(xs,\n 'block5/pool3',\n pool_k//3,\n pool_k//3,\n 'VALID')\n pool3s = nn_mgpu.conv(pool3s,\n 'block5/pool3/conv1',\n 512,\n 1,\n 1,\n padding='SAME',\n biased=False,\n bn=True,\n relu=True,\n is_training=is_training,\n decay=0.99,\n use_global_status=use_global_status)\n pool3s = upsample_bilinears(pool3s, pool_k, pool_k)\n\n # Build pooling layer results in 6x6 output.\n pool6s = avg_pools(xs,\n 'block5/pool6',\n pool_k//6,\n pool_k//6,\n 'VALID')\n pool6s = nn_mgpu.conv(pool6s,\n 'block5/pool6/conv1',\n 512,\n 1,\n 1,\n padding='SAME',\n biased=False,\n bn=True,\n relu=True,\n is_training=is_training,\n decay=0.99,\n use_global_status=use_global_status)\n pool6s = upsample_bilinears(pool6s, pool_k, pool_k)\n\n # Fuse the pooled feature maps with its input, and generate\n # segmentation prediction.\n xs = nn_mgpu.concat(\n [pool1s, pool2s, pool3s, pool6s, xs],\n name='block5/concat',\n axis=3)\n xs = nn_mgpu.conv(xs,\n 'block5/conv2',\n 512,\n 3,\n 1,\n padding='SAME',\n biased=False,\n bn=True,\n relu=True,\n is_training=is_training,\n decay=0.99,\n use_global_status=use_global_status)\n xs = nn_mgpu.conv(xs,\n 'block5/fc1_voc12',\n num_classes,\n 1,\n 1,\n padding='SAME',\n biased=True,\n bn=False,\n relu=False,\n is_training=is_training)\n\n return xs", "def train(**kwargs):\n\n # Roll out the parameters\n patch_size = kwargs[\"patch_size\"]\n image_data_format = kwargs[\"image_data_format\"]\n generator_type = kwargs[\"generator_type\"]\n dset = kwargs[\"dset\"]\n batch_size = kwargs[\"batch_size\"]\n n_batch_per_epoch = kwargs[\"n_batch_per_epoch\"]\n nb_epoch = kwargs[\"nb_epoch\"]\n model_name = kwargs[\"model_name\"]\n save_weights_every_n_epochs = kwargs[\"save_weights_every_n_epochs\"]\n visualize_images_every_n_epochs = kwargs[\"visualize_images_every_n_epochs\"]\n use_mbd = kwargs[\"use_mbd\"]\n label_smoothing = kwargs[\"use_label_smoothing\"]\n label_flipping_prob = kwargs[\"label_flipping_prob\"]\n use_l1_weighted_loss = kwargs[\"use_l1_weighted_loss\"]\n prev_model = kwargs[\"prev_model\"]\n discriminator_optimizer = kwargs[\"discriminator_optimizer\"]\n n_run_of_gen_for_1_run_of_disc = kwargs[\"n_run_of_gen_for_1_run_of_disc\"]\n MAX_FRAMES_PER_GIF = kwargs[\"MAX_FRAMES_PER_GIF\"]\n\n # batch_size = args.batch_size\n # n_batch_per_epoch = args.n_batch_per_epoch\n # nb_epoch = args.nb_epoch\n # save_weights_every_n_epochs = args.save_weights_every_n_epochs\n # generator_type = args.generator_type\n # patch_size = args.patch_size\n # label_smoothing = False\n # label_flipping_prob = False\n # dset = args.dset\n # use_mbd = False\n\n # Check and make the 
dataset\n # If .h5 file of dset is not present, try making it\n if not os.path.exists(\"../../data/processed/%s_data.h5\" % dset):\n print(\"dset %s_data.h5 not present in '../../data/processed'!\" % dset)\n if not os.path.exists(\"../../data/%s/\" % dset):\n print(\"dset folder %s not present in '../../data'!\\n\\nERROR: Dataset .h5 file not made, and dataset not available in '../../data/'.\\n\\nQuitting.\" % dset)\n return\n else:\n if not os.path.exists(\"../../data/%s/train\" % dset) or not os.path.exists(\"../../data/%s/val\" % dset) or not os.path.exists(\"../../data/%s/test\" % dset):\n print(\"'train', 'val' or 'test' folders not present in dset folder '../../data/%s'!\\n\\nERROR: Dataset must contain 'train', 'val' and 'test' folders.\\n\\nQuitting.\" % dset)\n return\n else:\n print(\"Making %s dataset\" % dset)\n subprocess.call(['python3', '../data/make_dataset.py', '../../data/%s' % dset, '3'])\n print(\"Done!\")\n\n epoch_size = n_batch_per_epoch * batch_size\n\n init_epoch = 0\n\n if prev_model:\n print('\\n\\nLoading prev_model from', prev_model, '...\\n\\n')\n prev_model_latest_gen = sorted(glob.glob(os.path.join('../../models/', prev_model, '*gen*.h5')))[-1]\n prev_model_latest_disc = sorted(glob.glob(os.path.join('../../models/', prev_model, '*disc*.h5')))[-1]\n prev_model_latest_DCGAN = sorted(glob.glob(os.path.join('../../models/', prev_model, '*DCGAN*.h5')))[-1]\n # Find prev model name, epoch\n model_name = prev_model_latest_DCGAN.split('models')[-1].split('/')[1]\n init_epoch = int(prev_model_latest_DCGAN.split('epoch')[1][:5]) + 1\n\n # Setup environment (logging directory etc), if no prev_model is mentioned\n general_utils.setup_logging(model_name)\n\n # img_dim = X_full_train.shape[-3:]\n img_dim = (256, 256, 3)\n\n # Get the number of non overlapping patch and the size of input image to the discriminator\n nb_patch, img_dim_disc = data_utils.get_nb_patch(img_dim, patch_size, image_data_format)\n\n try:\n\n # Create optimizers\n opt_dcgan = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n if discriminator_optimizer == 'sgd':\n opt_discriminator = SGD(lr=1E-3, momentum=0.9, nesterov=True)\n elif discriminator_optimizer == 'adam':\n opt_discriminator = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n\n # Load generator model\n generator_model = models.load(\"generator_unet_%s\" % generator_type,\n img_dim,\n nb_patch,\n use_mbd,\n batch_size,\n model_name)\n\n generator_model.compile(loss='mae', optimizer=opt_discriminator)\n\n # Load discriminator model\n discriminator_model = models.load(\"DCGAN_discriminator\",\n img_dim_disc,\n nb_patch,\n use_mbd,\n batch_size,\n model_name)\n\n discriminator_model.trainable = False\n\n DCGAN_model = models.DCGAN(generator_model,\n discriminator_model,\n img_dim,\n patch_size,\n image_data_format)\n\n if use_l1_weighted_loss:\n loss = [l1_weighted_loss, 'binary_crossentropy']\n else:\n loss = [l1_loss, 'binary_crossentropy']\n\n loss_weights = [1E1, 1]\n DCGAN_model.compile(loss=loss, loss_weights=loss_weights, optimizer=opt_dcgan)\n\n discriminator_model.trainable = True\n discriminator_model.compile(loss='binary_crossentropy', optimizer=opt_discriminator)\n\n # Load prev_model\n if prev_model:\n generator_model.load_weights(prev_model_latest_gen)\n discriminator_model.load_weights(prev_model_latest_disc)\n DCGAN_model.load_weights(prev_model_latest_DCGAN)\n\n # Load and rescale data\n print('\\n\\nLoading data...\\n\\n')\n X_full_train, X_sketch_train, X_full_val, X_sketch_val = data_utils.load_data(dset, 
image_data_format)\n check_this_process_memory()\n print('X_full_train: %.4f' % (X_full_train.nbytes/2**30), \"GB\")\n print('X_sketch_train: %.4f' % (X_sketch_train.nbytes/2**30), \"GB\")\n print('X_full_val: %.4f' % (X_full_val.nbytes/2**30), \"GB\")\n print('X_sketch_val: %.4f' % (X_sketch_val.nbytes/2**30), \"GB\")\n\n # Losses\n disc_losses = []\n gen_total_losses = []\n gen_L1_losses = []\n gen_log_losses = []\n\n # Start training\n print(\"\\n\\nStarting training\\n\\n\")\n for e in range(nb_epoch):\n # Initialize progbar and batch counter\n # progbar = generic_utils.Progbar(epoch_size)\n batch_counter = 0\n gen_total_loss_epoch = 0\n gen_L1_loss_epoch = 0\n gen_log_loss_epoch = 0\n start = time.time()\n for X_full_batch, X_sketch_batch in data_utils.gen_batch(X_full_train, X_sketch_train, batch_size):\n # Create a batch to feed the discriminator model\n X_disc, y_disc = data_utils.get_disc_batch(X_full_batch,\n X_sketch_batch,\n generator_model,\n batch_counter,\n patch_size,\n image_data_format,\n label_smoothing=label_smoothing,\n label_flipping_prob=label_flipping_prob)\n # Update the discriminator\n disc_loss = discriminator_model.train_on_batch(X_disc, y_disc)\n # Create a batch to feed the generator model\n X_gen_target, X_gen = next(data_utils.gen_batch(X_full_train, X_sketch_train, batch_size))\n y_gen = np.zeros((X_gen.shape[0], 2), dtype=np.uint8)\n y_gen[:, 1] = 1\n # Freeze the discriminator\n discriminator_model.trainable = False\n # Train generator\n for _ in range(n_run_of_gen_for_1_run_of_disc-1):\n gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])\n gen_total_loss_epoch += gen_loss[0]/n_run_of_gen_for_1_run_of_disc\n gen_L1_loss_epoch += gen_loss[1]/n_run_of_gen_for_1_run_of_disc\n gen_log_loss_epoch += gen_loss[2]/n_run_of_gen_for_1_run_of_disc\n X_gen_target, X_gen = next(data_utils.gen_batch(X_full_train, X_sketch_train, batch_size))\n gen_loss = DCGAN_model.train_on_batch(X_gen, [X_gen_target, y_gen])\n # Add losses\n gen_total_loss_epoch += gen_loss[0]/n_run_of_gen_for_1_run_of_disc\n gen_L1_loss_epoch += gen_loss[1]/n_run_of_gen_for_1_run_of_disc\n gen_log_loss_epoch += gen_loss[2]/n_run_of_gen_for_1_run_of_disc\n # Unfreeze the discriminator\n discriminator_model.trainable = True\n # Progress\n # progbar.add(batch_size, values=[(\"D logloss\", disc_loss),\n # (\"G tot\", gen_loss[0]),\n # (\"G L1\", gen_loss[1]),\n # (\"G logloss\", gen_loss[2])])\n print(\"Epoch\", str(init_epoch+e+1), \"batch\", str(batch_counter+1), \"D_logloss\", disc_loss, \"G_tot\", gen_loss[0], \"G_L1\", gen_loss[1], \"G_log\", gen_loss[2])\n batch_counter += 1\n if batch_counter >= n_batch_per_epoch:\n break\n gen_total_loss = gen_total_loss_epoch/n_batch_per_epoch\n gen_L1_loss = gen_L1_loss_epoch/n_batch_per_epoch\n gen_log_loss = gen_log_loss_epoch/n_batch_per_epoch\n disc_losses.append(disc_loss)\n gen_total_losses.append(gen_total_loss)\n gen_L1_losses.append(gen_L1_loss)\n gen_log_losses.append(gen_log_loss)\n check_this_process_memory()\n print('Epoch %s/%s, Time: %.4f' % (init_epoch + e + 1, init_epoch + nb_epoch, time.time() - start))\n # Save images for visualization\n if (e + 1) % visualize_images_every_n_epochs == 0:\n data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model, batch_size, image_data_format,\n model_name, \"training\", init_epoch + e + 1, MAX_FRAMES_PER_GIF)\n # Get new images from validation\n X_full_batch, X_sketch_batch = next(data_utils.gen_batch(X_full_val, X_sketch_val, batch_size))\n 
data_utils.plot_generated_batch(X_full_batch, X_sketch_batch, generator_model, batch_size, image_data_format,\n model_name, \"validation\", init_epoch + e + 1, MAX_FRAMES_PER_GIF)\n # Plot losses\n data_utils.plot_losses(disc_losses, gen_total_losses, gen_L1_losses, gen_log_losses, model_name, init_epoch)\n # Save weights\n if (e + 1) % save_weights_every_n_epochs == 0:\n gen_weights_path = os.path.join('../../models/%s/gen_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n generator_model.save_weights(gen_weights_path, overwrite=True)\n disc_weights_path = os.path.join('../../models/%s/disc_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n discriminator_model.save_weights(disc_weights_path, overwrite=True)\n DCGAN_weights_path = os.path.join('../../models/%s/DCGAN_weights_epoch%05d_discLoss%.04f_genTotL%.04f_genL1L%.04f_genLogL%.04f.h5' % (model_name, init_epoch + e, disc_losses[-1], gen_total_losses[-1], gen_L1_losses[-1], gen_log_losses[-1]))\n DCGAN_model.save_weights(DCGAN_weights_path, overwrite=True)\n\n except KeyboardInterrupt:\n pass", "def build_unet(input_layer = Input((128,128,3)), start_depth=64, activation='relu', initializer='he_normal'):\n\n # 128 -> 64\n conv1 = Conv2D_BN(input_layer, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n conv1 = Conv2D_BN(conv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n pool1 = MaxPooling2D((2, 2))(conv1)\n\n # 64 -> 32\n conv2 = Conv2D_BN(pool1, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n conv2 = Conv2D_BN(conv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n pool2 = MaxPooling2D((2, 2))(conv2)\n\n # 32 -> 16\n conv3 = Conv2D_BN(pool2, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n conv3 = Conv2D_BN(conv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n pool3 = MaxPooling2D((2, 2))(conv3)\n\n # 16 -> 8\n conv4 = Conv2D_BN(pool3, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n conv4 = Conv2D_BN(conv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n pool4 = MaxPooling2D((2, 2))(conv4)\n\n # Middle\n convm=cbam_block(pool4)\n\n # 8 -> 16\n deconv4 = Conv2DTranspose(convm, start_depth * 8, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv4 = concatenate([deconv4, conv4])\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv4 = Conv2D_BN(uconv4, start_depth * 8, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 16 -> 32\n deconv3 = Conv2DTranspose(uconv4, start_depth * 4, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv3 = concatenate([deconv3, conv3])\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv3 = Conv2D_BN(uconv3, start_depth * 4, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 32 -> 64\n deconv2 = Conv2DTranspose(uconv3, start_depth * 2, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv2 = concatenate([deconv2, 
conv2])\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv2 = Conv2D_BN(uconv2, start_depth * 2, (3, 3), activation=activation, kernel_initializer=initializer)\n\n # 64 -> 128\n deconv1 = Conv2DTranspose(uconv2, start_depth * 1, (3, 3), strides=(2, 2), activation=activation, kernel_initializer=initializer)\n uconv1 = concatenate([deconv1, conv1])\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n uconv1 = Conv2D_BN(uconv1, start_depth * 1, (3, 3), activation=activation, kernel_initializer=initializer)\n\n output_layer = Conv2D(1, (1,1), padding=\"same\", activation=\"sigmoid\")(uconv1)\n\n return output_layer", "def vanilla_unet(input_shape=(512, 512, 3), base_depth=32, drop_rate=0,\n seed=1337):\n input = Input(input_shape)\n\n conv1 = Conv2D(base_depth, 3, activation='relu', padding='same')(input)\n bn1 = BatchNormalization()(conv1)\n drop1 = Dropout(drop_rate, seed=seed)(bn1)\n conv2 = Conv2D(base_depth, 3, activation='relu', padding='same')(drop1)\n bn2 = BatchNormalization()(conv2)\n mp1 = MaxPooling2D(pool_size=(2, 2))(bn2)\n\n conv3 = Conv2D(base_depth*2, 3, activation='relu', padding='same')(mp1)\n bn3 = BatchNormalization()(conv3)\n drop2 = Dropout(drop_rate, seed=seed+1)(bn3)\n conv4 = Conv2D(base_depth*2, 3, activation='relu', padding='same')(drop2)\n bn4 = BatchNormalization()(conv4)\n mp2 = MaxPooling2D(pool_size=(2, 2))(bn4)\n\n conv5 = Conv2D(base_depth*4, 3, activation='relu', padding='same')(mp2)\n bn5 = BatchNormalization()(conv5)\n drop3 = Dropout(drop_rate, seed=seed+2)(bn5)\n conv6 = Conv2D(base_depth*4, 3, activation='relu', padding='same')(drop3)\n bn6 = BatchNormalization()(conv6)\n mp3 = MaxPooling2D(pool_size=(2, 2))(bn6)\n\n conv7 = Conv2D(base_depth*8, 3, activation='relu', padding='same')(mp3)\n bn7 = BatchNormalization()(conv7)\n drop4 = Dropout(drop_rate, seed=seed+3)(bn7)\n conv8 = Conv2D(base_depth*8, 3, activation='relu', padding='same')(drop4)\n bn8 = BatchNormalization()(conv8)\n mp4 = MaxPooling2D(pool_size=(2, 2))(bn8)\n\n conv9 = Conv2D(base_depth*16, 3, activation='relu', padding='same')(mp4)\n bn9 = BatchNormalization()(conv9)\n drop5 = Dropout(drop_rate, seed=seed+4)(bn9)\n deconv0 = Conv2DTranspose(base_depth*16, 3, activation='relu',\n padding='same')(drop5)\n bn10 = BatchNormalization()(deconv0)\n up1 = UpSampling2D(interpolation='bilinear')(bn10)\n\n deconv1 = Conv2DTranspose(base_depth*8, 3, activation='relu',\n padding='same')(up1)\n bn11 = BatchNormalization()(deconv1)\n cat1 = concatenate([bn11, bn8])\n drop6 = Dropout(drop_rate, seed=seed+5)(cat1)\n deconv2 = Conv2DTranspose(base_depth*8, 3, activation='relu',\n padding='same')(drop6)\n bn12 = BatchNormalization()(deconv2)\n up2 = UpSampling2D(interpolation='bilinear')(bn12)\n\n deconv3 = Conv2DTranspose(base_depth*4, 3, activation='relu',\n padding='same')(up2)\n bn13 = BatchNormalization()(deconv3)\n cat2 = concatenate([bn13, bn6])\n drop7 = Dropout(drop_rate, seed=seed+6)(cat2)\n deconv4 = Conv2DTranspose(base_depth*4, 3, activation='relu',\n padding='same')(drop7)\n bn14 = BatchNormalization()(deconv4)\n up3 = UpSampling2D(interpolation='bilinear')(bn14)\n\n deconv5 = Conv2DTranspose(base_depth*2, 3, activation='relu',\n padding='same')(up3)\n bn15 = BatchNormalization()(deconv5)\n cat3 = concatenate([bn15, bn4])\n drop8 = Dropout(drop_rate, seed=seed+7)(cat3)\n deconv6 = Conv2DTranspose(base_depth*2, 3, activation='relu',\n padding='same')(drop8)\n bn16 = 
BatchNormalization()(deconv6)\n up4 = UpSampling2D(interpolation='bilinear')(bn16)\n\n deconv7 = Conv2DTranspose(base_depth, 3, activation='relu',\n padding='same')(up4)\n bn17 = BatchNormalization()(deconv7)\n cat4 = concatenate([bn17, bn2])\n drop7 = Dropout(drop_rate, seed=seed+8)(cat4)\n deconv8 = Conv2DTranspose(base_depth, 3, activation='relu',\n padding='same')(drop7)\n bn18 = BatchNormalization()(deconv8)\n\n out = Conv2DTranspose(1, 1, activation='sigmoid', padding='same')(bn18)\n\n return Model(input, out)", "def resnet50_base(freeze_blocks=[1,2,3], weight_regularizer=None, bias_regularizer=None):\n img_input = Input(shape=(None, None, 3))\n bn_axis = 3\n train1 = 1 not in freeze_blocks\n x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', trainable=train1,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n train2 = 2 not in freeze_blocks\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train3 = 3 not in freeze_blocks\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train4 = 4 not in freeze_blocks\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n base_model = Model(img_input, x, name='resnet50')\n\n return base_model", "def __init__(self, args):\n \n super(MicroNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 1, kernel_size=1)\n self.conv2 = nn.Conv2d(1, 29, kernel_size=5)\n self.maxpool2 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv3 = nn.Conv2d(29, 59, 
kernel_size=3)\n self.maxpool3 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv4 = nn.Conv2d(59, 74, kernel_size=3)\n self.maxpool4 = nn.MaxPool2d(3, stride=2 , ceil_mode=True)\n self.conv2_drop = nn.Dropout2d()\n self.conv3_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(1184, 300)\n self.fc2 = nn.Linear(300, args.num_classes)\n self.conv0_bn = nn.BatchNorm2d(3)\n self.conv1_bn = nn.BatchNorm2d(1)\n self.conv2_bn = nn.BatchNorm2d(29)\n self.conv3_bn = nn.BatchNorm2d(59)\n self.conv4_bn = nn.BatchNorm2d(74)\n self.dense1_bn = nn.BatchNorm1d(300)", "def run_code_for_training_with_CrossEntropy_and_BCE_Losses(self, net):\n filename_for_out1 = \"performance_numbers_\" + str(self.dl_studio.epochs) + \"label.txt\"\n filename_for_out2 = \"performance_numbers_\" + str(self.dl_studio.epochs) + \"regres.txt\"\n FILE1 = open(filename_for_out1, 'w')\n FILE2 = open(filename_for_out2, 'w')\n net = copy.deepcopy(net)\n net = net.to(self.dl_studio.device)\n criterion1 = nn.CrossEntropyLoss()\n# criterion2 = self.dl_studio.DetectAndLocalize.IOULoss(self.dl_studio.batch_size)\n criterion2 = nn.BCELoss()\n optimizer = optim.SGD(net.parameters(), \n lr=self.dl_studio.learning_rate, momentum=self.dl_studio.momentum)\n for epoch in range(self.dl_studio.epochs): \n running_loss_labeling = 0.0\n running_loss_regression = 0.0 \n for i, data in enumerate(self.train_dataloader):\n gt_too_small = False\n inputs, bbox_gt, labels = data['image'], data['bbox'], data['label']\n if self.dl_studio.debug_train and i % 1000 == 999:\n print(\"\\n\\n[iter=%d:] Ground Truth: \" % (i+1) + \n ' '.join('%5s' % self.dataserver_train.class_labels[labels[j].item()] for j in range(self.dl_studio.batch_size)))\n inputs = inputs.to(self.dl_studio.device)\n labels = labels.to(self.dl_studio.device)\n bbox_gt = bbox_gt.to(self.dl_studio.device)\n optimizer.zero_grad()\n outputs = net(inputs)\n outputs_label = outputs[0]\n bbox_pred = outputs[1]\n if self.dl_studio.debug_train and i % 500 == 499:\n inputs_copy = inputs.detach().clone()\n inputs_copy = inputs_copy.cpu()\n bbox_pc = bbox_pred.detach().clone()\n bbox_pc[bbox_pc<0] = 0\n bbox_pc[bbox_pc>31] = 31\n _, predicted = torch.max(outputs_label.data, 1)\n print(\"[iter=%d:] Predicted Labels: \" % (i+1) + \n ' '.join('%10s' % self.dataserver_train.class_labels[predicted[j].item()] \n for j in range(self.dl_studio.batch_size)))\n for idx in range(self.dl_studio.batch_size):\n i1 = int(bbox_gt[idx][1])\n i2 = int(bbox_gt[idx][3])\n j1 = int(bbox_gt[idx][0])\n j2 = int(bbox_gt[idx][2])\n k1 = int(bbox_pc[idx][1])\n k2 = int(bbox_pc[idx][3])\n l1 = int(bbox_pc[idx][0])\n l2 = int(bbox_pc[idx][2])\n print(\" gt_bb: [%d,%d,%d,%d]\"%(j1,i1,j2,i2))\n print(\" pred_bb: [%d,%d,%d,%d]\"%(l1,k1,l2,k2))\n inputs_copy[idx,0,i1:i2,j1] = 255\n inputs_copy[idx,0,i1:i2,j2] = 255\n inputs_copy[idx,0,i1,j1:j2] = 255\n inputs_copy[idx,0,i2,j1:j2] = 255\n inputs_copy[idx,2,k1:k2,l1] = 255 \n inputs_copy[idx,2,k1:k2,l2] = 255\n inputs_copy[idx,2,k1,l1:l2] = 255\n inputs_copy[idx,2,k2,l1:l2] = 255\n self.dl_studio.display_tensor_as_image(\n torchvision.utils.make_grid(inputs_copy, normalize=True),\n \"see terminal for TRAINING results at iter=%d\" % (i+1))\n mask_regress = torch.zeros(self.dl_studio.batch_size,32,32,requires_grad=False)\n mask_gt = torch.zeros(self.dl_studio.batch_size, 32,32)\n for k,out_regres in enumerate(bbox_pred):\n x1,y1,x2,y2 = bbox_pred[k].tolist()\n x1_gt,y1_gt,x2_gt,y2_gt = bbox_gt[k].tolist()\n x1,y1,x2,y2 = [int(item) if item >0 else 0 for item in (x1,y1,x2,y2)]\n 
x1_gt,y1_gt,x2_gt,y2_gt = [int(item) if item>0 else 0 for item in (x1_gt,y1_gt,x2_gt,y2_gt)]\n if abs(x1_gt - x2_gt)<5 or abs(y1_gt-y2_gt) < 5: gt_too_small = True\n mask_regress_np = np.zeros((32,32), dtype=bool)\n mask_gt_np = np.zeros((32,32), dtype=bool)\n mask_regress_np[y1:y2,x1:x2] = 1\n mask_gt_np[y1_gt:y2_gt, x1_gt:x2_gt] = 1\n mask_regress[k,:,:] = torch.from_numpy(mask_regress_np)\n mask_regress.requires_grad=True\n mask_gt[k,:,:] = torch.from_numpy(mask_gt_np)\n mask_gt.requires_grad=True \n loss_labeling = criterion1(outputs_label, labels)\n loss_labeling.backward(retain_graph=True) \n loss_regression = criterion2(mask_regress, mask_gt)\n loss_regression.requires_grad = True\n loss_regression.backward()\n optimizer.step()\n running_loss_labeling += loss_labeling.item() \n running_loss_regression += loss_regression.item() \n if i % 1000 == 999: \n avg_loss_labeling = running_loss_labeling / float(1000)\n avg_loss_regression = running_loss_regression / float(1000)\n print(\"[epoch:%d, batch:%5d] loss_labeling: %.3f loss_regression: %.3f \" % (epoch + 1, i + 1, avg_loss_labeling, avg_loss_regression))\n FILE1.write(\"%.3f\\n\" % avg_loss_labeling)\n FILE1.flush()\n FILE2.write(\"%.3f\\n\" % avg_loss_regression)\n FILE2.flush()\n running_loss_labeling = 0.0\n running_loss_regression = 0.0\n print(\"\\nFinished Training\\n\")\n self.save_model(net)", "def __init__(self, input_dim=(3, 32, 32), hidden_dims_CNN = ((32, 5, 1, 1), (2, 2, 2)),\n hidden_dims_FC = ((1024), (0.5)), num_classes=10, weight_scale=1e-3, \n reg=0.0, dtype=np.float32):\n self.params = {}\n self.fix_params = {}\n self.reg = reg\n self.dtype = dtype\n \n C_input, H_input, W_input = input_dim\n pre_C = C_input \n pre_H = H_input\n pre_W = W_input\n \n num_CNN = len(hidden_dims_CNN)\n num_FC = len(hidden_dims_FC)\n\n for i in range(0, num_CNN):\n W_name = \"W\" + str(i)\n b_name = \"b\" + str(i)\n conv_param_name = \"conv_param\" + str(i)\n gamma_name = \"gamma\" + str(i)\n beta_name = \"beta\" + str(i)\n bn_param_name = \"bn_param\" + str(i)\n pool_param_name = \"pool_param\" + str(i)\n\n if num_CNN == 1:\n num_filters, filter_size, stride, pad = hidden_dims_CNN[0] # (F, filter_size, stride, pad)\n pool_stride, pool_height, pool_width = hidden_dims_CNN[1] # (pooling_stride, pooling_size)\n else:\n num_filters, filter_size, stride, pad = hidden_dims_CNN[i][0] # (F, filter_size, stride, pad)\n pool_stride, pool_height, pool_width = hidden_dims_CNN[i][1] # (pooling_stride, pooling_size)\n \n if weight_scale == -1:\n self.params[W_name] = np.random.randn(num_filters, pre_C, filter_size, filter_size) / np.sqrt(filter_size * filter_size * pre_C)\n else: \n self.params[W_name] = np.random.randn(num_filters, pre_C, filter_size, filter_size) * weight_scale\n self.params[b_name] = np.zeros(num_filters)\n self.fix_params[conv_param_name] = {'stride': stride, 'pad': pad}\n \n self.params[gamma_name] = np.random.randn(num_filters)\n self.params[beta_name] = np.random.randn(num_filters)\n self.fix_params[bn_param_name] = {'mode': 'train'}\n\n self.fix_params[pool_param_name] = {'pool_height': pool_height, 'pool_width': pool_width, 'stride': pool_stride}\n \n pre_H, pre_W = cnn_out_shape(pre_H, pre_W, filter_size, filter_size, stride, pad)\n pre_C = num_filters \n pre_H, pre_W = pool_out_shape(pre_H, pre_W, pool_height, pool_width, pool_stride)\n\n pre_fc_dim = pre_H * pre_W * pre_C\n\n for i in range(0, num_FC):\n W_name = \"W\" + str(i + num_CNN)\n b_name = \"b\" + str(i + num_CNN)\n gamma_name = \"gamma\" + str(i + num_CNN)\n 
beta_name = \"beta\" + str(i + num_CNN)\n bn_param_name = \"bn_param\" + str(i + num_CNN)\n drop_name = \"drop_ratio\" + str(i + num_CNN)\n \n if num_FC == 1:\n fc_num = hidden_dims_FC[0]\n drop_ratio = hidden_dims_FC[1]\n else:\n fc_num = hidden_dims_FC[i][0]\n drop_ratio = hidden_dims_FC[i][1]\n\n if weight_scale == -1:\n self.params[W_name] = np.random.randn(pre_fc_dim, fc_num) / np.sqrt(pre_fc_dim)\n else:\n self.params[W_name] = np.random.randn(pre_fc_dim, fc_num) * weight_scale\n self.params[b_name] = np.zeros(fc_num)\n\n self.params[gamma_name] = np.random.randn(fc_num)\n self.params[beta_name] = np.random.randn(fc_num)\n self.fix_params[bn_param_name] = {'mode': 'train'}\n\n self.fix_params[drop_name] = {'mode': 'train', 'p': drop_ratio}\n\n pre_fc_dim = fc_num\n\n total_layer = num_CNN + num_FC\n W_name = \"W\" + str(total_layer)\n b_name = \"b\" + str(total_layer)\n if weight_scale == -1:\n self.params[W_name] = np.random.randn(pre_fc_dim, num_classes) / np.sqrt(pre_fc_dim)\n else:\n self.params[W_name] = np.random.randn(pre_fc_dim, num_classes) * weight_scale\n self.params[b_name] = np.zeros(num_classes)\n\n\n self.num_CNN = num_CNN\n self.num_FC = num_FC\n self.total_layer = num_CNN + num_FC\n\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def __init__(self):\n # def __init__(self, embed_size, input_channels):\n super(SignificanceCNN, self).__init__()\n # self.conv0 = nn.Conv2d(1, 1, (1, 3), padding=(0,0))\n self.conv0 = nn.Conv3d(1, 64, (1, 3, 3), padding=(0, 1, 1))\n self.bn0 = nn.BatchNorm3d(64)\n self.lrelu = nn.LeakyReLU()\n\n self.conv1 = nn.Conv3d(64, 128, (1, 3, 3), padding=(0, 1, 1))\n self.bn1 = nn.BatchNorm3d(128)\n \n self.conv2 = nn.Conv3d(128, 128, (1, 3, 3), padding=(0, 1, 1))\n self.bn2 = nn.BatchNorm3d(128)\n\n self.conv3 = nn.Conv3d(128, 64, (1, 3, 3), padding=(0, 1, 1))\n self.bn3 = nn.BatchNorm3d(64)\n\n self.conv4 = nn.Conv3d(64, 1, (1, 3, 3), padding=(0, 1, 1))\n self.bn4 = nn.BatchNorm3d(1)\n\n self.conv5 = nn.Conv3d(1, 1, (1, 3, 3), padding=(0, 1, 1))\n self.bn5 = nn.BatchNorm3d(1)", "def ResNet18(input_shape = (28, 28, 1), classes = 24):\n \n # Define the input as a tensor with shape input_shape\n X = X_input = Input(input_shape)\n\n \n # Zero-Padding\n X = ZeroPadding2D((3, 3))(X_input)\n \n # Stage 1\n X = Conv2D(64, (7, 7), strides = (2, 2), name = 'conv1', kernel_initializer = glorot_uniform(seed=0))(X)\n X = BatchNormalization(axis = 3, name = 'bn_conv1')(X)\n X = Activation('relu')(X)\n #X = MaxPooling2D((3, 3), strides=(2, 2))(X)\n\n # Stage 2\n X = convolutional_block(X, [64, 64], stage=2, block='a')\n X = identity_block(X, [64, 64], stage=2, block='b')\n\n # Stage 3\n X = convolutional_block(X, [128, 128], stage=3, block='a')\n X = identity_block(X, [128, 128], stage=3, block='b')\n\n # Stage 4\n X = convolutional_block(X, [256, 256], stage=4, block='a')\n X = identity_block(X, [256, 256], stage=4, block='b')\n\n # Stage 5\n X = convolutional_block(X, [512, 512], stage=5, block='a')\n X = identity_block(X, [512, 512], stage=5, block='b')\n\n # AVGPOOL\n # X = AveragePooling2D(pool_size=(2,2), name='avg_pool')(X)\n\n # output layer\n X = Flatten()(X)\n X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer = glorot_uniform(seed=0))(X)\n \n # Create model\n model = Model(inputs = X_input, outputs = X, name='ResNet18')\n\n return model", "def __init__(self, dim_input=1, dim_output=1, test_num_updates=5):\n self.dim_input = dim_input\n self.dim_output = dim_output\n self.update_lr = 
FLAGS.update_lr\n self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())\n self.auto_lr = tf.placeholder_with_default(FLAGS.auto_lr, ())\n \n self.classification = False\n self.test_num_updates = test_num_updates\n self.dim_auto = 2 # This should be able to be arbitrary\n if auto:\n self.real_input = 39 # This is square root of the total (it's a kernel)\n #self.real_output = 40#self.dim_output\n self.real_output = 39*39 # This should be the complete dimension out. \n self.dim_input = 3*self.dim_auto #= 3*self.dim_auto \n self.dim_output = self.dim_auto\n # This is from each. \n #if auto: self.dim_input, self.dim_output = self.dim_auto, self.dim_auto #If auto, pass in/out the dimension of the latent (auto_\n if FLAGS.datasource == 'sinusoid':\n self.dim_hidden = [40, 40, 40]\n self.loss_func = mse\n self.forward = self.forward_fc\n self.construct_weights = self.construct_fc_weights\n elif FLAGS.datasource == 'omniglot' or FLAGS.datasource == 'miniimagenet':\n self.loss_func = xent\n self.classification = True\n if FLAGS.conv:\n self.dim_hidden = FLAGS.num_filters\n self.forward = self.forward_conv\n self.construct_weights = self.construct_conv_weights\n else:\n self.dim_hidden = [256, 128, 64, 64]\n self.forward = self.forward_fc\n self.construct_weights = self.construct_fc_weights\n if FLAGS.datasource == 'miniimagenet':\n self.channels = 3\n else:\n self.channels = 1\n self.img_size = int(np.sqrt(self.dim_input/self.channels))\n else:\n raise ValueError('Unrecognized data source.')", "def test_init(self, epochs):\n i = -1\n for p in self.P:\n for subband in self.SUBBANDS:\n i += 1\n\n # --- load model ----\n pref = self.model_dir + \"/\" + self.name % (subband, p)\n model = copy.deepcopy(self.model)\n model.model.load_weights(pref + \"_epochs_%d\" % epochs[i])\n self.NET.append(model)\n # --- end load model ----\n\n # --- load permutation ----\n self.permutation.append(\n np.load(self.model_dir + \"/permutation_\" + self.name %\n (subband, p) + \".npy\"))\n # --- end load permutation ----", "def __init__(self, n_input_channels=3, n_conv_output_channels=16, k=3, s=1, pad=1, p = 0.5):\n super(ModelCNN, self).__init__()\n # 1. Convolutional layers\n # Single image is in shape: 3x96x96 (CxHxW, H==W), RGB images\n self.conv1 = nn.Conv2d(in_channels = n_input_channels, out_channels = n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn1 = nn.BatchNorm2d(n_conv_output_channels)\n self.conv2 = nn.Conv2d(in_channels = n_conv_output_channels, out_channels = 2*n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn2 = nn.BatchNorm2d(2*n_conv_output_channels)\n self.conv3 = nn.Conv2d(in_channels = 2*n_conv_output_channels, out_channels = 4*n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn3 = nn.BatchNorm2d(4*n_conv_output_channels)\n self.conv4 = nn.Conv2d(in_channels = 4*n_conv_output_channels, out_channels = 8*n_conv_output_channels, kernel_size = k, stride = s, padding = pad)\n self.bn4 = nn.BatchNorm2d(8*n_conv_output_channels)\n self.pool = nn.MaxPool2d(kernel_size = k - 1, stride = 2*s, padding = pad - pad)\n \n self.dropout = nn.Dropout(p = p)\n \n # 2. 
FC layers to final output\n self.fc1 = nn.Linear(in_features = 288*n_conv_output_channels, out_features = 32*n_conv_output_channels)\n self.fc_bn1 = nn.BatchNorm1d(32*n_conv_output_channels)\n self.fc2 = nn.Linear(in_features = 32*n_conv_output_channels, out_features = 16*n_conv_output_channels)\n self.fc_bn2 = nn.BatchNorm1d(16*n_conv_output_channels)\n self.fc3 = nn.Linear(in_features = 16*n_conv_output_channels, out_features = 8*n_conv_output_channels)\n self.fc_bn3 = nn.BatchNorm1d(8*n_conv_output_channels)\n self.fc4 = nn.Linear(in_features = 8*n_conv_output_channels, out_features = 1)", "def __init__(self):\n super(CNN, self).__init__()\n\n self.conv0 = nn.Conv2d(3, 3, kernel_size=5, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv0.weight)\n\n self.conv1 = nn.Conv2d(3, 30, kernel_size=5, stride=2, padding=0)\n self.conv1.weight = nn.Parameter(get_filters())\n\n self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n\n self.conv2 = nn.Conv2d(30, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv2.weight)\n\n self.conv3 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv3.weight)\n\n self.conv4 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv4.weight)\n\n self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n\n self.conv5 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv5.weight)\n\n self.conv6 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv6.weight)\n\n self.conv7 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv7.weight)\n\n self.conv8 = nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=0)\n nn.init.xavier_uniform_(self.conv8.weight)\n\n self.fc = nn.Linear(16 * 5 * 5, 2)\n\n self.drop1 = nn.Dropout(p=0.5) # used only for the NC dataset", "def __init__(self):\n super(SCNN, self).__init__()\n\n # Linear classifier.\n self.inplanes = 128\n self._norm_layer = nn.BatchNorm2d\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n self.num_class = 125\n backbone = torchvision.models.resnet34(pretrained=True)\n self.shared_features = nn.Sequential(*list(backbone.children())[0:6])\n #self.realistic_head = nn.Sequential(*list(backbone.children())[6:8])\n # self.synthetic_head = nn.Sequential(nn.Conv2d(128, 128, 3, 2, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 256, 3, 2, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),\n # nn.Conv2d(256, 256, 3, 1, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))\n\n self.synthetic_head1 = self._make_layer(BasicBlock, 128, 1, stride=2, dilate=False)\n self.synthetic_head2 = self._make_layer(BasicBlock, 256, 1, stride=2, dilate=False)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.classifier = nn.Linear(256, self.num_class)\n\n for m in self.synthetic_head1.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.synthetic_head2.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n weight_init(self.classifier)\n\n for 
param in self.shared_features.parameters():\n param.requires_grad = False", "def resnet101_base(freeze_blocks=[1,2,3], weight_regularizer=None, bias_regularizer=None):\n img_input = Input(shape=(None, None, 3))\n bn_axis = 3\n train1 = 1 not in freeze_blocks\n x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', trainable=train1, use_bias=False,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)\n x = Scale(axis=bn_axis, name='scale_conv1', trainable=False)(x)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n train2 = 2 not in freeze_blocks\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n\n train3 = 3 not in freeze_blocks\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n for i in range(1, 4):\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b' + str(i), trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n\n train4 = 4 not in freeze_blocks\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n for i in range(1, 23):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b' + str(i), trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer,\n use_conv_bias=False, separate_scale=True)\n\n base_model = Model(img_input, x, name='resnet101')\n\n return base_model", "def ResUNetPlusPlus(input_size: tuple, test_mode=False):\n assert len(input_size) == 3, \"[ERROR]: Expected tuple of length 3 got {0}\".format(len(input_size))\n\n image_width, image_height, n_channels = input_size\n\n inp = tf.keras.layers.Input(shape=(image_width, image_height, n_channels), dtype=\"float32\", name=\"input_layer\")\n\n # starting conv\n x = layers.Conv2DBN(64, 3, padding=\"same\", activation=\"relu\", name=\"conv_start\")(inp)\n\n # Residual block 1\n x = layers.ResidualBlock(64, 3, activation=\"relu\", name=\"rb_1\")(x)\n skip1 = x\n\n x = tf.keras.layers.Conv2D(128,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.SqueezeExcitationBlock(ratio=1)(x)\n\n # Residual block 2\n x = layers.ResidualBlock(128, 3, activation=\"relu\", name=\"rb_2\")(x)\n skip2 = x\n\n x = tf.keras.layers.Conv2D(256,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.SqueezeExcitationBlock(ratio=2)(x)\n\n # Residual block 3\n x = layers.ResidualBlock(256, 3, activation=\"relu\", name=\"rb_3\")(x)\n skip3 = x\n\n x = tf.keras.layers.Conv2D(512,\n kernel_size=(3, 3),\n strides=(2, 2),\n 
padding=\"same\",\n activation=\"linear\")(x)\n x = layers.SqueezeExcitationBlock(ratio=4)(x)\n\n # Residual block 4\n x = layers.ResidualBlock(512, 3, activation=\"relu\", name=\"rb_4\")(x)\n skip4 = x\n\n x = tf.keras.layers.Conv2D(1024,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.SqueezeExcitationBlock(ratio=8)(x)\n\n # Bottleneck ASPP\n x = layers.ASPP(256, [4, 8, 12], (256, 256), 16, activation=\"relu\", name=\"aspp_bottleneck\")(x)\n x = layers.Conv2DBN(1024, 1, activation=\"relu\")(x)\n\n # Up-sample L4\n x = layers.GlobalAttentionUpsample(name=\"GAU_4\")([skip4, x])\n x = tf.keras.layers.Concatenate(axis=-1)([x, skip4])\n x = tf.keras.layers.Conv2D(512,\n kernel_size=(3, 3),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.ResidualBlock(512, 3, activation=\"relu\", activate_begin=True, name=\"u_rb_4\")(x)\n\n # Up-sample L3\n x = layers.GlobalAttentionUpsample(name=\"GAU_3\")([skip3, x])\n x = tf.keras.layers.Concatenate(axis=-1)([x, skip3])\n x = tf.keras.layers.Conv2D(256,\n kernel_size=(3, 3),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.ResidualBlock(256, 3, activation=\"relu\", activate_begin=True, name=\"u_rb_3\")(x)\n\n # Up-sample L2\n x = layers.GlobalAttentionUpsample(name=\"GAU_2\")([skip2, x])\n x = tf.keras.layers.Concatenate(axis=-1)([x, skip2])\n x = tf.keras.layers.Conv2D(128,\n kernel_size=(3, 3),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.ResidualBlock(128, 3, activation=\"relu\", activate_begin=True, name=\"u_rb_2\")(x)\n\n # Up-sample L1\n x = layers.GlobalAttentionUpsample(name=\"GAU_1\")([skip1, x])\n x = tf.keras.layers.Concatenate(axis=-1)([x, skip1])\n x = tf.keras.layers.Conv2D(64,\n kernel_size=(3, 3),\n padding=\"same\",\n activation=\"linear\")(x)\n x = layers.ResidualBlock(64, 3, activation=\"relu\", activate_begin=True, name=\"u_rb_1\")(x)\n x = layers.Conv2DBN(1, 1, activation=\"sigmoid\")(x)\n\n return tf.keras.Model(inputs=[inp], outputs=[x])", "def __init__(self, conv_features_sizes, linear_layer_sizes, connector_shape):\n super().__init__()\n \n self.conv = nn.Sequential()\n self.mlp = nn.Sequential()\n self.flat = nn.Flatten()\n\n self.conv.add_module(name=f\"e-fconv{0}\", module=_conv2d_block(1, conv_features_sizes[0], kernel_size=3, padding=1))\n self.conv.add_module(name=f\"e-max{0}\", module=nn.MaxPool2d(2, 2))\n for i, (in_size, out_size) in enumerate(zip(conv_features_sizes[:-1], conv_features_sizes[1:]), 1):\n self.conv.add_module(name=f\"e-fconv{i}\", module=_conv2d_block(in_size, out_size, kernel_size=3, padding=1))\n self.conv.add_module(name=f\"e-max{i}\", module=nn.MaxPool2d(2, 2))\n\n mlp_input_shape = int(reduce((lambda x,y: x * y), connector_shape))\n self.mlp.add_module(name=f\"e-linear{0}\", module=nn.Linear(mlp_input_shape, linear_layer_sizes[0]))\n self.mlp.add_module(name=f\"e-batchnorm{0}\", module=nn.BatchNorm1d(linear_layer_sizes[0]))\n self.mlp.add_module(name=f\"e-relu{0}\", module=nn.ReLU())\n for i, (in_size, out_size) in enumerate(zip(linear_layer_sizes[:-1], linear_layer_sizes[1:]), 1):\n self.mlp.add_module(name=f\"e-linear{i}\", module=nn.Linear(in_size, out_size))\n self.mlp.add_module(name=f\"e-batchnorm{i}\", module=nn.BatchNorm1d(out_size))\n self.mlp.add_module(name=f\"e-relu{i}\", module=nn.ReLU())", "def __init__(self, options):\n print('Prepare the network and data.')\n self._options = options\n # Network.\n self._net = torch.nn.DataParallel(BCNN()).cuda()\n # Load the model from disk.\n 
#self._net.load_state_dict(torch.load(self._path['model']))\n print(self._net)\n # Criterion.\n self._criterion = torch.nn.CrossEntropyLoss().cuda()\n # Solver.\n self._solver = torch.optim.SGD(\n self._net.parameters(), lr=self._options['base_lr'],\n momentum=0.9, weight_decay=self._options['weight_decay'])\n self._scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n self._solver, mode='max', factor=0.1, patience=3, verbose=True,\n threshold=1e-4)\n\n self._train_path = os.path.join(self._options['text_path'],'train.txt')\n self._test_path = os.path.join(self._options['text_path'],'test.txt')\n\n #Dataloader\n transform = T.Compose([\n T.Resize(448), \n T.CenterCrop(448), \n T.ToTensor(), \n T.Normalize(mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225)) \n ])\n\n train_data = Data( train_path = self._train_path, aug_path = options['aug_data'], img_transform=transform)\n\n\n\n test_data = Data( train_path = self._test_path, aug_path = options['aug_data'], img_transform=transform)\n\n\n\n self._train_loader = torch.utils.data.DataLoader(dataset=train_data,\n batch_size=self._options['batch_size'],drop_last=True, pin_memory=True,\n shuffle=True,num_workers=4)\n\n self._test_loader = torch.utils.data.DataLoader(dataset=test_data,\n batch_size=self._options['batch_size'],pin_memory=True,\n shuffle=False,num_workers=4)", "def __init__(self, num_features, lr, train_index2value, test_index2value, target_grp, optimizer, dataset_type, strength):\n super().__init__()\n\n #self.save_hyperparameters()\n\n self.lr = lr\n self.train_index2value = train_index2value\n self.test_index2value = test_index2value\n self.target_grp = target_grp\n self.optimizer = optimizer\n self.dataset_type = dataset_type\n\n if self.dataset_type == 'tabular':\n self.net = nn.Linear(num_features, 1)\n elif self.dataset_type == 'image':\n # construct network\n if strength == 'weak':\n self.cnn = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=2, kernel_size=(3, 3)),\n nn.MaxPool2d(kernel_size=(2, 2)),\n nn.Flatten())\n self.fc = nn.Linear(338 + 1, 1)\n elif strength == 'normal':\n self.cnn = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3, 3)),\n nn.MaxPool2d(kernel_size=(2, 2)),\n nn.Flatten())\n self.fc = nn.Linear(5408 + 1, 1)\n elif strength == 'strong':\n self.cnn = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=64, kernel_size=(3, 3)),\n nn.MaxPool2d(kernel_size=(2, 2)),\n nn.Flatten())\n self.fc = nn.Linear(10816 + 1, 1)\n else:\n raise Exception(\"Strength of the Adversary CNN not recognized!\")\n else:\n raise Exception(f\"Model was unable to recognize dataset type {self.dataset_type}!\")\n \n # init loss\n self.loss_fct = nn.BCEWithLogitsLoss()", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=500,num_hidden_units_2=300,num_hidden_units_3=200,num_code_units=50):\n \n print(\"Making the model...\")\n network = model((None,200),200,num_hidden_units,num_hidden_units_2,num_hidden_units_3,num_code_units)\n print(\"Done!\")\n\n\n for tetrode_number in [10]:\n\n print(\"Loading the model parameters from {}\".format(MODEL_FILENAME+str(tetrode_number)))\n f = open(MODEL_FILENAME+str(tetrode_number),'r')\n all_param_values = pickle.load(f)\n f.close()\n # print(all_param_values)\n lasagne.layers.set_all_param_values(network, all_param_values)\n\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(dataset['data'].shape)\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n 
for i in range(NUM_EPOCHS):\n costs = []\n\n for start, end in zip(range(0, dataset['data'].shape[0], BATCH_SIZE), range(BATCH_SIZE, dataset['data'].shape[0], BATCH_SIZE)):\n cost = training['train'](dataset['data'][start:end],dataset['data'][start:end])\n costs.append(cost)\n\n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n # accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Training cost: {}\".format(i+1,meanTrainCost))\n # NUM_POINTS = 5000\n codes = training['code'](dataset['data'][0:NUM_POINTS])\n\n \n\n # y = set(list(d.predict(dataset['data'][0:NUM_POINTS])))\n\n # print(y)\n\n # activations_1 = training['activations_1'](dataset['data'][0:NUM_POINTS])\n # activations_2 = training['activations_2'](dataset['data'][0:NUM_POINTS])\n # codes = training['code'](dataset['data'][0:NUM_POINTS])\n # # print(codes.shape)\n # # codes_2d = bh_sne(codes)\n\n # for k in range(3):\n # print(k)\n\n # codes_2d = bh_sne(np.asarray(codes[:(k+1)*12000],dtype=np.float64))\n\n # # d = DPGMM(n_components=10, covariance_type='full')\n # d = DPGMM(n_components=15,n_iter=100)\n\n # d.fit(codes_2d[:(k+1)*12000])\n\n # hdp = d.predict_proba(codes_2d[:(k+1)*12000])\n\n # hdp_1d = [np.argmax(z) for z in hdp]\n\n # print(set(list(hdp_1d)))\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/hdp_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # m = TSNE(n_components=2, random_state=0)\n \n # # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # # activations_1_2d = bh_sne(activations_1)\n # # activations_2_2d = bh_sne(activations_2)\n\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS][:(k+1)*12000],alpha=0.8,lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # This is where the code for the video will go\n # ##############################################################################\n # # Compute DBSCAN\n # db = None\n # core_samples_mask = None\n # labels = None\n\n # num_labels = 0\n # eps=1.0\n # while(num_labels < 10):\n # db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n # core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n # core_samples_mask[db.core_sample_indices_] = True\n # labels = db.labels_\n # num_labels = np.amax(labels)\n # eps -= 0.1\n\n # print(\"Num learned labels: {}\".format(num_labels))\n\n # plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n # plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS][:(k+1)*12000],lw=0)\n # plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}_{}.png'.format(tetrode_number,k), bbox_inches='tight')\n # plt.close()\n\n # # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # # pickle.dump(labels, f)\n # # f.close()\n\n codes_2d = bh_sne(np.asarray(codes,dtype=np.float64),theta=0.4)\n\n # d = DPGMM(n_components=10, covariance_type='full')\n d = DPGMM(n_components=15,n_iter=1000)\n\n d.fit(codes_2d)\n\n hdp = d.predict_proba(codes_2d)\n\n hdp_1d = [np.argmax(z) for z in hdp]\n\n print(set(list(hdp_1d)))\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=hdp_1d, alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/hdp_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # m = TSNE(n_components=2, random_state=0)\n \n # codes_2d = m.fit_transform(codes[:NUM_POINTS])\n # activations_1_2d = 
bh_sne(activations_1)\n # activations_2_2d = bh_sne(activations_2)\n\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=dataset['labels'][0:NUM_POINTS],alpha=0.8,lw=0)\n plt.savefig('dbscan_labels/deep/sparse/tsne_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # This is where the code for the video will go\n ##############################################################################\n # Compute DBSCAN\n db = None\n core_samples_mask = None\n labels = None\n\n num_labels = 0\n eps=1.0\n while(num_labels < 10):\n db = DBSCAN(eps=eps, min_samples=10).fit(codes_2d)\n core_samples_mask = np.zeros_like(db.labels_, dtype=bool)\n core_samples_mask[db.core_sample_indices_] = True\n labels = db.labels_\n num_labels = np.amax(labels)\n eps -= 0.1\n\n print(\"Num learned labels: {}\".format(num_labels))\n\n plt.title('Estimated number of clusters: {}'.format(np.amax(labels)))\n plt.scatter(codes_2d[:, 0], codes_2d[:, 1], c=labels[0:NUM_POINTS],lw=0)\n plt.savefig('dbscan_labels/deep/sparse/dbscan_codes_{}.png'.format(tetrode_number), bbox_inches='tight')\n plt.close()\n\n # f=open('dbscan_labels/deep/sparse/tetrode_{}.npy'.format(tetrode_number),'w')\n # pickle.dump(labels, f)\n # f.close()", "def init(self):\n self.reparam_layers = []\n if self.model_type == \"GCN\":\n for i in range(self.num_layers):\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1),\n GCNConv(self.num_features if i == 0 else self.latent_size,\n self.latent_size if i != self.num_layers - 1 else self.num_classes,\n cached=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n sample_size=self.sample_size,\n bias=True if self.with_relu else False,\n val_use_mean=self.val_use_mean,\n normalize=self.normalize,\n ))\n # self.conv1 = ChebConv(self.num_features, 16, K=2)\n # self.conv2 = ChebConv(16, self.num_features, K=2)\n\n elif self.model_type == \"GAT\":\n latent_size = int(self.latent_size / 2) # Under the default setting, latent_size = 8\n for i in range(self.num_layers):\n if i == 0:\n input_size = self.num_features\n else:\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n input_size = latent_size * 8 * 2\n else:\n input_size = latent_size * 8\n if self.reparam_all_layers is True:\n is_reparam = True\n elif isinstance(self.reparam_all_layers, tuple):\n reparam_all_layers = tuple([kk + self.num_layers if kk < 0 else kk for kk in self.reparam_all_layers])\n is_reparam = i in reparam_all_layers\n else:\n raise\n if is_reparam:\n self.reparam_layers.append(i)\n setattr(self, \"conv{}\".format(i + 1), GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n if self.struct_dropout_mode[0] == 'DNsampling' or (self.struct_dropout_mode[0] == 'standard' and len(self.struct_dropout_mode) == 3):\n setattr(self, \"conv{}_1\".format(i + 1), 
GATConv(\n input_size,\n latent_size if i != self.num_layers - 1 else self.num_classes,\n heads=8 if i != self.num_layers - 1 else 1, concat=True,\n reparam_mode=self.reparam_mode if is_reparam else None,\n prior_mode=self.prior_mode if is_reparam else None,\n val_use_mean=self.val_use_mean,\n struct_dropout_mode=self.struct_dropout_mode,\n sample_size=self.sample_size,\n ))\n # On the Pubmed dataset, use heads=8 in conv2.\n \n else:\n raise Exception(\"Model_type {} is not valid!\".format(self.model_type))\n\n self.reparam_layers = sorted(self.reparam_layers)\n \n if self.model_type == \"GCN\":\n if self.with_relu:\n reg_params = [getattr(self, \"conv{}\".format(i+1)).parameters() for i in range(self.num_layers - 1)]\n self.reg_params = itertools.chain(*reg_params)\n self.non_reg_params = getattr(self, \"conv{}\".format(self.num_layers)).parameters()\n else:\n self.reg_params = OrderedDict()\n self.non_reg_params = self.parameters()\n else:\n self.reg_params = self.parameters()\n self.non_reg_params = OrderedDict()\n self.to(self.device)", "def TCN_V2(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=3,\n conv_layers=1, use_batchnorm=False, hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n self.conv_layers = conv_layers\n self.num_layers = conv_layers + 2 # Currently conv + affine + softmax\n self.use_batchnorm = use_batchnorm\n\n if self.use_batchnorm:\n self.bn_params = []\n self.bn_params = [{'mode': 'train'} for i in xrange(self.num_layers + 1)]\n\n 
############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n # Store weights and biases for the convolutional layer using the keys 'W1' #\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\n # of the output affine layer. #\n ############################################################################\n C, H, W = input_dim\n F = num_filters\n HH = filter_size\n WW = filter_size\n\n layer_dim = (F, C, HH, WW)\n\n # Conv - relu - pool weights\n for l in xrange(1, self.conv_layers + 1):\n self.params['W%d' % l] = np.random.normal(loc=0.0, scale=weight_scale, size=layer_dim)\n self.params['b%d' % l] = np.zeros(F)\n if self.use_batchnorm:\n self.params['gamma%d' % l] = np.ones(F)\n self.params['beta%d' % l] = np.zeros(F)\n layer_dim = (F, F, HH, WW)\n\n # Affine - Relu layer\n l = self.conv_layers + 1\n h_shape = ((num_filters * np.prod(input_dim[1:]) / 4**self.conv_layers), hidden_dim)\n self.params['W%d' % l] = np.random.normal(loc=0.0, scale=weight_scale, size=h_shape)\n self.params['b%d' % l] = np.zeros(hidden_dim)\n if self.use_batchnorm:\n self.params['gamma%d' % l] = np.ones(hidden_dim)\n self.params['beta%d' % l] = np.zeros(hidden_dim)\n\n # Final affine layer (hidden layers -> classes)\n l = l + 1\n a_shape = (hidden_dim, num_classes)\n self.params['W%d' % l] = np.random.normal(loc=0.0, scale=weight_scale, size=a_shape)\n self.params['b%d' % l] = np.zeros(num_classes)\n\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n 
task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def PXRCmodel(isize, nc, conv_init, ndf=128, bn=True, se=False):\n \n def squeeze_excite_block(tensor, ratio=16):\n \n init = tensor\n filters = init._keras_shape[3]\n se_shape = (1, 1, filters)\n\n se = GlobalAveragePooling2D()(init)\n se = Reshape(se_shape)(se)\n se = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(se)\n se = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(se)\n\n x = multiply([init, se])\n return x\n \n x = inputs = Input(shape=(isize, isize, nc))\n x = Conv2D(filters=ndf, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x) \n x = LeakyReLU(alpha=0.2)(x)\n \n \n x = Conv2D(filters=ndf*2, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf*2, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = LeakyReLU(alpha=0.2)(x)\n \n\n x = Conv2D(filters=ndf*4, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf*4, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = LeakyReLU(alpha=0.2)(x)\n \n \n x = Conv2D(filters=ndf*8, kernel_size=4, strides=2, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = Conv2D(filters=ndf*8, kernel_size=4, strides=1, use_bias=False,\n padding = \"same\", kernel_initializer = conv_init)(x)\n x = LeakyReLU(alpha=0.2)(x)\n \n \n y = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(x)\n y = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(y)\n y = Conv2D(filters=256, kernel_size=(3, 3), padding='same')(y)\n \n if (bn==True):\n y = BatchNormalization()(y)\n \n y = LeakyReLU()(y)\n y = MaxPool2D()(y)\n y = LeakyReLU()(y)\n \n ###########\n \n y = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(y)\n y = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(y)\n y = Conv2D(filters=128, kernel_size=(3, 3), padding='same')(y)\n \n if (se==True):\n y = squeeze_excite_block(y)\n \n if (bn==True):\n y = BatchNormalization()(y)\n \n y = LeakyReLU()(y)\n y = MaxPool2D()(y)\n y = LeakyReLU()(y)\n \n \n y = GlobalAveragePooling2D()(y)\n predictions = Dense(2, activation='softmax')(y)\n \n return Model(inputs=inputs, outputs=predictions)", "def __init__(self, layers, input_size):\n super(ConvNetMaker, self).__init__()\n self.conv_layers = []\n self.fc_layers = []\n # h, w, d = 32, 32, 3\n h, w, d = input_size, input_size, 3\n previous_layer_filter_count = 3\n previous_layer_size = h * w * d\n num_fc_layers_remained = len([1 for l in layers if l.startswith('FC')])\n for layer in layers:\n if layer.startswith('Conv'):\n filter_count = int(layer[4:])\n self.conv_layers += [\n nn.Conv2d(previous_layer_filter_count,\n filter_count,\n kernel_size=3,\n padding=1),\n nn.BatchNorm2d(filter_count),\n nn.ReLU(inplace=True)\n ]\n\n previous_layer_filter_count = filter_count\n d = filter_count\n previous_layer_size = h * w 
* d\n elif layer.startswith('MaxPool'):\n self.conv_layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n h, w = int(h / 2.0), int(w / 2.0)\n previous_layer_size = h * w * d\n elif layer.startswith('FC'):\n num_fc_layers_remained -= 1\n current_layer_size = int(layer[2:])\n if num_fc_layers_remained == 0:\n self.fc_layers += [nn.Linear(previous_layer_size,\n current_layer_size)]\n else:\n self.fc_layers += [nn.Linear(previous_layer_size,\n current_layer_size),\n nn.ReLU(inplace=True)]\n previous_layer_size = current_layer_size\n\n conv_layers = self.conv_layers\n fc_layers = self.fc_layers\n self.conv_layers = nn.Sequential(*conv_layers)\n self.fc_layers = nn.Sequential(*fc_layers)", "def train(n_hidden_1, dropout, lr, wdecay, _run):\n\n ### DO NOT CHANGE SEEDS!\n # Set the random seeds for reproducibility\n np.random.seed(42)\n\n ## Prepare all functions\n # Get number of units in each hidden layer specified in the string such as 100,100\n if FLAGS.dnn_hidden_units:\n dnn_hidden_units = FLAGS.dnn_hidden_units.split(\",\")\n dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]\n else:\n dnn_hidden_units = []\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n def get_xy_tensors(batch):\n x, y = batch\n x = torch.tensor(x.reshape(-1, 3072), dtype=torch.float32).to(device)\n y = torch.tensor(y, dtype=torch.long).to(device)\n return x, y\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n datasets = cifar10_utils.read_data_sets(DATA_DIR_DEFAULT, one_hot=False)\n train_data = datasets['train']\n test_data = datasets['test']\n model = MLP(n_inputs=3072, n_hidden=[n_hidden_1, 400], n_classes=10, dropout=dropout).to(device)\n loss_fn = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=wdecay)\n\n log_every = 50\n avg_loss = 0\n avg_acc = 0\n for step in range(FLAGS.max_steps):\n x, y = get_xy_tensors(train_data.next_batch(FLAGS.batch_size))\n\n # Forward and backward passes\n optimizer.zero_grad()\n out = model.forward(x)\n loss = loss_fn(out, y)\n loss.backward()\n\n # Parameter updates\n optimizer.step()\n\n avg_loss += loss.item() / log_every\n avg_acc += accuracy(out, y) / log_every\n if step % log_every == 0:\n print('[{}/{}] train loss: {:.6f} train acc: {:.6f}'.format(step,\n FLAGS.max_steps,\n avg_loss, avg_acc))\n _run.log_scalar('train-loss', avg_loss, step)\n _run.log_scalar('train-acc', avg_acc, step)\n avg_loss = 0\n avg_acc = 0\n\n # Evaluate\n if step % FLAGS.eval_freq == 0 or step == (FLAGS.max_steps - 1):\n x, y = get_xy_tensors(test_data.next_batch(test_data.num_examples))\n model.eval()\n out = model.forward(x)\n model.train()\n test_loss = loss_fn(out, y).item()\n test_acc = accuracy(out, y)\n print('[{}/{}] test accuracy: {:6f}'.format(step, FLAGS.max_steps, test_acc))\n\n _run.log_scalar('test-loss', test_loss, step)\n _run.log_scalar('test-acc', test_acc, step)\n ########################\n # END OF YOUR CODE #\n #######################", "def build_network(self):\n\n input_placeholder = Input(shape = self.input_shape)\n\n # Stage 1\n x = self.main_path_block(\n input_placeholder,\n 64, (7, 7), 'same',\n 'conv1', 'bn_conv1',\n activation = 'relu',\n strides = (2, 2)\n )\n x = MaxPooling2D((3, 3), strides = (2, 2), padding = 'same')(x)\n\n # Stage 2\n x = self.identity_block(x, 64, 'relu', 2, 'a', False)\n x = self.identity_block(x, 64, 'relu', 2, 'b')\n\n # Stage 3\n x = self.convolutional_block(x, [128, 128, 128], 'relu', 3, 'a')\n x = 
self.identity_block(x, 128, 'relu', 3, 'b')\n\n # Stage 4\n x = self.convolutional_block(x, [256, 256, 256], 'relu', 4, 'a')\n x = self.identity_block(x, 256, 'relu', 4, 'b')\n\n # Stage 5\n x = self.convolutional_block(x, [512, 512, 512], 'relu', 5, 'a')\n x = self.identity_block(x, 512, 'relu', 5, 'b')\n\n # Fully Connected Layers\n x = BatchNormalization(axis = 3)(x)\n x = Activation('relu')(x)\n x = AveragePooling2D((2, 1), padding = 'valid', strides = (2, 2))(x)\n x = Flatten()(x)\n x = Dense(512)(x)\n x = Dense(\n self.classes, activation = 'softmax',\n name = 'fc_' + str(self.classes),\n kernel_initializer = glorot_uniform(seed = 0)\n )(x)\n\n self.model = Model(input_placeholder, x, name = 'Resnet18')", "def conv_net_lasagne(X_train: np.ndarray, y_train: np.ndarray):\n\n print(\"WARNING: Training this neural net leads to serious memory issues\")\n\n net1 = NeuralNet(\n layers=[('input', layers.InputLayer),\n ('conv2d1', layers.Conv2DLayer),\n ('maxpool1', layers.MaxPool2DLayer),\n ('conv2d2', layers.Conv2DLayer),\n ('maxpool2', layers.MaxPool2DLayer),\n ('dropout1', layers.DropoutLayer),\n ('dense', layers.DenseLayer),\n ('dropout2', layers.DropoutLayer),\n ('output', layers.DenseLayer),\n ],\n # input layer\n input_shape=(None, 1, 28, 28),\n # layer conv2d1\n conv2d1_num_filters=32,\n conv2d1_filter_size=(5, 5),\n conv2d1_nonlinearity=lasagne.nonlinearities.rectify,\n conv2d1_W=lasagne.init.GlorotUniform(),\n # layer maxpool1\n maxpool1_pool_size=(2, 2),\n # layer conv2d2\n conv2d2_num_filters=32,\n conv2d2_filter_size=(5, 5),\n conv2d2_nonlinearity=lasagne.nonlinearities.rectify,\n # layer maxpool2\n maxpool2_pool_size=(2, 2),\n # dropout1\n dropout1_p=0.5,\n # dense\n dense_num_units=256,\n dense_nonlinearity=lasagne.nonlinearities.rectify,\n # dropout2\n dropout2_p=0.5,\n # output\n output_nonlinearity=lasagne.nonlinearities.softmax,\n output_num_units=10,\n # optimization method params\n update=nesterov_momentum,\n update_learning_rate=0.01,\n update_momentum=0.9,\n max_epochs=10,\n verbose=1,\n )\n # Train the network\n nn = net1.fit(X_train, y_train)\n\n return nn", "def resnet_v2(input_shape, depth, num_classes=7):\n if (depth - 2) % 9 != 0:\n raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')\n \n num_filters_in = 16\n num_res_blocks = int((depth - 2) / 9)\n \n inputs = Input(shape=input_shape)\n x = resnet_layer(inputs=inputs,\n num_filters=num_filters_in,\n conv_first=True)\n \n for stage in range(3):\n for res_block in range(num_res_blocks):\n activation = 'relu'\n batch_normalization = True\n strides = 1\n # num of param setting \n if stage == 0: # first stage\n num_filters_out = num_filters_in * 4\n if res_block == 0: # first layer & first stage\n activation = None\n batch_normalization = False\n else: # second, third stage\n num_filters_out = num_filters_in * 2\n if res_block == 0: # first layer but no first stage\n strides = 2 # downsample\n y = resnet_layer(inputs=x,\n num_filters=num_filters_in,\n kernel_size=1,\n strides=strides,\n activation=activation,\n batch_normalization=batch_normalization,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_in,\n conv_first=False)\n y = resnet_layer(inputs=y,\n num_filters=num_filters_out,\n kernel_size=1,\n conv_first=False)\n if res_block == 0:\n # Linear projection residual shortcut connection to match\n # changed dims\n # at the first time, make a shortcut origin\n x = resnet_layer(inputs=x,\n num_filters=num_filters_out,\n kernel_size=1,\n strides=strides,\n activation=None,\n 
batch_normalization=False)\n # and add at every repetition\n x = keras.layers.add([x, y])\n \n num_filters_in = num_filters_out\n \n # Add classifier on top\n # v2 has BN_ReLU before Pooling\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n x = AveragePooling2D(pool_size=8)(x)\n y = Flatten()(x)\n outputs = Dense(num_classes,\n activation='softmax',\n kernel_initializer='he_normal')(y)\n \n # Instantiate model\n model = Model(inputs=inputs, outputs=outputs)\n \n return model", "def __init__(\n self, config: SimpleGCNConfig = SimpleGCNConfig(name=\"simplegcn\")\n ):\n super().__init__()\n self.edge_lengthscale = config.edge_lengthscale\n self.weight_edges = config.weight_edges\n\n self.atom_embedding = nn.Linear(\n config.atom_input_features, config.width\n )\n\n self.layer1 = GraphConv(config.width, config.width)\n self.layer2 = GraphConv(config.width, config.output_features)\n self.readout = AvgPooling()", "def _init_layers(self):\n self.relu = nn.ReLU(inplace=True)\n self.cls_convs = nn.ModuleList()\n self.reg_convs = nn.ModuleList()\n for i in range(self.stacked_convs):\n chn = self.in_channels if i == 0 else self.feat_channels\n self.cls_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.reg_convs.append(\n ConvModule(\n chn,\n self.feat_channels,\n 3,\n stride=1,\n padding=1,\n conv_cfg=dict(type='DCN', deform_groups=1)\n if i == 0 and self.use_dcn else self.conv_cfg,\n norm_cfg=self.norm_cfg))\n self.atss_cls = nn.Conv2d(\n self.feat_channels,\n self.num_base_priors * self.cls_out_channels,\n 3,\n padding=1)\n self.atss_reg = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n self.atss_iou = nn.Conv2d(\n self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n self.scales = nn.ModuleList(\n [Scale(1.0) for _ in self.prior_generator.strides])\n\n # we use the global list in loss\n self.cls_num_pos_samples_per_level = [\n 0. for _ in range(len(self.prior_generator.strides))\n ]\n self.reg_num_pos_samples_per_level = [\n 0. 
for _ in range(len(self.prior_generator.strides))\n ]", "def __init__(\n self,\n numpy_rng,\n train_set_x,\n train_set_y,\n hidden_layers_sizes,\n n_ins=784,\n n_outs=10\n ):\n\n self.sigmoid_layers = []\n self.AE_layers = []\n self.params = []\n self.n_layers = len(hidden_layers_sizes)\n self.train_set_x = train_set_x\n self.train_set_y = train_set_y\n\n assert self.n_layers > 0\n\n self.x = T.matrix('x') # the data is presented as rasterized images\n self.y = T.ivector('y') # the labels are presented as 1D vector of\n\n for i in xrange(self.n_layers): # used to be n layers\n\n # construct the sigmoid layer = encoder stack\n if i == 0:\n layer_input = self.x\n else:\n layer_input = self.sigmoid_layers[-1].output\n\n sigmoid_layer = HiddenLayer(rng=numpy_rng,\n input=layer_input,\n n_in=(n_ins if i == 0 else\n hidden_layers_sizes[i-1]),\n n_out=hidden_layers_sizes[i],\n activation=T.nnet.sigmoid)\n\n # add the layer to our list of layers\n self.sigmoid_layers.append(sigmoid_layer)\n self.params.extend(sigmoid_layer.params)\n\n # init the DA_layer, takes weights from sigmoid layer\n AE_layer = AutoEncoder(\n numpy_rng=numpy_rng,\n input=layer_input,\n n_visible=(n_ins if i == 0 else hidden_layers_sizes[i-1]),\n n_hidden=hidden_layers_sizes[i],\n W=sigmoid_layer.W,\n bhid=sigmoid_layer.b)\n\n self.AE_layers.append(AE_layer)\n\n # on top of the layers\n # log layer for fine-tuning\n self.logLayer = LogisticRegression(\n input=self.sigmoid_layers[-1].output,\n n_in=hidden_layers_sizes[-1],\n n_out=n_outs\n )\n self.params.extend(self.logLayer.params)\n self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)\n self.errors = self.logLayer.errors(self.y)", "def run_simple_CNN():\n [train_set, test_set, train_sampler, val_sampler, test_sampler] = pre_processing_and_samples()\n CNN = SimpleCNN()\n # Using GPU for training\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(device)\n if torch.cuda.is_available():\n print(\"cuda is available\")\n CNN.to(device)\n\n # Multiple GPUs\n if torch.cuda.device_count() > 1:\n CNN = nn.DataParallel(CNN)\n\n num_epochs = 100\n\n train_loss_hist, train_acc_hist, val_loss_hist, val_acc_hist = \\\n trainCNN(net=CNN, device=device, batch_size=64, n_epochs=num_epochs, learning_rate=0.001,\n train_set=train_set, test_set=test_set, train_sampler=train_sampler, val_sampler=val_sampler)\n test(net=CNN, device=device, test_set=test_set, test_sampler=test_sampler)\n\n fig, (ax1, ax2) = plt.subplots(2)\n\n ax1.set_title(\"Loss vs. Number of Training Epochs\")\n ax1.set(xlabel=\"Training Epoch\", ylabel=\"Loss\")\n ax1.plot(range(1, len(train_loss_hist) + 1), train_loss_hist, label=\"Training\")\n ax1.plot(range(1, len(val_loss_hist) + 1), val_loss_hist, label=\"Validation\")\n print(np.concatenate((train_loss_hist, val_loss_hist)))\n print(np.amax(np.concatenate((train_loss_hist, val_loss_hist))))\n ax1.set_ylim(\n (0, 1.25 * np.amax(np.concatenate((train_loss_hist, val_loss_hist), axis=0, out=None)).detach().cpu()))\n ax1.set_xticks(np.arange(1, num_epochs + 1, 1.0))\n ax1.legend()\n\n ax2.set_title(\"Accuracy vs. 
Number of Training Epochs\")\n ax2.set(xlabel=\"Training Epoch\", ylabel=\"Accuracy\")\n ax2.plot(range(1, num_epochs + 1), train_acc_hist, label=\"Training\")\n ax2.plot(range(1, num_epochs + 1), val_acc_hist, label=\"Validation\")\n ax2.set_ylim(0, 100) # Sets y bounds\n ax2.set_xticks(np.arange(1, num_epochs + 1, 1.0))\n ax2.legend()\n\n plt.tight_layout() # Call after plotting all subplots\n plt.savefig('basic_cifar_10.png')", "def example1():\n DATASETS_DICT = './data'\n IMG_SIZE = CONFIG['img_size']\n\n # x_train = DataLoader.load(os.path.join(DATASETS_DICT, 'x_train_cats_dogs.npy'))\n # y_train = DataLoader.load(os.path.join(DATASETS_DICT, 'y_train_cats_dogs.npy'))\n # x_train = DataLoader.load(os.path.join(DATASETS_DICT, 'x_cats_dogs_skimage.npy'))\n # y_train = DataLoader.load(os.path.join(DATASETS_DICT, 'y_cats_dogs_skimage.npy'))\n\n # x_train = DataLoader.load(os.path.join(DATASETS_DICT, 'x_rps_skimage.npy'))\n # y_train = DataLoader.load(os.path.join(DATASETS_DICT, 'y_rps_skimage.npy'))\n x_train = DataLoader.load_npy(CONFIG['data']['x_path'])\n y_train = DataLoader.load_npy(CONFIG['data']['y_path'])\n\n x_train = torch.Tensor(x_train).view(-1, IMG_SIZE, IMG_SIZE)\n y_train = torch.Tensor(y_train)\n\n N_TRAIN = CONFIG['n_train']\n N_EVAL = CONFIG['n_eval']\n N_TEST = CONFIG['n_test']\n\n if N_TRAIN + N_EVAL + N_TEST > len(x_train):\n raise Exception('Not enough data!')\n\n\n # resnet50 works with 224, 244 input size\n n_output = 2\n net = ConvNet(n_output)\n optimizer = optim.Adam(net.parameters(), lr=1e-3)\n loss_function = nn.MSELoss()\n\n # split data\n x_eval = x_train[:N_EVAL]\n y_eval = y_train[:N_EVAL]\n\n x_test = x_train[N_EVAL:N_EVAL+N_TEST]\n y_test = y_train[N_EVAL:N_EVAL+N_TEST]\n\n x_train = x_train[N_EVAL+N_TEST:N_EVAL+N_TEST+N_TRAIN]\n y_oracle = y_train[N_EVAL+N_TEST:N_EVAL+N_TEST+N_TRAIN]\n\n # show_grid_imgs(x_train[:16], y_oracle[:16], (4, 4))\n\n EPOCHS = 10\n BATCH_SIZE = 128\n\n print('Start training')\n for epoch in range(EPOCHS):\n for k in tqdm(range(0, len(x_train), BATCH_SIZE)):\n batch_x = x_train[k:k+BATCH_SIZE].view(-1, 1, IMG_SIZE, IMG_SIZE)\n batch_y = y_oracle[k:k+BATCH_SIZE]\n\n net.zero_grad()\n\n out = net(batch_x)\n loss = loss_function(out, batch_y)\n loss.backward()\n optimizer.step()\n\n print(f'Epoch: {epoch}. 
Loss: {loss}')\n\n correct = 0\n total = 0\n\n with torch.no_grad():\n for k in tqdm(range(len(x_test))):\n real_class = torch.argmax(y_test[k])\n net_out = net(x_test[k].view(-1, 1, IMG_SIZE, IMG_SIZE))[0] # returns list\n predicted_class = torch.argmax(net_out)\n\n if predicted_class == real_class:\n correct += 1\n total += 1\n\n print('Accuracy: ', round(correct/total, 3))\n\n torch.save(net, f'{DATASETS_DICT}/cnn_rps_model.pt')", "def __init__(self,\n inplanes,\n planes,\n stride=1,\n dilation=1,\n downsample=None,\n style='pytorch',\n with_cp=False,\n conv_cfg=None,\n norm_cfg=dict(type='BN')):\n super(Bottleneck, self).__init__()\n assert style in ['pytorch', 'caffe']\n self.inplanes = inplanes\n self.planes = planes\n self.stride = stride\n self.dilation = dilation\n self.style = style\n self.with_cp = with_cp\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n\n if self.style == 'pytorch':\n self.conv1_stride = 1\n self.conv2_stride = stride\n else:\n self.conv1_stride = stride\n self.conv2_stride = 1\n\n self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n self.norm3_name, norm3 = build_norm_layer(\n norm_cfg, planes * self.expansion, postfix=3)\n\n self.conv1 = build_conv_layer(\n conv_cfg,\n inplanes,\n planes,\n kernel_size=1,\n stride=self.conv1_stride,\n bias=False)\n self.add_module(self.norm1_name, norm1)\n\n self.conv2 = build_conv_layer(\n conv_cfg,\n planes,\n planes,\n kernel_size=3,\n stride=self.conv2_stride,\n padding=dilation,\n dilation=dilation,\n bias=False)\n\n self.add_module(self.norm2_name, norm2)\n self.conv3 = build_conv_layer(\n conv_cfg,\n planes,\n planes * self.expansion,\n kernel_size=1,\n bias=False)\n self.add_module(self.norm3_name, norm3)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample", "def __init__(self, config, input_shp):\n\n # Run initialization for super class\n super(MyNetwork, self).__init__()\n\n # Store configuration\n self.config = config\n\n # Placeholder for layers\n self.layers = {}\n indim = input_shp[0]\n\n # Retrieve Conv, Act, Pool functions from configurations. We'll use\n # these for our code below.\n if config.conv2d == \"torch\":\n self.Conv2d = nn.Conv2d\n elif config.conv2d == \"custom\":\n self.Conv2d = ConvBlock\n self.Activation = getattr(nn, config.activation)\n self.Pool2d = getattr(nn, config.pool2d)\n self.Linear = nn.Linear\n\n # Resnet Blocks, similar to slide 73 of lecture 21. However, for\n # simplicity, we'll make it slightly different. Note that we used\n # nn.Sequential this time.\n self.convs = nn.Sequential()\n cur_h, cur_w = input_shp[-2:]\n for _i in range(config.num_conv_outer):\n #\n # NOTE THE NEW LAYER ON THESE LINES!\n #\n # We have a dedicated 1x1 layer to get more channels. Note also\n # that this is a pure linear convolution layer.\n outdim = config.nchannel_base * 2 ** _i\n self.convs.add_module(\n \"conv_{}_base\".format(_i), nn.Conv2d(indim, outdim, 1, 1, 0))\n indim = outdim\n for _j in range(config.num_conv_inner):\n # We now use our selected convolution layer. 
Note that our\n # resnet implementation will have a different call style to\n # vanilla conv2d of torch, so we'll just do an ugly if-else\n # here.\n if config.conv2d == \"torch\":\n self.convs.add_module(\n \"conv_{}_{}\".format(_i, _j),\n self.Conv2d(indim, outdim, config.ksize, 1, 1))\n self.convs.add_module(\n \"act_{}_{}\".format(_i, _j),\n self.Activation())\n cur_h = cur_h - (config.ksize - 1)\n cur_w = cur_w - (config.ksize - 1)\n elif config.conv2d == \"custom\":\n self.convs.add_module(\n \"conv_{}_{}\".format(_i, _j),\n self.Conv2d(indim, outdim, config.ksize, 1, self.Activation))\n self.convs.add_module(\n \"conv_{}_pool\".format(_i), self.Pool2d(2, 2))\n cur_h = cur_h // 2\n cur_w = cur_w // 2\n\n # Final output layer. We'll assume that conv layer outputs are global\n # average pooled\n self.output = nn.Linear(indim, config.num_class)\n\n print(self)", "def init_train(self):\n data = self.loader.load_labelled_data(self.conf.split, 'training')\n\n # Initialise unlabelled data iterator\n num_ul = 0\n if self.conf.ul_mix > 0:\n ul_data = self.loader.load_unlabelled_data(self.conf.split, 'all')\n\n # calculate number of unlabelled images as a proportion of the labelled images\n num_ul = int(data.size() * self.conf.ul_mix)\n num_ul = num_ul if num_ul <= ul_data.size() else ul_data.size()\n log.info('Sampling %d unlabelled images out of total %d.' % (num_ul, ul_data.size()))\n ul_data.sample(num_ul)\n self.gen_X_U = data_utils.generator(self.conf.batch_size, 'overflow', ul_data.images)\n\n # Initialise labelled data iterator\n assert self.conf.l_mix >= 0\n\n # calculate number of labelled images\n num_l = int(data.size() * self.conf.l_mix)\n num_l = num_l if num_l <= data.size() else data.size()\n log.info('Using %d labelled images out of total %d.' 
% (num_l, data.size()))\n train_images = data.images[:num_l]\n train_masks = data.masks[:num_l]\n\n self.conf.unlabelled_image_num = num_ul\n self.conf.labelled_image_num = num_l\n self.conf.data_len = num_ul if num_ul > num_l else num_l\n self.conf.batches = int(np.ceil(self.conf.data_len / self.conf.batch_size))\n self.conf.save()\n\n self.gen_X_L = data_utils.generator(self.conf.batch_size, 'overflow', train_images, train_masks)\n\n # Initialise real masks iterator for discriminator training, using the real masks from the data CV split.\n self.other_masks = data_utils.generator(self.conf.batch_size, 'overflow', data.masks + 0)", "def build_cnn(input_var=None):\n\n # input layer\n network = lasagne.layers.InputLayer(\n shape=(\n None,\n 1,\n 128,\n 129\n ),\n input_var=input_var\n )\n\n # conv\n network = lasagne.layers.Conv2DLayer(\n lasagne.layers.batch_norm(network), # Batch norm on incoming\n num_filters=32, # Number of convolution filters to use\n filter_size=(5, 5),\n stride=(1, 1), # Stride of (1,1)\n pad='same', # Keep output size same as input\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.GlorotUniform() # W initialization\n )\n\n # conv\n #network = lasagne.layers.Conv2DLayer(\n #lasagne.layers.batch_norm(network), # Batch norm on incoming\n #num_filters=32, # Number of convolution filters to use\n #filter_size=(5, 5),\n #stride=(1, 1), # Stride of (1,1)\n #pad='same', # Keep output size same as input\n #nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n #W=lasagne.init.GlorotUniform() # W initialization\n #)\n\n # pool (2x2 max pool)\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2)\n )\n\n # conv\n network = lasagne.layers.Conv2DLayer(\n lasagne.layers.batch_norm(network), # Batch norm on incoming\n num_filters=32, # Number of convolution filters to use\n filter_size=(3, 3),\n stride=(1, 1), # Stride of (1,1)\n pad='same', # Keep output size same as input\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.GlorotUniform() # W initialization\n )\n\n # conv\n #network = lasagne.layers.Conv2DLayer(\n #lasagne.layers.batch_norm(network), # Batch norm on incoming\n #num_filters=32, # Number of convolution filters to use\n #filter_size=(3, 3),\n #stride=(1, 1), # Stride of (1,1)\n #pad='same', # Keep output size same as input\n #nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n #W=lasagne.init.GlorotUniform() # W initialization\n #)\n\n # pool (2x2 max pool)\n network = lasagne.layers.MaxPool2DLayer(\n network, pool_size=(2, 2)\n )\n\n # Fully-connected layer of 256 units with 50% dropout on its inputs\n network = lasagne.layers.DenseLayer(\n lasagne.layers.dropout(network, p=.5),\n num_units=256,\n nonlinearity=lasagne.nonlinearities.leaky_rectify, #rectify, # ReLU\n W=lasagne.init.HeUniform() # W initialization\n )\n\n # Finally add a 1-unit sigmoid output layer\n network = lasagne.layers.DenseLayer(\n network,\n num_units=1,\n nonlinearity=lasagne.nonlinearities.sigmoid\n )\n\n return network", "def train_classifier(data, n_iters=3, batch_size=100):\n tqdm.write(f'Training a dilated CNN classifier for {n_iters} iterations.')\n (trainx, trainy), (valx, valy), (testx, testy) = data\n train_size, val_size, test_size = trainx.shape[0], valx.shape[0], testx.shape[0]\n train_batches = (train_size - 1) // batch_size + 1\n val_batches = (val_size - 1) // batch_size + 1\n test_batches = (test_size - 1) // batch_size + 1\n\n model = Network()\n 
model.add_layer(ConvLayer(10, (3, 3), (1, 1), 2)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(Pad2DLayer((2, 2))) \\\n .add_layer(ConvLayer(10, (3, 3), (1, 1), 2)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(Pool2DLayer((2, 2))) \\\n .add_layer(ConvLayer(10, (3, 3), (1, 1), 2)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(Pool2DLayer((2, 2))) \\\n .add_layer(FlattenLayer()) \\\n .add_layer(FCLayer(32)) \\\n .add_layer(ReluLayer()) \\\n .add_layer(FCLayer(10)) \\\n .add_layer(SoftmaxCELayer())\n for i in range(1, n_iters + 1):\n train_order = np.random.permutation(train_size)\n bar = trange(train_batches, file=sys.stdout)\n for j in bar:\n cost = model.forward(trainx[train_order[j * batch_size: (j + 1) * batch_size]],\n trainy[train_order[j * batch_size: (j + 1) * batch_size]])\n bar.set_description(f'Curr loss: {cost}')\n model.backward()\n model.adam_trainstep()\n correct = []\n for j in range(val_batches):\n res = model.run(valx[j * batch_size:(j + 1) * batch_size])\n correct.append(np.argmax(res, axis=1) == valy[j * batch_size:(j + 1) * batch_size])\n tqdm.write(f'Validation accuracy: {np.mean(correct)}')\n tqdm.write('-------------------------------------------------------')\n\n correct = []\n for i in range(test_batches):\n res = model.run(testx[i * batch_size:(i + 1) * batch_size])\n correct.append(np.argmax(res, axis=1) == testy[i * batch_size:(i + 1) * batch_size])\n tqdm.write(f'Test accuracy: {np.mean(correct)}')\n tqdm.write('-------------------------------------------------------')", "def inception_network():\n X = K.Input(shape=(224, 224, 3))\n initializer = K.initializers.he_normal(seed=None)\n conv_1 = K.layers.Conv2D(filters=64, kernel_size=7,\n padding='same', strides=2,\n kernel_initializer=initializer,\n activation='relu')(X)\n max_pool_1 = K.layers.MaxPooling2D(pool_size=3, strides=2,\n padding='same')(conv_1)\n\n conv_2 = K.layers.Conv2D(filters=64, padding='same',\n kernel_size=1, activation='relu',\n kernel_initializer=initializer)(max_pool_1)\n conv2_1 = K.layers.Conv2D(filters=192, padding='same',\n kernel_size=3, activation='relu',\n kernel_initializer=initializer)(conv_2)\n max_pool_2 = K.layers.MaxPooling2D(pool_size=3, strides=2,\n padding='same')(conv2_1)\n\n incep_3a = inception_block(max_pool_2, [64, 96, 128, 16, 32, 32])\n incep_3b = inception_block(incep_3a, [128, 128, 192, 32, 96, 64])\n max_pool_3 = K.layers.MaxPooling2D(pool_size=3, strides=2,\n padding='same')(incep_3b)\n\n incep_4a = inception_block(max_pool_3, [192, 96, 208, 16, 48, 64])\n incep_4b = inception_block(incep_4a, [160, 112, 224, 24, 64, 64])\n incep_4c = inception_block(incep_4b, [128, 128, 256, 24, 64, 64])\n incep_4d = inception_block(incep_4c, [112, 144, 288, 32, 64, 64])\n incep_4e = inception_block(incep_4d, [256, 160, 320, 32, 128, 128])\n max_pool_4 = K.layers.MaxPooling2D(pool_size=3, strides=2,\n padding='same')(incep_4e)\n\n incep_5a = inception_block(max_pool_4, [256, 160, 320, 32, 128, 128])\n incep_5b = inception_block(incep_5a, [384, 192, 384, 48, 128, 128])\n avg_pool = K.layers.AveragePooling2D(pool_size=7, strides=None)(incep_5b)\n\n drop_out = K.layers.Dropout(0.4)(avg_pool)\n dense = K.layers.Dense(units=1000, activation='softmax',\n kernel_initializer=initializer)(drop_out)\n return K.models.Model(inputs=X, outputs=dense)", "def __init__(self, embed_size):\n super(EncoderCNN, self).__init__()\n resnet = models.resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n self.resnet = nn.Sequential(*modules)\n self.pooling = 
nn.MaxPool2d(2,stride = 2)\n self.linear = nn.Linear(resnet.fc.in_features, embed_size)\n self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)\n self.init_weights()", "def model_generator(input_shape=(256, 256, 3), input_mask=(256, 256, 1)):\n \n out = Conv2D(64, kernel_size=5, strides=1, padding='same',\n dilation_rate=(1, 1))(input_shape)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n\n out = Conv2D(128, kernel_size=3, strides=2,\n padding='same', dilation_rate=(1, 1))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n out = Conv2D(128, kernel_size=3, strides=1,\n padding='same', dilation_rate=(1, 1))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n\n out = Conv2D(256, kernel_size=3, strides=2,\n padding='same', dilation_rate=(1, 1))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n out = Conv2D(256, kernel_size=3, strides=1,\n padding='same', dilation_rate=(1, 1))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n out = Conv2D(256, kernel_size=3, strides=1,\n padding='same', dilation_rate=(1, 1))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n\n out = Conv2D(256, kernel_size=3, strides=1,\n padding='same', dilation_rate=(2, 2))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n out = Conv2D(256, kernel_size=3, strides=1,\n padding='same', dilation_rate=(4, 4))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n out = Conv2D(256, kernel_size=3, strides=1,\n padding='same', dilation_rate=(8, 8))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n out = Conv2D(256, kernel_size=3, strides=1,\n padding='same', dilation_rate=(16, 16))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n\n out = Conv2D(256, kernel_size=3, strides=1,\n padding='same', dilation_rate=(1, 1))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n out = Conv2D(256, kernel_size=3, strides=1,\n padding='same', dilation_rate=(1, 1))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n\n out = Conv2DTranspose(128, kernel_size=4, strides=2,\n padding='same')(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n out = Conv2D(128, kernel_size=3, strides=1,\n padding='same', dilation_rate=(1, 1))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n\n out = Conv2DTranspose(64, kernel_size=4, strides=2,\n padding='same')(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n out = Conv2D(32, kernel_size=3, strides=1,\n padding='same', dilation_rate=(1, 1))(out)\n out = BatchNormalization()(out)\n out = Activation('relu')(out)\n\n out = Conv2D(3, kernel_size=3, strides=1,\n padding='same', dilation_rate=(1, 1))(out)\n out = BatchNormalization()(out)\n out = Activation('sigmoid')(out)\n # x [0] * x [2]: Cut out the region where the mask bit is set from out (make the region other than mask 0)\n # x [1] * (1 - x [2]): Cut out the region where the bit of mask is not set from input_image\n # Merge (add) the above two to make the image replaced only with the output of NN for the mask part\n out = keras.layers.Lambda(lambda x: x[0] * x[2] + x[1] * (1 - x[2]),\n trainable=False)([out, input_shape, input_mask])\n model = Model([input_shape,input_mask],out)\n\n return model, out", "def __init__(self, kernel_size, filters, stage, block, strides=(2, 2)):\n super().__init__(name='conv_block' + str(stage) + block)\n filters1, 
filters2, filters3 = filters\n if K.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n self.conv1 = layers.Conv2D(\n filters1, (1, 1),\n strides=strides,\n kernel_initializer='he_normal',\n name=conv_name_base + '2a')\n self.bn1 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')\n self.act1 = layers.Activation('relu')\n\n self.conv2 = layers.Conv2D(\n filters2,\n kernel_size,\n padding='same',\n kernel_initializer='he_normal',\n name=conv_name_base + '2b')\n self.bn2 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')\n self.act2 = layers.Activation('relu')\n\n self.conv3 = layers.Conv2D(\n filters3, (1, 1),\n kernel_initializer='he_normal',\n name=conv_name_base + '2c')\n self.bn3 = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')\n\n self.shortcut_conv = layers.Conv2D(\n filters3, (1, 1),\n strides=strides,\n kernel_initializer='he_normal',\n name=conv_name_base + '1')\n self.shortcut_bn = layers.BatchNormalization(\n axis=bn_axis, name=bn_name_base + '1')\n\n self.add = layers.Add()\n self.act = layers.Activation('relu')", "def main(tetrode_number=TETRODE_NUMBER,num_hidden_units=300,num_hidden_units_2=200,num_code_units=50):\n print(\"Loading the data...\")\n dataset = load_data(tetrode_number)\n print(\"Done!\")\n\n print(\"Tetrode number: {}, Num outputs: {}\".format(tetrode_number,dataset['output_dim']))\n\n print(dataset['input_shape'])\n print(dataset['output_dim'])\n \n print(\"Making the model...\")\n network = model(dataset['input_shape'],dataset['output_dim'],num_hidden_units,num_hidden_units_2,num_code_units,(4,1))\n print(\"Done!\")\n\n print(\"Setting up the training functions...\")\n training = funcs(dataset,network)\n print(\"Done!\")\n\n accuracies = []\n trainvalidation = []\n\n print(\"Beginning to train the network...\")\n epochsDone = 0\n autoencoderSameLabels = []\n try:\n for i in range(NUM_EPOCHS):\n costs = []\n valid_costs = []\n\n for start, end in zip(range(0, dataset['num_examples_train'], BATCH_SIZE), range(BATCH_SIZE, dataset['num_examples_train'], BATCH_SIZE)):\n cost = training['train'](dataset['X_train'][start:end],dataset['y_train'][start:end])\n costs.append(cost)\n \n for start, end in zip(range(0, dataset['num_examples_valid'], BATCH_SIZE), range(BATCH_SIZE, dataset['num_examples_valid'], BATCH_SIZE)):\n cost = training['valid'](dataset['X_valid'][start:end],dataset['y_valid'][start:end])\n valid_costs.append(cost)\n\n\n meanValidCost = np.mean(np.asarray(valid_costs),dtype=np.float32) \n meanTrainCost = np.mean(np.asarray(costs,dtype=np.float32))\n accuracy = training['accuracy'](dataset['X_test'],dataset['y_test'])\n\n print(\"Epoch: {}, Accuracy: {}, Training cost / validation cost: {}\".format(i+1,accuracy,meanTrainCost/meanValidCost))\n\n if(np.isnan(meanTrainCost/meanValidCost)):\n print(\"Nan value\")\n break\n\n\n # this is the test to see if the autoencoder is learning how to \n if i%10==0:\n acs = []\n for j in range(dataset['caswells_dim']):\n # print(dataset['labeled_test'][j].shape)\n codes = training['code'](dataset['labeled_test'][j])\n np.mean(np.argmax(dataset['y_test'], axis=1) == np.argmax(training['predict'](dataset['X_test']), axis=1))\n format_codes = []\n for code in codes:\n # if(j==0):\n format_codes.append(np.argmax(code))\n\n prev = sorted(format_codes)[0]\n # print(sorted(format_codes))\n k = 0\n same = [1]\n for code in 
sorted(format_codes)[1:]:\n if(code == prev):\n same[k] = same[k] + 1\n else:\n k+=1\n same.append(1)\n prev = code\n\n same = np.asarray(same)\n # print(same,np.argmax(same),same[np.argmax(same)],np.sum(same))\n label_acc = same[np.argmax(same)]*1.0/np.sum(same)\n acs.append(label_acc)\n print(\"Label: {}, Num examples: {}, Same label with autoencoder: {} \".format(j,dataset['labeled_test'][j].shape[0],label_acc))\n acs = np.asarray(acs)\n autoencoderSameLabels.append(np.mean(acs))\n print(\"Average agreement: {}\".format(np.mean(acs)))\n\n\n if i%50 == 0:\n ran = randint(0,dataset['num_examples_test']-20)\n now = datetime.datetime.now()\n for j in range(10):\n testing = [dataset['X_test'][ran]]\n # print(testing[0].shape)\n output = dataset['y_test'][ran].reshape((1, 200))[0]\n print(output)\n\n # print(np.arange(dataset['output_dim']))\n # print(output)\n prediction = training['predict'](testing)[0].reshape((1, 200))[0]\n print(prediction)\n # print(prediction)\n # print(testing[0][0])\n \n code = training['code'](testing).reshape((1, 50))\n\n # print(code)\n \n # plotting the figure\n\n fig = plt.figure(1)\n sub1 = fig.add_subplot(311)\n sub2 = fig.add_subplot(312)\n sub3 = fig.add_subplot(313)\n\n # add titles\n\n sub1.set_title('Desired output')\n sub2.set_title('Net output')\n sub3.set_title('Code layer output')\n\n # adding x labels\n\n sub1.set_xlabel('Time')\n sub2.set_xlabel('Time')\n sub3.set_xlabel('Code label')\n\n # adding y labels\n\n sub1.set_ylabel('Amplitude')\n sub2.set_ylabel('Amplitude')\n sub3.set_ylabel('Probability')\n\n # Plotting data\n\n # print(testing[0][0])\n # inp = []\n # for z in range(4):\n # inp += list(testing[0][0][z])\n\n\n sub1.plot(output)\n # sub1.bar(x_axis, output, width=1)\n sub1.grid(True)\n\n sub2.plot(prediction)\n sub2.grid(True)\n\n x_axis = list(np.arange(len(code[0])))\n\n # sub3.plot(code[0])\n sub3.bar(x_axis, code[0], width=1)\n # plt.show()\n\n fig.tight_layout()\n\n # plt.plot(var2)\n # fig.tight_layout()\n plt.savefig('../logs/convAuto/fig{}_{}_{}.png'.format(i,j,now), bbox_inches='tight')\n plt.close()\n \n ran += 1\n # break\n\n\n trainvalidation.append([meanTrainCost,meanValidCost])\n accuracies.append(accuracy)\n if(EARLY_STOPPING):\n if(len(accuracies) < STOPPING_RANGE):\n pass\n else:\n test = [k for k in accuracies if k < accuracy]\n if not test:\n print('Early stopping causing training to finish at epoch {}'.format(i+1))\n break\n del accuracies[0]\n accuracies.append(accuracy)\n\n epochsDone = epochsDone + 1\n\n except KeyboardInterrupt:\n pass\n\n # plt.plot(trainvalidation)\n # plt.show()\n\n if(LOG_EXPERIMENT):\n print(\"Logging the experiment details...\")\n log = dict(\n NET_TYPE = \"Conv auto encoder 2 hidden 1 code\",\n TETRODE_NUMBER = tetrode_number,\n BASENAME = BASENAME,\n NUM_EPOCHS = epochsDone,\n BATCH_SIZE = BATCH_SIZE,\n TRAIN_VALIDATION = trainvalidation,\n LEARNING_RATE = LEARNING_RATE,\n MOMENTUM = MOMENTUM,\n SAME_LABEL_AVERAGES = autoencoderSameLabels,\n ACCURACY = accuracies,\n NETWORK_LAYERS = [str(type(layer)) for layer in lasagne.layers.get_all_layers(network)],\n OUTPUT_DIM = dataset['output_dim'],\n # NETWORK_PARAMS = lasagne.layers.get_all_params_values(network)\n )\n now = datetime.datetime.now()\n filename = \"experiments/convAuto/{}_{}_{}_NUMLAYERS_{}_OUTPUTDIM_{}\".format(now,NUM_EPOCHS,NUM_HIDDEN_UNITS,len(log['NETWORK_LAYERS']),log['OUTPUT_DIM'])\n filename = re.sub(\"[^A-Za-z0-9_/,-:]\", \"\", filename)\n with open(filename,\"w\") as outfile:\n outfile.write(str(log))", "def __init__(self, 
hidden_dims, input_dim=3*32*32, num_classes=10,\n                 dropout=1, normalization=None, reg=0.0,\n                 weight_scale=1e-2, dtype=np.float32, seed=None):\n        self.normalization = normalization\n        self.use_dropout = dropout != 1\n        self.reg = reg\n        self.num_layers = 1 + len(hidden_dims)\n        self.dtype = dtype\n        self.params = {}\n\n        ############################################################################\n        # TODO: Initialize the parameters of the network, storing all values in    #\n        # the self.params dictionary. Store weights and biases for the first layer #\n        # in W1 and b1; for the second layer use W2 and b2, etc.                   #\n        # When using batch normalization, store scale and shift parameters for the #\n        # first layer in gamma1 and beta1; for the second layer use gamma2 and     #\n        # beta2, etc. Scale parameters should be initialized to ones and shift     #\n        # parameters should be initialized to zeros.                               #\n        ############################################################################\n        input_size = input_dim\n        for i in range(len(hidden_dims)):\n            output_size = hidden_dims[i]\n            self.params['W' + str(i+1)] = np.random.randn(input_size,output_size) * weight_scale\n            self.params['b' + str(i+1)] = np.zeros(output_size)\n            if self.normalization:\n                self.params['gamma' + str(i+1)] = np.ones(output_size)\n                self.params['beta' + str(i+1)] = np.zeros(output_size)\n            input_size = output_size # input size for the next layer\n        # output layer, no batch norm\n        self.params['W' + str(self.num_layers)] = np.random.randn(input_size,num_classes) * weight_scale\n        self.params['b' + str(self.num_layers)] = np.zeros(num_classes)\n        # When using dropout we need to pass a dropout_param dictionary to each\n        # dropout layer so that the layer knows the dropout probability and the mode\n        # (train / test). You can pass the same dropout_param to each dropout layer.\n        self.dropout_param = {}\n        if self.use_dropout:\n            self.dropout_param = {'mode': 'train', 'p': dropout}\n            if seed is not None:\n                self.dropout_param['seed'] = seed\n\n        # With batch normalization we need to keep track of running means and\n        # variances, so we need to pass a special bn_param object to each batch\n        # normalization layer. You should pass self.bn_params[0] to the forward pass\n        # of the first batch normalization layer, self.bn_params[1] to the forward\n        # pass of the second batch normalization layer, etc.\n        self.bn_params = []\n        if self.normalization=='batchnorm':\n            self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]\n        if self.normalization=='layernorm':\n            self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n        # Cast all parameters to the correct datatype\n        for k, v in self.params.items():\n            self.params[k] = v.astype(dtype)" ]
[ "0.67401975", "0.6634952", "0.657736", "0.649362", "0.6486684", "0.64751244", "0.64618695", "0.64333904", "0.64330065", "0.6408339", "0.64069897", "0.6405594", "0.6391012", "0.63891834", "0.638887", "0.6384175", "0.6357176", "0.6355764", "0.63509816", "0.6346752", "0.6345361", "0.6334365", "0.63150436", "0.6288047", "0.62841105", "0.6279429", "0.6272141", "0.6271547", "0.6271176", "0.6267742", "0.6251993", "0.6251364", "0.6246921", "0.6245862", "0.62315977", "0.62299806", "0.6216105", "0.62155944", "0.62109005", "0.6203069", "0.61972713", "0.61962354", "0.6194196", "0.6188516", "0.6186152", "0.6184639", "0.6183547", "0.61791825", "0.6169229", "0.6168522", "0.61661756", "0.6163147", "0.61571324", "0.61569303", "0.6155765", "0.6154362", "0.61377454", "0.61357003", "0.6133056", "0.6131244", "0.6130357", "0.6120536", "0.6119706", "0.61184627", "0.6116553", "0.61123246", "0.6111152", "0.61063427", "0.6104059", "0.6101818", "0.60994285", "0.6097869", "0.6096389", "0.60920906", "0.60865307", "0.60819864", "0.60789585", "0.6070349", "0.60660595", "0.6063517", "0.6062151", "0.60579574", "0.6056186", "0.605472", "0.6051323", "0.60473233", "0.6046125", "0.60431665", "0.604277", "0.6041294", "0.6036594", "0.60226846", "0.6019352", "0.6018671", "0.60179615", "0.6017341", "0.60169625", "0.6016927", "0.6014526", "0.60144305", "0.6005668" ]
0.0
-1
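For context, a minimal standalone sketch of the per-layer parameter-initialization pattern used by the fully connected network in the record above (W1/b1, W2/b2, ...); the layer sizes and weight_scale value here are illustrative assumptions, not taken from the record.

import numpy as np

def init_params(layer_dims, weight_scale=1e-2):
    # layer_dims lists input, hidden, and output widths, e.g. [3*32*32, 100, 10].
    params = {}
    for i in range(len(layer_dims) - 1):
        fan_in, fan_out = layer_dims[i], layer_dims[i + 1]
        # Small random weights, zero biases -- the same scheme as above.
        params['W%d' % (i + 1)] = weight_scale * np.random.randn(fan_in, fan_out)
        params['b%d' % (i + 1)] = np.zeros(fan_out)
    return params

params = init_params([3 * 32 * 32, 100, 10])
assert params['W1'].shape == (3072, 100) and params['W2'].shape == (100, 10)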
Create a one-hot encoding of x of size k.
def one_hot(x, k, dtype=np.float32): return np.array(x[:, None] == np.arange(k), dtype)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _one_hot(z, K):\n z_one_hot = np.zeros((z.size, K))\n z_one_hot[np.arange(z.size), z] = 1\n return z_one_hot", "def one_hot(x, k, dtype=np.float32):\n return np.array(x[:, None] == np.arange(k), dtype)", "def _one_hot(x, k, dtype=np.float32):\n return np.array(x[:, None] == np.arange(k), dtype)", "def one_hot_encode(x):\n # TODO: Implement Function\n output = np.zeros((len(x), 10))\n \n for i, j in enumerate(x):\n output[i,j] = 1\n \n return output", "def one_hot_encode(x, n_classes):\n return np.eye(n_classes)[x]", "def one_hot_encode(y, out_size):\n n = len(y)\n oh = np.zeros((n, out_size))\n oh[range(n), y] = 1\n return oh", "def one_hot(self, x, N):\n\n ### YOUR CODE HERE ###\n M = x.shape[0]\n one_hot = np.zeros((N, M))\n for i in range(M):\n one_hot[x[i], i] = 1\n \n# for xi,intWord in enumerate(x):\n# one_hot[xi,intWord] = 1\n \n\n assert one_hot.shape == (N, x.shape[0])\n return one_hot", "def one_hot_encode(x):\n # TODO: Implement Function\n x_l = list(x)\n for index in np.arange(len(x_l)):\n x_l[index] = get_one_hot_vector(x[index])[x[index]]\n return np.array(x_l)", "def make_one_hot(X, onehot_size):\n if onehot_size < 450:\n dig_one = np.zeros((onehot_size, onehot_size))\n np.fill_diagonal(dig_one, 1)\n rX = dig_one[np.asarray(X)]\n else:\n # for large onehot size, this is faster\n rX = np.zeros((len(X), onehot_size))\n for i in range(len(X)):\n rX[i, X[i]] = 1\n return rX", "def one_hot(x, num_classes, dtype=jnp.float32):\n return jax.nn.one_hot(x, num_classes).astype(dtype)", "def make_onehot(x,num_labels=7):\n enc = OneHotEncoder(n_values=num_labels)\n return enc.fit_transform(np.array(x).reshape(-1, 1)).toarray()", "def to_onehot(x, num_classes):\n return np.eye(num_classes, dtype='float32')[x]", "def to_one_hot(v):\n n = len(v)\n m = max(v) + 1\n out = np.zeros((n, m))\n out[np.arange(n), v] = 1\n return out", "def oneHot(index, n):\n x = np.zeros(n)\n x[index] = 1\n return x", "def one_hot_encode(idx, vocab_size):\n # Initialize the encoded array\n one_hot = np.zeros(vocab_size)\n \n # Set the appropriate element to one\n one_hot[idx] = 1.0\n\n return one_hot", "def one_hot_encoding(labels, num_classes=10):\n num_labels = labels.shape[0]\n encoded = np.zeros((num_labels, num_classes))\n encoded[np.arange(num_labels), labels[np.arange(num_labels)]] = 1\n \n return encoded", "def one_hot(Y, n_c):\n Y_onehot = np.zeros((Y.size, n_c))\n Y_onehot[np.arange(Y.size), Y] = 1\n return Y_onehot.T", "def one_hot(y_):\n y_ = y_.reshape(len(y_))\n n_values = int(np.max(y_)) + 1\n n_values = 6\n return np.eye(n_values)[np.array(y_, dtype=np.int32)] # Returns FLOATS", "def one_hot(self, x, vocab, n_grams) -> np.ndarray:\n _x = np.zeros((x.__len__(), self.max_length, vocab.__len__()))\n for i, item in enumerate(tqdm(x, desc='One Hot Text Encoder')):\n items = TextProcessor.n_gram_split(item, n_grams)\n for j, token in enumerate(items):\n if j >= self.max_length:\n break\n if token in vocab:\n idx = vocab.index(token)\n _x[i][j][idx] = 1\n\n return _x", "def make_one_hot(y):\n one_hot = np.zeros((len(y), 10))\n for i in range(len(y)):\n one_hot[i, y[i]] = 1.\n return one_hot.transpose(1,0)", "def one_hot(index, dims, dtype=np.uint8):\n\n seq_len = len(index)\n ret = np.zeros((seq_len, dims), dtype)\n for i in range(seq_len):\n ret[i][index[i]]=1\n\n return ret", "def one_hot_encode(x):\n # TODO: Implement Function\n lb = preprocessing.LabelBinarizer()\n lb.fit([0,1,2,3,4,5,6,7,8,9])\n \n return lb.transform(x)", "def _onehot(y, n_classes=False):\n if not n_classes:\n \"\"\"Create 
one-hot encoded labels.\"\"\"\n n_classes = len(set(y))\n out = np.zeros((len(y), n_classes))\n for i, ii in enumerate(y):\n out[i][ii] += 1\n y_onehot = out.astype(int)\n return y_onehot", "def one_hot_encode(self, arr, n_labels):\n one_hot = np.zeros((np.multiply(*arr.shape), n_labels), dtype=np.float32)\n one_hot[np.arange(one_hot.shape[0]), arr.flatten()] = 1.\n one_hot = one_hot.reshape((*arr.shape, n_labels))\n \n return one_hot", "def to_categorical(x, n_col=None):\n if not n_col:\n n_col = np.amax(x) + 1\n one_hot = np.zeros((x.shape[0], n_col))\n one_hot[np.arange(x.shape[0]), x] = 1\n return one_hot", "def one_hot(array, N):\r\n\r\n array = array.astype(int)\r\n assert numpy.max(array) < N\r\n assert numpy.min(array) >= 0\r\n\r\n one_hot = numpy.zeros((array.shape[0], N))\r\n one_hot[numpy.arange(array.shape[0]), array] = 1\r\n return one_hot", "def onehot_encoding(labels, dim, device):\n out = th.zeros(list(labels.size()) + [dim]).to(device)\n out.scatter_(len(out.size()) - 1, labels.unsqueeze(-1), 1.0)\n return out", "def one_hot_encode(x_: ArrayLike) -> tuple[IntArray, dict[str, int]]:\n x: np.ndarray = np.copy(x_)\n if x.ndim == 1:\n x = x[:, np.newaxis]\n shape = x.shape\n has_na = np.any(pd.isna(x))\n if x.dtype == object:\n x = x.astype(str)\n categories, codes = np.unique(x, return_inverse=True)\n num_classes = len(categories)\n encoded_x = np.zeros((x.size, num_classes), dtype=np.uint8)\n encoded_x[np.arange(x.size), codes.astype(np.uint8).ravel()] = 1\n encoded_x = encoded_x.reshape(*shape, num_classes)\n if has_na:\n # remove NaN column\n categories = categories[:-1]\n encoded_x = encoded_x[:, :, :-1]\n mapping = {\n _category_name(category): code for code, category in enumerate(categories)\n }\n return encoded_x, mapping", "def _one_hot_encode(label_vector, total_num_labels):\n out = np.zeros(shape=(len(label_vector), total_num_labels))\n for i in range(len(label_vector)):\n out[i, int(label_vector[i])] = 1\n return out", "def one_hot_encode(vector, values = 10):\n n = len(vector)\n out = numpy.zeros( (n, values) )\n out[range(n), vector] = 1\n return out", "def one_hot_encoding(y):\n\n y_oh = np.zeros((y.shape[0], y.max() - y.min() + 1))\n\n # currently only works in min is actually 0\n for j in range(0, y_oh.shape[1]):\n y_oh[np.where(y == j), j] = 1\n\n return y_oh", "def one_hot(a, size=None, dtype=np.int32):\n # http://stackoverflow.com/a/37323404\n if isinstance(a, np.ndarray):\n if size is None:\n size = a.max() + 1\n return np.eye(size, dtype=dtype)[a]\n else:\n if size is None:\n raise NotImplementedError()\n return K.eye(size, dtype)[a]", "def To1hot(label,num_class):\n onehot = np.zeros(num_class)\n onehot[label] = 1\n return onehot", "def encode_one_hot(s):\n all = []\n for c in s:\n x = np.zeros((INPUT_VOCAB_SIZE)) \n index = char_indices[c]\n x[index] = 1 \n all.append(x)\n return all", "def as_one_hot(ind, n):\n vec = np.zeros(n)\n vec[ind] = 1\n return vec", "def one_hot(index):\n\toutput = np.zeros(100)\n\toutput[index] = 1\n\treturn output", "def one_hot_encode(labels, num_classes=None):\n if num_classes is None:\n num_classes = len(np.unique(labels))\n return np.eye(num_classes)[labels]", "def one_hot_encode(labels, num_classes=None):\n if num_classes is None:\n num_classes = len(np.unique(labels))\n return np.eye(num_classes)[labels]", "def one_hot(labels, dim):\n batch_size = labels.size(0)\n out = torch.zeros(batch_size, dim)\n out[np.arange(batch_size), labels.squeeze().long()] = 1\n return out", "def one_hot(x, dtype=np.float32):\n return 
np.array(x[:, None] == np.arange(x.max()+1), dtype)", "def to_onehot(value, dim):\n one_hot = torch.zeros(value.shape[0], dim)\n one_hot[torch.arange(value.shape[0]), value.long()] = 1\n return one_hot", "def encode_one_hot2(s):\n x = np.zeros((LINE_SIZE, INPUT_VOCAB_SIZE))\n for n, c in enumerate(s):\n index = char_indices[c]\n x[n, index] = 1 \n return x", "def one_hot(y, num_classes):\n return np.eye(num_classes)[y]", "def one_hot_encode(self, y: np.ndarray) -> np.ndarray:\n return np.eye(self.output_size)[y]", "def __one_hot(self, y):\n y_one_hot = np.zeros((y.size, y.max() + 1))\n y_one_hot[np.arange(y.size), y] = 1\n \n return y_one_hot", "def int_to_one_hot(x, n=None):\n if isinstance(x, int):\n if n is None:\n raise ValueError('n is required to one-hot encode a single integer')\n if x >= n:\n raise ValueError('x must be smaller than n in order to one-hot encode')\n output = np.zeros((n,))\n output[x] = 1\n else:\n if n is None:\n n = int(np.max(x) + 1)\n else:\n if np.max(x) >= n:\n raise ValueError('The maximum value in x ({}) is greater than '\n 'n ({}), therefore 1-of-n encoding is not '\n 'possible'.format(np.max(x), n))\n x = np.array(x, dtype=np.int)\n if x.ndim is 1:\n x = x[:, None]\n orig_shp = x.shape\n x = np.reshape(x, (-1, orig_shp[-1]))\n output = np.zeros((x.shape[0], n))\n output[np.arange(x.shape[0]), x.squeeze()] = 1\n output = output.reshape(orig_shp[:-1] + (n,))\n\n return output", "def get_one_hot(size, ind):\n one_hot = np.zeros((size,))\n one_hot[ind] = 1\n return one_hot", "def _onehot(integer_labels):\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot", "def turn_to_one_hot(self,length,ind):\n one_hot = [0 for _ in range(length)]\n if (ind >= len(one_hot)):\n one_hot[-1] = 1\n else:\n one_hot[ind] = 1\n return one_hot", "def label_to_one_hot(label, num_of_class=2):\r\n import numpy as np\r\n one_hot = np.zeros((len(label), num_of_class), dtype=np.uint8)\r\n for i in range(len(label)):\r\n one_hot[i, int(label[i] - 1)] = 1 # label is 1 and 2\r\n\r\n return one_hot", "def _get_one_hot_array(num_left_cards, max_num_cards):\n one_hot = np.zeros(max_num_cards)\n if num_left_cards > 0:\n one_hot[num_left_cards - 1] = 1\n\n return one_hot", "def one_hot_encode(x):\n\n # check if encoder has been previously created, if not make a global var an initialize it\n if 'encoder' not in globals():\n global encoder\n encoder = LabelBinarizer()\n encoder.fit(range(10))\n\n return encoder.transform(x)", "def one_hot(labels):\n one_hot_labels = np.zeros(labels.shape + (n_actions,))\n for c in range(n_actions):\n one_hot_labels[labels == c, c] = 1.0\n return one_hot_labels", "def one_hot(size, index, use_cuda=False):\n mask = long_type(use_cuda)(*size).fill_(0)\n ones = 1\n if isinstance(index, Variable):\n ones = Variable(long_type(use_cuda)(index.size()).fill_(1))\n mask = Variable(mask, volatile=index.volatile)\n\n ret = mask.scatter_(1, index, ones)\n return ret", "def one_hot(index, dims, dtype=np.uint8):\n if isinstance(index, int):\n ret = np.zeros((dims,), dtype)\n ret[index] = 1\n elif isinstance(index, (list, tuple)):\n seq_len = len(index)\n ret = np.zeros((seq_len, dims), dtype)\n ret[range(seq_len), index] = 1.0\n else:\n raise ValueError('index should be int or list(tuple) of int.')\n return ret", "def onehot(index):\n classNum=2#1\n onehot = np.zeros(classNum)#这代表种类类型\n onehot[index] = 1.0\n return onehot", "def 
one_hot(self, y):\n\n one_hot = np.zeros((self.n_samples, self.n_classes))\n\n # using np.array to select elements of another np.array\n\n # first diemention index\n # np.arange(self.n_samples) : (1, n_samples)\n # row vectors (0,1,2.....n_samples-1) \n\n # second dimention index\n # \n\n\n one_hot[np.arange(self.n_samples), y.T] = 1\n return one_hot", "def labels_to_one_hot(\n labels: np.ndarray, categories: int, axis: int = 0,\n keepdims=False, dtype=bool\n):\n if keepdims:\n assert labels.shape[axis] == 1\n result_ndim = labels.ndim\n else:\n result_ndim = labels.ndim + 1\n\n if axis < 0:\n axis += result_ndim\n\n shape = labels.shape\n zeros = np.zeros((categories, labels.size), dtype=dtype)\n zeros[labels.ravel(), range(labels.size)] = 1\n\n zeros = zeros.reshape((categories,) + shape)\n\n if keepdims:\n zeros = zeros[(slice(None),) * (axis + 1) + (0,)]\n\n zeros = np.moveaxis(zeros, 0, axis)\n\n return zeros", "def one_hot(x):\n cart_pos,cart_vel,pole_ang,pole_vel = x\n\n # Cart position\n discrete_cart_pos = int((cart_pos - CART_POSITION_MIN)/(CART_POSITION_MAX-CART_POSITION_MIN)*4)\n\n # Pole angle\n discrete_pol_ang = int((cart_pos - POLE_ANGLE_MIN)/(POLE_ANGLE_MAX-POLE_ANGLE_MIN)*4)\n\n # Cart velocity\n cart_vel_discretisations = [-1,0,1]\n discrete_cart_vel= 3\n for i,v in enumerate(cart_vel_discretisations):\n if cart_vel < v:\n discrete_cart_vel = i\n break\n\n # Pole tip velocity\n pole_vel_discretisations = [-1,0,1]\n discrete_pole_vel= 3\n for i,v in enumerate(pole_vel_discretisations):\n if pole_vel < v:\n discrete_pole_vel = i\n break\n\n # Convert to one-hot encoding\n x = discrete_cart_pos + discrete_cart_vel*4 + discrete_pol_ang*8 + discrete_cart_vel*12\n output = [0] * ONE_HOT_NUM_FEATURES\n output[x] = 1\n return np.array([output]).transpose()", "def one_hot_enc(self, word):\n word = self.text_to_int(word)\n word = Variable(torch.tensor(word))\n word = torch.nn.functional.one_hot(word, len(self.index_map))\n return word.transpose(0, 1)", "def int2onehot(x, length):\n assert x > 0 and x < length\n assert isinstance(x, int)\n assert isinstance(length, int)\n ohv = np.zeros([length], dtype=np.float32)\n ohv[x] = 1\n return ohv", "def flat_to_one_hot(val, ndim):\n shape =np.array(val).shape\n v = np.zeros(shape + (ndim,))\n if len(shape) == 1:\n v[np.arange(shape[0]), val] = 1.0\n else:\n v[val] = 1.0\n return v", "def _get_one_hot(targets, num_classes):\n ret = np.zeros((num_classes, targets.shape[0]))\n ret[targets, np.arange(targets.size)] = 1\n return ret", "def one_hot_encoding(gt, num_classes):\n if gt.ndim == 1:\n # multi-class classification\n return F.one_hot(gt, num_classes=num_classes)\n else:\n # binary classification\n # example. [[0], [1], [1]]\n # multi-label classification\n # example. 
[[0, 1, 1], [1, 0, 0], [1, 1, 1]]\n return gt", "def one_hot(self) -> np.ndarray:\n array = np.zeros(10, dtype=int)\n array[self.of] = 1\n array.flags.writeable = False\n return array", "def get_onehot(tensor, labels=10):\n one_hot = np.zeros((tensor.shape[0], labels))\n one_hot[range(tensor.shape[0]), tensor.astype(int)] = 1.\n return one_hot", "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)", "def to_one_hot(arr, num_classes):\n arr = arr.data.astype(int)\n a = np.zeros((arr.shape[0], num_classes))\n a[np.arange(len(a)), arr] = 1\n return tensor.Tensor(a, requires_grad=True)", "def one_hot_encoded(class_numbers, num_classes=None):\n\n # Find the number of classes if None is provided.\n # Assumes the lowest class-number is zero.\n if num_classes is None:\n num_classes = np.max(class_numbers) + 1\n\n return np.eye(num_classes, dtype=float)[class_numbers]", "def one_hot_encoded(class_numbers, num_classes=None):\n\n # Find the number of classes if None is provided.\n # Assumes the lowest class-number is zero.\n if num_classes is None:\n num_classes = np.max(class_numbers) + 1\n\n return np.eye(num_classes, dtype=float)[class_numbers]", "def convert_to_one_hot(a):\n a = a[:, 0]\n a = a.astype(int)\n A = np.zeros((len(a), config.num_classes))\n A[np.arange(len(a)), a] = 1\n return A", "def to_one_hot_encoding(target_data):\n target_data = target_data.squeeze()\n n_class = len(np.unique(target_data))\n res = np.eye(n_class)[target_data.astype(int)]\n return res", "def one_hot(input_data:torch.Tensor, dim:int):\n res = []\n n_channels = input_data.size(1)\n offset = input_data.min()\n length = dim\n for channel_idx in range(n_channels):\n channel_one_hot = []\n channel = input_data[:, channel_idx]\n for entry in channel:\n one_hot_x = torch.zeros(length)\n one_hot_x[entry+offset] = 1\n channel_one_hot.append(one_hot_x)\n channel_one_hot = torch.cat(channel_one_hot)\n channel_one_hot = channel_one_hot.reshape(-1, length)\n res.append(channel_one_hot.unsqueeze(2))\n res = torch.cat(res, dim=2)\n return res", "def onehot_encode_y(y, num_class):\n # Assertions\n assert isinstance(y, np.ndarray), \\\n 'y must be a numpy ndarray'\n assert isinstance(num_class, int), \\\n 'num_class must be an int'\n # Functionality\n one_hot = np.zeros((y.shape[0],num_class),dtype=np.int8)\n for index, cls in enumerate(y):\n one_hot[index, int(cls)] = 1\n\n return one_hot", "def onehot(rating):\n vec = [0 for i in range(5)]\n vec[rating - 1] = 1\n return np.array(vec)", "def one_hot_encoding(data):\n\n data_encoded = pd.get_dummies(data)\n\n return data_encoded", "def to_one_hot(labels, num_classes):\n shape = labels.size()\n shape = shape + (num_classes,)\n one_hot = torch.FloatTensor(shape)\n one_hot.zero_()\n dim = 1 if len(shape) == 2 else 2\n one_hot.scatter_(dim, labels.unsqueeze(-1), 1)\n return one_hot", "def one_hot(labels, classes=None):\n return K.utils.to_categorical(labels, classes)", "def one_hot(a, actions, dtype=t.float32):\n assert 1 == a.shape[-1]\n return (a == t.arange(actions, device=a.device)).to(dtype=dtype)", "def encode_labels(self, y, num_labels):\n onehot = np.zeros((num_labels, y.shape[0]))\n for i in range(y.shape[0]):\n onehot[y[i], i] = 1.0\n return onehot", "def one_hot(class_ids, num_classes):\n oh = np.zeros((len(class_ids), num_classes), dtype=np.float32)\n oh[np.arange(len(class_ids)), class_ids] = 1\n\n assert (oh.argmax(axis=1) == 
class_ids).all()\n assert (oh.sum(axis=1) == 1).all()\n\n return oh", "def label_name_to_one_hot(self, label_name):\n label_name_to_int = {n: i for i, n in enumerate(BOX_LABELS)}\n label_id = label_name_to_int[label_name]\n one_hot = np.zeros(shape=[len(BOX_LABELS)], dtype=np.float32)\n one_hot[label_id] = 1\n return one_hot", "def conv_y_to_onehot_mat(labels):\n one_idx = np.array(labels)\n nkind = len(np.unique(one_idx))\n nlabels = len(one_idx)\n\n ret = np.zeros((nkind, nlabels))\n ret[one_idx, np.arange(nlabels)] = 1\n return ret", "def one_hot_encoding(sequence):\n\n mydict = {\n \"A\": np.asarray([1, 0, 0, 0]),\n \"a\": np.asarray([1, 0, 0, 0]),\n \"C\": np.asarray([0, 1, 0, 0]),\n \"c\": np.asarray([0, 1, 0, 0]),\n \"G\": np.asarray([0, 0, 1, 0]),\n \"g\": np.asarray([0, 0, 1, 0]),\n \"T\": np.asarray([0, 0, 0, 1]),\n \"t\": np.asarray([0, 0, 0, 1]),\n \"Y\": np.asarray([0, 1, 0, 1]),\n \"y\": np.asarray([0, 1, 0, 1]),\n \"R\": np.asarray([1, 0, 1, 0]),\n \"r\": np.asarray([1, 0, 1, 0]),\n \"S\": np.asarray([0, 1, 1, 0]),\n \"s\": np.asarray([0, 1, 1, 0]),\n \"W\": np.asarray([1, 0, 0, 1]),\n \"w\": np.asarray([1, 0, 0, 1]),\n \"K\": np.asarray([0, 0, 1, 1]),\n \"k\": np.asarray([0, 0, 1, 1]),\n \"M\": np.asarray([1, 1, 0, 0]),\n \"m\": np.asarray([1, 1, 0, 0]),\n \"B\": np.asarray([0, 1, 1, 1]),\n \"b\": np.asarray([0, 1, 1, 1]),\n \"D\": np.asarray([1, 0, 1, 1]),\n \"d\": np.asarray([1, 0, 1, 1]),\n \"H\": np.asarray([1, 1, 0, 1]),\n \"h\": np.asarray([1, 1, 0, 1]),\n \"V\": np.asarray([1, 1, 1, 0]),\n \"v\": np.asarray([1, 1, 1, 0]),\n \"N\": np.asarray([0, 0, 0, 0]),\n \"n\": np.asarray([0, 0, 0, 0]),\n \"-\": np.asarray([0, 0, 0, 0]),\n }\n print(f\"Seq: {sequence}\")\n if len(sequence) > 0:\n nuc_list = list()\n for nuc in list(sequence):\n nuc_list.append(mydict[nuc])\n result = np.stack(np.asarray(nuc_list, dtype=\"int8\"))\n return result\n else: \n print(\"ERROR! 
sequence is too short\")", "def one_hot_encode(df, ohe_cols):\n return pd.get_dummies(df, columns=ohe_cols)", "def dense_to_one_hot(labels, n_classes=2):\n labels = np.array(labels)\n n_labels = labels.shape[0]\n index_offset = np.arange(n_labels) * n_classes\n labels_one_hot = np.zeros((n_labels, n_classes), dtype=np.float32)\n labels_one_hot.flat[index_offset + labels.ravel()] = 1\n return labels_one_hot", "def onehot(trace):\n encoded_trace = np.zeros((len(trace), 3), dtype=int)\n encoded_trace[np.arange(len(trace)), trace] = 1\n return encoded_trace.flatten()", "def onehot(t, num_classes):\n assert isinstance(t, torch.LongTensor)\n return torch.zeros(t.size()[0], num_classes).scatter_(1, t.view(-1, 1), 1)", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\n num_labels = labels_dense.shape[0]\n index_offset = np.arange(num_labels) * num_classes\n labels_one_hot = np.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def label_to_one_hot(x, labels=None):\n n = len(labels)\n labels_idx = {l: i for i, l in enumerate(labels)}\n if not hasattr(x, '__len__'):\n output = np.zeros((n,))\n output[labels_idx[x]] = 1\n else:\n x = np.array(x, dtype=np.int)\n orig_shp = x.shape\n x = np.reshape(x, (-1))\n x = np.array([labels_idx[_] for _ in x])\n output = np.zeros((x.shape[0], n))\n output[np.arange(x.shape[0]), x] = 1\n if len(orig_shp) == 1:\n output_shape = orig_shp + (n,)\n else:\n output_shape = orig_shp[:-1] + (n,)\n output = output.reshape(output_shape)\n\n return output", "def one_hot_encode(df, col):\n return pd.get_dummies(df, columns=[col], drop_first=True)", "def dense_to_one_hot(labels_dense, num_classes=10):\n num_labels = labels_dense.shape[0]\n #print('check num_labels',num_labels)\n index_offset = numpy.arange(num_labels) * num_classes\n labels_one_hot = numpy.zeros((num_labels, num_classes))\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n return labels_one_hot", "def dense_to_one_hot(labels_dense, num_classes):\r\n num_labels = labels_dense.shape[0]\r\n index_offset = numpy.arange(num_labels) * num_classes\r\n labels_one_hot = numpy.zeros((num_labels, num_classes))\r\n labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\r\n return labels_one_hot", "def one_hot_encode(Y, classes):\n if type(classes) is not int:\n return None\n if Y is None or type(Y) != np.ndarray:\n return None\n for c in Y:\n if c >= classes or c < 0:\n return None\n m = Y.shape[0]\n mtx = np.zeros((m, classes))\n\n for row, c_label in zip(mtx, Y):\n row[c_label] = 1\n\n return mtx.T", "def chars_to_onehot(self, char_list):\n int_list = 
self.char_to_int(char_list)\n one_hot = np.zeros((len(self.unique_chars), len(int_list)))\n for i,int_elem in enumerate(int_list):\n one_hot[int_elem,i] = 1\n return one_hot", "def label2onehot(self, batch_size, labels):\r\n dim = 6\r\n out = torch.zeros(batch_size, dim)\r\n out[np.arange(batch_size), labels] = 1\r\n return out", "def to_categorical(x, num_classes):\n return np.eye(num_classes, dtype='uint8')[x]" ]
[ "0.804429", "0.801011", "0.78977334", "0.7889785", "0.7800311", "0.7714966", "0.75706875", "0.75658363", "0.7427502", "0.7418045", "0.73576915", "0.735553", "0.7347711", "0.7343637", "0.7291789", "0.7286171", "0.7206042", "0.72045", "0.7190287", "0.7164403", "0.71621656", "0.7137647", "0.71267194", "0.7120279", "0.71084416", "0.71021783", "0.7095118", "0.7093074", "0.7083752", "0.7076452", "0.7072502", "0.70642143", "0.7054511", "0.70472527", "0.70328957", "0.7024934", "0.7023044", "0.7023044", "0.6991934", "0.6986427", "0.6973439", "0.6969106", "0.69674784", "0.6952567", "0.6911198", "0.69060576", "0.68951315", "0.6862935", "0.68624216", "0.6852557", "0.6835732", "0.68322635", "0.6805062", "0.67776644", "0.67752355", "0.67619455", "0.6759625", "0.67396444", "0.67152685", "0.67129374", "0.6700594", "0.66997683", "0.667953", "0.66709757", "0.66701293", "0.6657092", "0.6612892", "0.6612892", "0.6607239", "0.6607239", "0.66052735", "0.65942943", "0.6593063", "0.65830463", "0.65826565", "0.65769064", "0.6564957", "0.6540918", "0.652778", "0.65271705", "0.652533", "0.6514781", "0.6509675", "0.6500311", "0.65000814", "0.64946645", "0.64937794", "0.64762926", "0.64739037", "0.64739037", "0.64739037", "0.6472274", "0.64710635", "0.64659584", "0.6463554", "0.64587635", "0.64536554", "0.64482576", "0.6447426", "0.6437744" ]
0.7974455
2
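As a worked illustration of the broadcasting comparison used by the one_hot document above (the sample labels are illustrative):

import numpy as np

def one_hot(x, k, dtype=np.float32):
    # x[:, None] has shape (n, 1) and np.arange(k) has shape (k,), so the ==
    # comparison broadcasts to an (n, k) boolean matrix that is True exactly
    # where the column index equals the label value.
    return np.array(x[:, None] == np.arange(k), dtype)

labels = np.array([0, 2, 1])
print(one_hot(labels, 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]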
Returns a Bokeh glyph object.
def _init_glyph(self, plot, mapping, properties): level = properties.pop('level', 'underlay') renderer = plot.add_tile(mapping['tile_source'], level=level) renderer.alpha = properties.get('alpha', 1) return renderer, renderer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createReferenceGlyph(self):\n return _libsbml.GeneralGlyph_createReferenceGlyph(self)", "def createTextGlyph(self):\n return _libsbml.Layout_createTextGlyph(self)", "def _init_glyph(self, plot, mapping, properties):\n properties.pop('source')\n properties.pop('legend')\n box = Span(level='overlay', **dict(mapping, **properties))\n plot.renderers.append(box)\n return None, box", "def bokeh_constructor( loader, node ):\n global workspace\n args = loader.construct_mapping(node, deep=True)\n args = resolve_pointer( workspace, args )\n\n source = None\n\n if not 'figure' in args:\n args['figure'] = {}\n\n args['figure'] = resolve_pointer( workspace, args['figure'] )\n if 'source' in args:\n source = blaze.odo( args['source'], ColumnDataSource )\n\n p = figure( **args['figure'] )\n\n for glyph, kwargs in yaml_to_args(args['glyphs']):\n if source:\n kwargs['source'] = source\n getattr( p, glyph )( **kwargs )\n\n return p", "def createGeneralGlyph(self):\n return _libsbml.Layout_createGeneralGlyph(self)", "def to_glyphspec(self):\n d = self.vm_props(withvalues=True)\n d[\"type\"] = self.__view_model__\n\n # TODO: Remove this when we rename the BokehJS fill color attribute\n # from \"fill\" to \"fill_color\"\n if \"fill_color\" in d:\n d[\"fill\"] = d.pop(\"fill_color\")\n\n # Iterate over all the DataSpec properties and convert them, using the\n # fact that DataSpecs store the dict-ified version on the object.\n for attrname, dspec in self.dataspecs().iteritems():\n d[attrname] = dspec.to_dict(self)\n return d", "def getSubGlyph(self, *args):\n return _libsbml.GeneralGlyph_getSubGlyph(self, *args)", "def createSpeciesGlyph(self):\n return _libsbml.Layout_createSpeciesGlyph(self)", "def createSpeciesReferenceGlyph(self):\n return _libsbml.Layout_createSpeciesReferenceGlyph(self)", "def createCompartmentGlyph(self):\n return _libsbml.Layout_createCompartmentGlyph(self)", "def getReferenceGlyph(self, *args):\n return _libsbml.GeneralGlyph_getReferenceGlyph(self, *args)", "def glyph(x):\n assert isinstance(x, str)\n return [x]", "def clone(self):\n return _libsbml.TextGlyph_clone(self)", "def createSpeciesReferenceGlyph(self):\n return _libsbml.ReactionGlyph_createSpeciesReferenceGlyph(self)", "def createReactionGlyph(self):\n return _libsbml.Layout_createReactionGlyph(self)", "def getGeneralGlyph(self, *args):\n return _libsbml.Layout_getGeneralGlyph(self, *args)", "def getGlyphId(self):\n return _libsbml.ReferenceGlyph_getGlyphId(self)", "def readGlyphElement(self, glyphElement, instanceObject):\n # name\n glyphName = glyphElement.attrib.get('name')\n if glyphName is None:\n raise MutatorError(\"Glyph object without name attribute.\")\n\n # mute\n mute = glyphElement.attrib.get(\"mute\")\n if mute == \"1\":\n instanceObject.muteGlyph(glyphName)\n # we do not need to stick around after this\n return\n\n # unicode\n unicodes = glyphElement.attrib.get('unicode')\n if unicodes == None:\n unicodes = self.unicodeMap.get(glyphName, None)\n else:\n try:\n unicodes = [int(u, 16) for u in unicodes.split(\" \")]\n except ValueError:\n raise MutatorError(\"unicode values %s are not integers\" % unicodes)\n\n # note\n note = None\n for noteElement in glyphElement.findall('.note'):\n note = noteElement.text\n break\n\n # location\n instanceLocation = self.locationFromElement(glyphElement)\n\n # masters\n glyphSources = None\n for masterElement in glyphElement.findall('.masters/master'):\n fontSourceName = masterElement.attrib.get('source')\n fontSource, fontLocation = self.sources.get(fontSourceName)\n if 
fontSource is None:\n raise MutatorError(\"Unknown glyph master: %s\"%masterElement)\n sourceLocation = self.locationFromElement(masterElement)\n if sourceLocation is None:\n # if we don't read a location, use the instance location\n sourceLocation = fontLocation\n masterGlyphName = masterElement.attrib.get('glyphname')\n if masterGlyphName is None:\n # if we don't read a glyphname, use the one we have\n masterGlyphName = glyphName\n d = dict( font=fontSource,\n location=sourceLocation,\n glyphName=masterGlyphName)\n if glyphSources is None:\n glyphSources = []\n glyphSources.append(d)\n # calculate the glyph\n instanceObject.addGlyph(glyphName, unicodes, instanceLocation, glyphSources, note=note)", "def getCompartmentGlyph(self, *args):\n return _libsbml.Layout_getCompartmentGlyph(self, *args)", "def getSpeciesGlyph(self, *args):\n return _libsbml.Layout_getSpeciesGlyph(self, *args)", "def glyphAt(self, a, b):\n for i in range(len(self.glyphs), 0, -1):\n g = self.glyphs[i-1]\n if g.contains(a, b):\n return g\n return None", "def clone(self):\n return _libsbml.GeneralGlyph_clone(self)", "def createCubicBezier(self):\n return _libsbml.SpeciesReferenceGlyph_createCubicBezier(self)", "def singleglyph(x):\n return [glyph(x)]", "def _font(self):\n\treturn self.m_gdfont", "def createCubicBezier(self):\n return _libsbml.GeneralGlyph_createCubicBezier(self)", "def createCubicBezier(self):\n return _libsbml.ReferenceGlyph_createCubicBezier(self)", "def toXML(self):\n return _libsbml.TextGlyph_toXML(self)", "def clone(self):\n return _libsbml.ReferenceGlyph_clone(self)", "def createCubicBezier(self):\n return _libsbml.ReactionGlyph_createCubicBezier(self)", "def getCurve(self, *args):\n return _libsbml.GeneralGlyph_getCurve(self, *args)", "def toXML(self):\n return _libsbml.GeneralGlyph_toXML(self)", "def getTextGlyph(self, *args):\n return _libsbml.Layout_getTextGlyph(self, *args)", "def __init__(self, glyphSet):\n super(DecomposingPen, self).__init__()\n self.glyphSet = glyphSet", "def getCurve(self, *args):\n return _libsbml.ReferenceGlyph_getCurve(self, *args)", "def toXML(self):\n return _libsbml.ReferenceGlyph_toXML(self)", "def toXML(self):\n return _libsbml.CompartmentGlyph_toXML(self)", "def get(self, *args):\n return _libsbml.ListOfCompartmentGlyphs_get(self, *args)", "def get_text_font ( self, object ):\n return self.text_font", "def getDemoFontGlyphSetPath():\n\treturn os.path.join(getDemoFontPath(), \"glyphs\")", "def getCurve(self, *args):\n return _libsbml.SpeciesReferenceGlyph_getCurve(self, *args)", "def clone(self):\n return _libsbml.CompartmentGlyph_clone(self)", "def get(self) -> HintCell:\n\n boundaries = self.boundaries.get()\n boni = self.boni.get()\n\n return HintCell(self.source, boundaries, boni)", "def addSubGlyph(self, *args):\n return _libsbml.GeneralGlyph_addSubGlyph(self, *args)", "def clone(self):\n return _libsbml.SpeciesGlyph_clone(self)", "def addReferenceGlyph(self, *args):\n return _libsbml.GeneralGlyph_addReferenceGlyph(self, *args)", "def addTextGlyph(self, *args):\n return _libsbml.Layout_addTextGlyph(self, *args)", "def to_wx_font ( self, editor ):\n font = editor.value\n return wx.Font( font.GetPointSize(), font.GetFamily(), font.GetStyle(),\n font.GetWeight(), font.GetUnderlined(), \n font.GetFaceName() )", "def create(font_name, point):\n return pygame.font.SysFont(font_name, int(point))", "def get_renderer ( self, object ):\n return self.renderer", "def named_font(self, point):\n return Font.create(self.name, point * self.scale)", "def 
asBoldItalic(self):\n\t\treturn TextFont( self.name[0:2]+'IB', self.size)", "def _create_texture(self, char, glyph, width, height, glyph_width, glyph_height):\n\n if char not in self._texture_cache:\n ID = glGenTextures (1)\n glBindTexture (GL_TEXTURE_2D, ID)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n #glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);\n #glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);\n tex2d = \"\"\n for j in xrange (height):\n for i in xrange (width):\n if (i >= glyph_width) or (j >= glyph_height):\n value = chr (0)\n tex2d += value*4\n else:\n value = chr (glyph.getpixel ((i, j)))\n tex2d += value*4\n\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, tex2d)\n self._texture_cache[char] = ID\n\n return self._texture_cache[char]", "def getText(self):\n return _libsbml.TextGlyph_getText(self)", "def clone(self):\n return _libsbml.SpeciesReferenceGlyph_clone(self)", "def get_font(self, option):\n return get_font(option=option)", "def font(self):\n\treturn self.m_font", "def font(self):\n\treturn self.m_font", "def _create_glyph_metrics(glyph, base):\n metrics = base.Struct(**_UNCOMPRESSED_METRICS)(\n left_side_bearing=glyph.left_bearing,\n right_side_bearing=glyph.left_bearing + glyph.width,\n character_width=glyph.advance_width,\n character_ascent=glyph.height+glyph.shift_up,\n character_descent=-glyph.shift_up,\n character_attributes=0,\n )\n return metrics", "def asBold(self):\n\t\treturn TextFont( self.name[0:2]+'B', self.size)", "def _instantiateFont(self, path):\n return self._fontClass(path,\n libClass=self._libClass,\n kerningClass=self._kerningClass,\n groupsClass=self._groupsClass,\n infoClass=self._infoClass,\n featuresClass=self._featuresClass,\n glyphClass=self._glyphClass,\n glyphContourClass=self._glyphContourClass,\n glyphPointClass=self._glyphPointClass,\n glyphComponentClass=self._glyphComponentClass,\n glyphAnchorClass=self._glyphAnchorClass)", "def fetch_fa_glyph(glyph: Optional[str]) -> str:\n if glyph is None:\n return \"\"\n else:\n x = \"<span role=\\\"presentation\\\" class=\\\"fa\"\n if glyph == \"home\":\n x += \" fa-home\\\" aria-hidden\"\n elif glyph == \"blog\":\n x += \" fa-pencil-alt\\\" aria-hidden\"\n elif glyph == \"mail\":\n x += \" fa-envelope\\\" aria-hidden\"\n elif glyph == \"site cite\":\n x += \" fa-pencil-alt\\\" aria-hidden\"\n elif glyph == \"index\":\n x += \" fa-list\\\" aria-hidden\"\n elif glyph == \"summary charts\":\n x += \" fa-chart-line\\\" aria-hidden\"\n elif glyph == \"location\":\n x += \" fa-map-marker-alt\\\" aria-hidden\"\n elif glyph == \"citation\":\n x += \" fa-edit\\\" aria-hidden\"\n elif glyph == \"specimen\":\n x += \" fa-flask\\\" aria-hidden\"\n elif glyph == \"sequence\":\n x += \" fa-dna\\\" aria-hidden\"\n elif glyph == \"original\":\n x += \" fa-arrow-alt-left\\\" aria-hidden\"\n elif glyph == \"computed\":\n x += \" fa-cogs\\\" aria-hidden\"\n elif glyph == \"geography\":\n x += \"r fa-map\\\" aria-hidden\"\n elif glyph == \"synonymy\":\n x += \" fa-exchange\\\" aria-hidden\"\n elif glyph == \"specific name\":\n x += \" fa-window-minimize\\\" aria-hidden\"\n elif glyph == \"info\":\n x += \" fa-info-circle\\\" aria-hidden\"\n elif glyph == \"accepted species\":\n x += \" fa-check-circle\\\" aria-hidden\"\n elif glyph == \"download\":\n x += \" fa-download\\\" aria-hidden\"\n elif glyph == \"file download\":\n x += \" fa-file-download\\\" 
aria-hidden\"\n elif glyph == \"maps\":\n x += \"r fa-map\\\" aria-hidden\"\n elif glyph == \"photo\":\n x += \" fa-camera-alt\\\" aria-hidden\"\n elif glyph == \"video\":\n x += \" fa-video\\\" aria-hidden\"\n elif glyph == \"references\":\n x += \" fa-book\\\" aria-hidden\"\n elif glyph == \"art\":\n x += \" fa-paint-brush\\\" aria-hidden\"\n elif glyph == \"measure\":\n x += \"r fa-ruler\\\" aria-hidden\"\n elif glyph == \"handedness\":\n x += \" fa-hands\\\" aria-hidden\"\n elif glyph == \"list pdf\":\n x += \"-li far fa-file-pdf\\\" aria-hidden\"\n elif glyph == \"list github\":\n x += \"-li fab fa-github\\\" aria-hidden\"\n elif glyph == \"list systematics\":\n x += \"-li fa fa-signal fa-rotate-270\\\" aria-hidden\"\n elif glyph == \"list phylogeny\":\n x += \"-li fa fa-share-alt fa-rotate-270\\\" aria-hidden\"\n elif glyph == \"list species\":\n x += \"-li fa fa-list\\\" aria-hidden\"\n elif glyph == \"list common\":\n x += \"-li far fa-comments\\\" aria-hidden\"\n elif glyph == \"list ranges\":\n x += \"-li far fa-map\\\" aria-hidden\"\n elif glyph == \"list morphology\":\n x += \"-li far fa-heart\\\" aria-hidden\"\n elif glyph == \"list references\":\n x += \"-li fa fa-book\\\" aria-hidden\"\n elif glyph == \"list lifecycle\":\n x += \"-li fa fa-sync\\\" aria-hidden\"\n elif glyph == \"list photo\":\n x += \"-li fa fa-camera-alt\\\" aria-hidden\"\n elif glyph == \"list video\":\n x += \"-li fa fa-video\\\" aria-hidden\"\n elif glyph == \"list art\":\n x += \"-li fa fa-paint-brush\\\" aria-hidden\"\n elif glyph == \"list site cite\":\n x += \"-li fa fa-pencil-alt\\\" aria-hidden\"\n elif glyph == \"list unusual dev\":\n x += \"-li fa fa-transgender-alt\\\" aria-hidden\"\n elif glyph == \"bad location\":\n x += \" fa-exclamation-triangle\\\" style=\\\"color: red\\\" title=\\\"Problematic Location: Outside range of \" \\\n \"all fiddler crabs or this particular species.\\\"\"\n elif glyph == \"questionable id\":\n x += \" fa-question-circle\\\" style=\\\"color: goldenrod\\\" title=\\\"Questionable ID: Species identity \" \\\n \"uncertain.\\\"\"\n elif glyph == \"tax key\":\n x += \" fa-key\\\" ara-hidden\"\n elif glyph == \"location marker\":\n x += \"r fa-map-marked-alt\\\" ara-hidden\"\n else:\n report_error(\"missing glyph: \" + glyph)\n return \"\"\n return x + \"></span> \"", "def get(self, *args):\n return _libsbml.ListOfSpeciesReferenceGlyphs_get(self, *args)", "def getCurve(self, *args):\n return _libsbml.ReactionGlyph_getCurve(self, *args)", "def get(self, *args):\n return _libsbml.ListOfSpeciesGlyphs_get(self, *args)", "def _get_renderer(self) :\n \n return self._renderer", "def clone(self):\n return _libsbml.ReactionGlyph_clone(self)", "def dtype_renderer(_, cell_combobox, tree_model, tree_iter, data):\n\n cell_data = tree_model.get(tree_iter, data)[0]\n cell_combobox.set_property('markup', cell_data)", "def font(self):\n return self[\"font\"]", "def font(self):\n return self[\"font\"]", "def font(self):\n return self[\"font\"]", "def get(self, *args):\n return _libsbml.ListOfReferenceGlyphs_get(self, *args)", "def span_instance(self) -> Span:\n return self._span_instance", "def getSpeciesReferenceGlyph(self, *args):\n return _libsbml.ReactionGlyph_getSpeciesReferenceGlyph(self, *args)", "def get(self, *args):\n return _libsbml.ListOfTextGlyphs_get(self, *args)", "def toXML(self):\n return _libsbml.SpeciesGlyph_toXML(self)", "def get_bl_cell(self):\n return self._bl_cell", "def toXML(self):\n return _libsbml.SpeciesReferenceGlyph_toXML(self)", "def get_render(self):\n 
return self.fam.c_get_render(self)", "def Font(self, attr=None, out=None):\n if attr is None:\n self._font = 0\n else:\n mask = 1 << attr\n self._font ^= mask\n code = self._attr.GetFontCode(self._font & (1 << renderer.BOLD),\n self._font & (1 << renderer.ITALIC))\n if out:\n out.write(code)\n return code", "def _create_ink_metrics(glyph, base):\n return _create_glyph_metrics(glyph.reduce(), base)", "def from_hex(hexstr, width, height):\n return Glyph.from_bytes(binascii.unhexlify(hexstr.encode('ascii')), width, height)", "def from_wx_font ( self, font ):\n return font", "def _extractGlyph(self, char):\n charno = ord(char)\n vertices = None\n currentGlyph = None\n\n if charno in self.extracted:\n currentGlyph = self.extracted[charno]\n else:\n if char in ('\\n', ):\n # No glyph for these chars\n pass\n else:\n glyph = self.font.getGlyph(charno, self.glyphs)\n if glyph is None:\n save_char = char\n save_charno = charno\n # Use '.notdef' glyph if it is defined in the font\n repcharno = None\n if self.glyphs != GlyphTypes.CBDT_COLOR:\n glyph = self.font.getGlyph(repcharno, self.glyphs)\n if glyph is None:\n # Use WHITE SQUARE gplyph: \\u25A1\n repcharno = 9633\n glyph = self.font.getGlyph(repcharno, self.glyphs)\n if glyph is None:\n # Still None? Replace character with blank\n repcharno = 32\n glyph = self.font.getGlyph(repcharno, self.glyphs)\n charno = 32\n char = chr(charno)\n if glyph is None:\n self.logger.error(\"Font %s has no space\"\n \" character!\" % self.font.fontFile)\n\n self.logger.warning(\"Char %r (%d) not found in\"\n \" font %s has been replaced with chr(%s)\"\n % (save_char, save_charno, self.font.fontFile,\n repcharno))\n\n currentGlyph = glyph\n self.extracted[charno] = currentGlyph\n\n if currentGlyph is not None and 'vertices' in currentGlyph:\n vertices = currentGlyph['vertices'].copy()\n\n return char, vertices, currentGlyph", "def toXML(self):\n return _libsbml.ReactionGlyph_toXML(self)", "def getTypeCode(self):\n return _libsbml.TextGlyph_getTypeCode(self)", "def __init__(self, font, color=(255,255,255,255)):\r\n if not font.endswith('.png'):\r\n font += '.png'\r\n super(Pngfont, self).__init__(\"fonts/%s\" % font)\r\n self.font = font\r\n pixels = self.im.load()\r\n\r\n self.glyph_table = {}\r\n # Extract font information from top scanline of font image; create width,\r\n # height, tex_coord and vertices for each character.\r\n for v in range(95):\r\n x = (pixels[v * 2, 0][0] * 2.0) / self.ix\r\n y = ((pixels[v * 2, 0][1] + 8) * 2.0) / self.iy\r\n width = float(pixels[v * 2 + 1, 0][0])\r\n height = float(pixels[v * 2 + 1, 0][1])\r\n width_scale = width / self.ix\r\n height_scale = height / self.iy\r\n\r\n self.glyph_table[v] = [width, height,\r\n [(x + width_scale, y - height_scale),\r\n (x, y - height_scale),\r\n (x, y),\r\n (x + width_scale, y)],\r\n [(width, 0, 0), (0, 0, 0), (0, -height, 0), (width, -height, 0)]]\r\n\r\n alph = self.im.split()[-1] #keep alpha\r\n draw = ImageDraw.Draw(self.im)\r\n draw.rectangle((0, 1, self.ix, self.iy), fill=color)\r\n self.im.putalpha(alph)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = self.im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()", "def getTypeCode(self):\n return _libsbml.GeneralGlyph_getTypeCode(self)", "def get_demo_chart() -> Chart:\n chart = ChartSampleGenerator.create_chart()\n return chart", "def asItalic( self):\n\t\treturn TextFont( self.name[0:2]+'I', self.size)", "def getStrike(self):\n pass", "def getIndexForReferenceGlyph(self, *args):\n return 
_libsbml.GeneralGlyph_getIndexForReferenceGlyph(self, *args)", "def readGlyph(self, layerName, glyphName, glyphObject):\n\t\ttree = self._fileSystem.getGlyphTree(layerName, glyphName)\n\t\treadGlyphFromTree(tree, glyphObject, glyphObject)", "def GetFont(self):\r\n\r\n return self._font", "def GetFont(self):\r\n\r\n return self._font", "def GetFont(self):\r\n\r\n return self._font", "def addSpeciesGlyph(self, *args):\n return _libsbml.Layout_addSpeciesGlyph(self, *args)", "def as_sprite(self):\n return Sprite(texture=self)", "def getElementName(self):\n return _libsbml.TextGlyph_getElementName(self)", "def getInstance(self, vf, location):\n if vf.axes:\n return getVarFontInstance(vf, location)\n return vf" ]
[ "0.64251554", "0.6417896", "0.6385368", "0.61207485", "0.6027903", "0.5853716", "0.5838887", "0.580616", "0.5715113", "0.5697527", "0.5558109", "0.55418766", "0.5470425", "0.54696107", "0.5457854", "0.5454618", "0.5413018", "0.5380582", "0.5379364", "0.5375702", "0.5330651", "0.53291035", "0.53228396", "0.53044266", "0.5299674", "0.5294034", "0.5273073", "0.52251476", "0.5174074", "0.51461565", "0.514397", "0.5139198", "0.5101417", "0.50934476", "0.5088868", "0.5080752", "0.50763434", "0.5075059", "0.5072271", "0.5011193", "0.5002101", "0.49894327", "0.49795628", "0.4974502", "0.49504238", "0.4945893", "0.49263617", "0.4921811", "0.4921717", "0.49151716", "0.48819572", "0.4879889", "0.48715636", "0.48606765", "0.48551303", "0.4851047", "0.4846197", "0.4846197", "0.48439434", "0.48347193", "0.48319465", "0.48141792", "0.48107165", "0.48029798", "0.47827595", "0.4764526", "0.47584152", "0.4755317", "0.47523257", "0.47523257", "0.47523257", "0.47350308", "0.47336397", "0.472388", "0.47197214", "0.47102225", "0.46996087", "0.4674484", "0.46727076", "0.46654865", "0.46512127", "0.46510598", "0.46480522", "0.46435097", "0.46427366", "0.4642429", "0.46366447", "0.46223935", "0.46097955", "0.4604595", "0.45789462", "0.45631894", "0.455825", "0.4552918", "0.4552918", "0.4552918", "0.45428446", "0.4536788", "0.45359746", "0.45318887" ]
0.5845592
6
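For reference, a minimal sketch of how a Bokeh glyph object is normally obtained through the standard bokeh.plotting API, outside the framework-specific _init_glyph hook in the record above; the data values are illustrative.

from bokeh.plotting import figure

p = figure()
renderer = p.line([1, 2, 3], [4, 6, 5])  # plotting calls return a GlyphRenderer
glyph = renderer.glyph                   # the underlying Line glyph model
glyph.line_width = 3                     # glyph properties can be changed in place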
This method is used to display the snake on the screen.
def draw_snake(self, dis, snake_Part, snake_Body): for x in snake_Body: if self.left: direction = pygame.transform.rotate(x[2], x[3]) dis.blit(direction, (x[0], x[1])) elif self.right: direction = pygame.transform.rotate(x[2], x[3]) dis.blit(direction, (x[0], x[1])) elif self.up: direction = pygame.transform.rotate(x[2], x[3]) dis.blit(direction, (x[0], x[1])) elif self.down: direction = pygame.transform.rotate(x[2], x[3]) dis.blit(direction, (x[0], x[1])) if self.left: x1 = self.x + self.x_change - 10 y1 = self.y + self.y_change - 10 hdir = pygame.transform.rotate(Head, -90) dis.blit(hdir, (x1,y1)) elif self.right: x1 = self.x + self.x_change - 10 y1 = self.y + self.y_change - 10 hdir = pygame.transform.rotate(Head, 90) dis.blit(hdir, (x1,y1)) elif self.up: x1 = self.x + self.x_change - 10 y1 = self.y + self.y_change - 10 hdir = pygame.transform.rotate(Head, 180) dis.blit(hdir, (x1,y1)) elif self.down: x1 = self.x + self.x_change - 10 y1 = self.y + self.y_change - 10 hdir = pygame.transform.rotate(Head, 0) dis.blit(hdir, (x1,y1)) pygame.display.update
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _draw_snake(self):\n if self._snake is not None:\n for pixel in self._snake.body:\n self._sensehat.set_pixel(pixel.x, pixel.y, self._snake.color)", "def snakePrint():\n for snake in snake_pos: \n pg.draw.rect(game_disp, white, snake)", "def draw(self):\n self.screen.fill(Color.BLACK)\n self.screen.blit(self.red_block, self.apple)\n [self.screen.blit(self.green_block, xy) for xy in self.snake]\n self.screen.blit(self.white_bar, (0, 0))\n self.draw_text(str(self.score), self.score_pos, size=32)\n pygame.display.flip()", "def snakeSetup(self,display):\n if display:\n self.screen = pygame.display.set_mode(windowSize)\n pygame.display.set_caption('Snake!')\n pygame.init()\n self.clock = pygame.time.Clock()\n self.dir = left #round(3 * random.random())\n self.s = snake(playerColor, unitSize,self.dir)\n self.setup = True", "def render(self):\n self.screen.reset()\n\n # draw snake\n surface = pymlgame.Surface(self.screen.width, self.screen.height)\n for part in self.snake.parts:\n surface.draw_dot(part, pymlgame.RED)\n self.screen.blit(surface)\n\n # draw apple\n self.screen.blit(self.apple_surface, self.apple)\n\n if self.snake.parts[0] == self.oldapple:\n self.snake.grow = True\n self.oldapple = None\n\n self.screen.update()\n\n #TODO: accelerate every 5 points by 1 fps\n self.clock.tick()", "def StartScreen():\n dis.blit(Sl, (dis_width/2 - 200, 0))\n message(\"Snake\" , 90, (23, 252, 3), (dis_width/2 - 100, 400))\n message(\"Press Enter To Start\", 50, (143,250,3), (dis_width/2 - 190, 500)) \n pygame.display.update()", "def __run(self):\n # init snake show\n self.__init_snake()\n self.__introduction.hide()\n # start ticktock for snake moving\n self.__ticker.start()\n # enable key press\n self.__enable_key = True", "def run_game():\n move()\n grid()\n\n for i in range(len(snake)):# draw every cell of snake \n\n pygame.draw.rect(DISPLAYSURF, GREEN, (snake[i][0], snake[i][1], CELLSIZE, CELLSIZE))\n\n apple = pygame.draw.rect(DISPLAYSURF, RED, (X_random, Y_random,CELLSIZE, CELLSIZE))#draw apple\n\n print(snake)", "def draw(self, screen):", "def show_start_screen():\n title_font = pygame.font.Font('freesansbold.ttf', 80)\n title_surface = title_font.render('Snake!', True, WHITE)\n title_rect = title_surface.get_rect()\n title_rect.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)\n DISPLAYSURF.fill(BGCOLOR)\n DISPLAYSURF.blit(title_surface, title_rect)", "def draw_game_state(snake):\n\n score_surface = BASICFONT.render('Score: ' + str(len(snake) - 3), True, WHITE)\n score_rect = score_surface.get_rect()\n score_rect.topleft = (WINDOWWIDTH - 120, 10)\n DISPLAYSURF.blit(score_surface, score_rect)\n pygame.display.update()", "def draw_snake(self):\r\n snake_body = self.get_body()\r\n for pos in snake_body: # draw entire snake\r\n p = pygame.Rect(pos[0], pos[1], 20, 20)\r\n if snake_body.index(pos) % 3 == 0: # every third section is black, others are yellow\r\n pygame.draw.rect(self.game_display, self.snake_color_1, p)\r\n else:\r\n pygame.draw.rect(self.game_display, self.snake_color_2, p)", "def render(self, screen) -> None:\n for y in range(self.width):\n for x in range(self.height):\n if self.get_value(Point(x, y)) == FieldState.SNAKE:\n draw.rect(screen, WHITE, (x*SIZE, y*SIZE, SIZE, SIZE))", "def snakegame():\n pygame.init()\n\n white = (255, 255, 255) # screen\n black=(0,0,0) # snake\n red=(255,0,0) # message\n blue = (0, 0, 255) # food\n green = (0, 255, 0) # score\n\n # game window\n dis_width = 600\n dis_height = 400\n dis = pygame.display.set_mode((dis_width, dis_height))\n 
pygame.display.set_caption(\"Snake Game by Griffin\")\n\n clock = pygame.time.Clock()\n\n # display\n snake_block = 10\n snake_speed = 15\n\n font_style = pygame.font.SysFont(\"bahnschrift\", 25)\n score_font = pygame.font.SysFont(\"bahnschrift\", 35)\n\n # exit message, score\n def message(msg, color):\n \"\"\"display messages with color preference\"\"\"\n mesg = font_style.render(msg, True, color)\n dis.blit(mesg, [dis_width / 3, dis_height / 2])\n\n def rules(msg, color):\n \"\"\"display rules with color preference\"\"\"\n mesg = font_style.render(msg, True, color)\n dis.blit(mesg, [dis_width / 8, dis_height / 4])\n \n def controls(msg, color):\n \"\"\"display rules with color preference\"\"\"\n mesg = font_style.render(msg, True, color)\n dis.blit(mesg, [dis_width / 3, dis_height / 3])\n\n def our_snake(snake_block, snake_list):\n \"\"\"display snake\"\"\"\n for x in snake_list:\n pygame.draw.rect(dis, black, [x[0], x[1], snake_block, snake_block])\n\n def your_score(score):\n \"\"\"display score with color preference\"\"\"\n value = score_font.render(\"Your Score: \" + str(score), True, green)\n dis.blit(value, [0, 0])\n\n def game_loop(): \n \"\"\"run game or quit when asked\"\"\" \n # prev auto quit\n game_over = False\n game_close = False\n \n # display\n x1 = dis_width / 2\n y1 = dis_height / 2\n x1_change = 0\n y1_change = 0\n\n snake_list = []\n snake_length = 1\n \n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n \n while not game_over:\n # wait game \n while game_close == True:\n dis.fill(white)\n your_score(snake_length - 1)\n rules(\"gather food without hitting the border or your snake body\", black)\n controls(\"WASD or arrow keys\", red)\n message(\"Press Q-Quit or E-Play\", red)\n pygame.display.update()\n \n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_e: # play again\n game_loop()\n if event.key == pygame.K_q: # quit\n game_over = True\n game_close = False\n # in game\n for event in pygame.event.get():\n # close on x\n if event.type == pygame.QUIT:\n game_over = True\n # move snake\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\n x1_change = -snake_block\n y1_change = 0\n elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n x1_change = snake_block\n y1_change = 0\n elif event.key == pygame.K_UP or event.key == pygame.K_w:\n x1_change = 0\n y1_change = -snake_block\n elif event.key == pygame.K_DOWN or event.key == pygame.K_s:\n x1_change = 0\n y1_change = snake_block\n \n # border block\n if x1 >= dis_width or x1 < 0 or y1 >= dis_height or y1 < 0:\n game_close = True\n \n x1 += x1_change\n y1 += y1_change\n dis.fill(white)\n pygame.draw.rect(dis, blue, [foodx, foody, snake_block, snake_block])\n snake_head = []\n snake_head.append(x1)\n snake_head.append(y1)\n snake_list.append(snake_head)\n\n if len(snake_list) > snake_length:\n del snake_list[0]\n\n for x in snake_list[:-1]:\n if x == snake_head:\n game_close == True\n \n our_snake(snake_block, snake_list)\n your_score(snake_length - 1)\n pygame.display.update()\n \n if x1 == foodx and y1 == foody:\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n snake_length += 1\n clock.tick(snake_speed)\n \n pygame.quit()\n quit()\n\n game_loop()", "def paint_snake(self):\n self._canvas.delete('snake')\n for x, y in 
self._snake:\n self._canvas.create_rectangle(x*SCALE, y*SCALE, x*SCALE+SHIFT, y*SCALE+SHIFT,\n outline='white', fill=SNAKE_COLOUR, tag='snake')\n if self._snake.snake_coordinates().count((x, y)) > 1:\n self._canvas.create_rectangle(x*SCALE, y*SCALE, x*SCALE+SHIFT, y*SCALE+SHIFT,\n outline='white', fill=DIED_SNAKE_COLOUR, tag='snake')", "def snake(snake_x, snake_y, snake_scale_x, snake_scale_y, snake_color):\n arcade.draw_rectangle_filled(snake_x, snake_y, snake_scale_x, snake_scale_y, snake_color)", "def start(self):\n self._canvas.focus_set()\n self._canvas.bind('<Left>', lambda event: self._move_snake(LEFT))\n self._canvas.bind('<Right>', lambda event: self._move_snake(RIGHT))\n self._canvas.bind('<Up>', lambda event: self._move_snake(UP))\n self._canvas.bind('<Down>', lambda event: self._move_snake(DOWN))\n self.put_apple()\n self._after_id.append(self._canvas.after(SNAKE_SPEED, self._move_snake, RIGHT))\n self.paint()", "def play_game(self):\n\n while True:\n self.pixels = [clear] * 64\n\n # sense HAT controller\n for event in self.sense.stick.get_events():\n if event.action == \"pressed\":\n if event.direction == \"up\":\n self.set_direction(0)\n elif event.direction == \"right\":\n self.set_direction(1)\n elif event.direction == \"down\":\n self.set_direction(2)\n elif event.direction == \"left\":\n self.set_direction(3)\n # insert to the start of the array\n self.trail.insert(0, [self.trail[0][0] + self.direction[0], self.trail[0][1] + self.direction[1]])\n\n # one border cross in and the other off\n if self.trail[0][0] < 0:\n self.trail[0][0] = 7\n if self.trail[0][1] < 0:\n self.trail[0][1] = 7\n if self.trail[0][0] > 7:\n self.trail[0][0] = 0\n if self.trail[0][1] > 7:\n self.trail[0][1] = 0\n\n # we cover the situation, when the apple pos is a snake pos in this if statement\n if self.trail[0] == self.apple_pos:\n self.apple_pos = []\n while self.apple_pos == []:\n self.apple_pos = [random.randint(0, 7), random.randint(0, 7)]\n if self.apple_pos in self.trail:\n self.apple_pos = []\n self.length += 1\n # snake runs into itself\n elif self.trail[0] in self.trail[1:]:\n self.length = 1\n else:\n while len(self.trail) > self.length:\n # remove from the end ( \"like\" moving, but the length is correct)\n self.trail.pop()\n\n for pos in self.trail:\n # snake visualize on the pixel map (2d coord to 1d coord)\n self.pixels[pos[1] * 8 + pos[0]] = white\n\n # y * rowSize + x -> coordinate convert because of the pixel map\n self.pixels[self.apple_pos[1] * 8 + self.apple_pos[0]] = red\n # apple position (red led)\n self.sense.set_pixels(self.pixels)\n\n time.sleep(0.15)", "def on_draw(delta_time):\n # draws all our objects\n arcade.start_render()\n\n generate_grid()\n apple()\n snake(on_draw.snake_part_x, on_draw.snake_part_y, 20, 20, snake_color)\n snake(on_draw.snake_part2_x, on_draw.snake_part2_y, 20, 20, snake_color)\n snake(on_draw.snake_part3_x, on_draw.snake_part3_y, 20, 20, snake_color)\n snake(on_draw.snake_part4_x, on_draw.snake_part4_y, 20, 20, snake_color)\n snake(on_draw.snake_part5_x, on_draw.snake_part5_y, 20, 20, snake_color)\n snake(on_draw.snake_part6_x, on_draw.snake_part6_y, 20, 20, snake_color)\n\n\n if on_draw.snake_part_x <= 230:\n snake(on_draw.snake_part6_x + 20, on_draw.snake_part6_y, 20, 20, snake_color)\n\n\n \"\"\" If statements that will make snake part one move \"\"\"\n if on_draw.snake_part_x >= 550:\n on_draw.snake_part_x -= 20\n\n elif on_draw.snake_part_x <= 550:\n on_draw.snake_part_y += 20\n if on_draw.snake_part_y >= 500:\n on_draw.snake_part_y -= 20\n 
on_draw.snake_part_x -= 20\n if on_draw.snake_part_x <= 180:\n on_draw.snake_part_x += 20\n\n \"\"\" If statements that will make snake part two move \"\"\"\n if on_draw.snake_part2_x >= 550:\n on_draw.snake_part2_x -= 20\n\n elif on_draw.snake_part2_x <= 550:\n on_draw.snake_part2_y += 20\n if on_draw.snake_part2_y >= 500:\n on_draw.snake_part2_y -= 20\n on_draw.snake_part2_x -= 20\n if on_draw.snake_part2_x <= 200:\n on_draw.snake_part2_x += 20\n\n \"\"\" If statements that will make snake part three move \"\"\"\n if on_draw.snake_part3_x >= 550:\n on_draw.snake_part3_x -= 20\n\n elif on_draw.snake_part3_x <= 550:\n on_draw.snake_part3_y += 20\n if on_draw.snake_part3_y >= 500:\n on_draw.snake_part3_y -= 20\n on_draw.snake_part3_x -= 20\n if on_draw.snake_part3_x <= 220:\n on_draw.snake_part3_x += 20\n\n \"\"\" If statements that will make snake part four move \"\"\"\n if on_draw.snake_part4_x >= 550:\n on_draw.snake_part4_x -= 20\n\n elif on_draw.snake_part4_x <= 550:\n on_draw.snake_part4_y += 20\n if on_draw.snake_part4_y >= 500:\n on_draw.snake_part4_y -= 20\n on_draw.snake_part4_x -= 20\n if on_draw.snake_part4_x <= 240:\n on_draw.snake_part4_x += 20\n\n \"\"\" If statements that will make snake part five move \"\"\"\n if on_draw.snake_part5_x >= 550:\n on_draw.snake_part5_x -= 20\n\n elif on_draw.snake_part5_x <= 550:\n on_draw.snake_part5_y += 20\n if on_draw.snake_part5_y >= 500:\n on_draw.snake_part5_y -= 20\n on_draw.snake_part5_x -= 20\n if on_draw.snake_part5_x <= 260:\n on_draw.snake_part5_x += 20\n\n \"\"\" If statements that will make snake part six move \"\"\"\n if on_draw.snake_part6_x >= 550:\n on_draw.snake_part6_x -= 20\n\n elif on_draw.snake_part6_x <= 550:\n on_draw.snake_part6_y += 20\n if on_draw.snake_part6_y >= 500:\n on_draw.snake_part6_y -= 20\n on_draw.snake_part6_x -= 20\n if on_draw.snake_part6_x <= 280:\n on_draw.snake_part6_x += 20", "def phase_6(self):\n test_board_1 = board(5, 5, snake_init_coordinates = [3, 1], fruit_init_coordinates = [3, 2])\n test_board_1.Snake_init_from_lst([[3, 1], [4, 1], [4, 2], [4, 3], [4, 4], [3, 4], [2, 4], [1, 4], [0, 4], [0, 3]])\n test_board_1.Update_board()\n render = Render_engine('terminal', test_board_1)\n render.render_terminal(test_board_1)\n\n print(\"\\n\\nSpecific Information of this snake\")\n print(\"*******************************\")\n print(test_board_1.Snake)", "def render(self):\n self._surface.fill(Color('black'))\n for y in range(0, self.height):\n for x in range(0, self.length):\n if self.grid.get_cell(x, y) == CellType.snake:\n self._surface.blit(self.snake_cell_image,\n (x * self.cell_size, y * self.cell_size))\n elif self.grid.get_cell(x, y) == CellType.apple:\n self._surface.blit(self.apple_cell_image,\n (x * self.cell_size, y * self.cell_size))\n pg.display.update()", "def play(self):\n\n self.useKeys = true;\n clock = pygame.time.Clock()\n s = snake(playerColor, unitSize)\n dir = round(3 * random.random())\n s.move(self.dir)\n screen = pygame.display.set_mode((gameSize*unitSize,gameSize*unitSize))\n pygame.display.set_caption('Snake!')\n global boardRect, boardPosX, boardPosY, score\n boardRect = pygame.Rect((0, 0), (gameSize * unitSize,\n gameSize * unitSize))\n tempPosX = boardPosX\n tempPosY = boardPosY\n boardPosX = 0\n boardPosY = 0\n\n while 1:\n if not gameOver:\n #pygame.time.delay(50)\n #clock.tick(10)\n self.draw(screen, s)\n self.score = score\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n boardPosX = tempPosX\n boardPosY = tempPosY\n boardRect = 
pygame.Rect((boardPosX, boardPosY), (gameSize * unitSize,\n gameSize * unitSize))\n self.useKeys = false\n return\n\n self.checkMove(s);\n\n else:\n self.gameOver = True;\n pygame.font.init()\n screen.fill(0)\n pygame.draw.rect(screen, boardColor, boardRect)\n font = pygame.font.Font(pygame.font.get_default_font(),\n 15)\n self.score = score;\n text = font.render('Score: ' + str(score), True, (255,\n 255, 255), (100, 100, 100))\n textRect = text.get_rect()\n screen.blit(text, textRect)\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n boardPosX = tempPosX\n boardPosY = tempPosY\n boardRect = pygame.Rect((boardPosX, boardPosY), (gameSize * unitSize,\n gameSize * unitSize))\n self.useKeys = false\n return", "def main_game():\n #All the initial perimeters required to Run the Game\n Game_quit = False\n Loss = False\n foodx = round(random.randrange(50, dis_width - 50))\n foody = round(random.randrange(50, dis_height - 50))\n snake_Body = []\n score = 4\n redraw(dis)\n\n while not Game_quit:\n \n keys = pygame.key.get_pressed()\n \n Game_quit = Quit()\n \n if not snake.Start: \n StartScreen()\n if keys[pygame.K_RETURN]:\n snake.Start = True \n\n if Loss:\n \n EndScreen(score)\n \n if keys[pygame.K_RETURN]:\n main_game()\n \n elif keys[pygame.K_ESCAPE]:\n Game_quit = True\n\n snake.x = dis_width/2\n snake.y = dis_height/2\n #Playing Game......\n while snake.Start and not Loss and not Game_quit :\n\n Clock.tick(20)\n Game_quit = Quit() \n \n\n Movement()\n \n snake.x += snake.x_change\n snake.y += snake.y_change\n \n if snake.x < 0 or snake.x > dis_width - snake.width or snake.y < 0 or snake.y > dis_height - snake.height:\n Loss = True\n \n #For Drawing Snake Body having different x and y with each part of snake \n \n snake_Part = []\n snake_Part.append(snake.x)\n snake_Part.append(snake.y)\n snake_Part.append(Mid)\n snake_Part.append(snake.ang)\n snake_Body.append(snake_Part) \n\n if len(snake_Body) > score:\n \n del snake_Body[0]\n \n #For Drawing the Head of the snake\n \n snake_x = snake.x\n snake_y = snake.y\n snake_x += snake.x_change \n snake_y += snake.y_change\n headposx = snake_x\n headposy = snake_y \n \n #Collision Detection between head and body\n \n for track in snake_Body[:-1]:\n \n if track[0] == headposx and track[1] == headposy:\n Loss = True\n \n snake.draw_snake(dis, snake_Part, snake_Body) \n fooddraw(foodx, foody) \n \n d = Distance(foodx,foody)\n #Collision Detection between Snake and the Food\n if d < 40:\n foodx = round(random.randrange(50, dis_width - 50))\n foody = round(random.randrange(50, dis_height - 50))\n score += 1\n \n scoredraw(score)\n redraw(dis)", "def __new_snake(self):\n self._snake = self.Snake(Direction.RIGHT, Position(4, 4), Position(3, 4), Position(2, 4))", "def paint(self):\n self.paint_snake()\n self.paint_apple()\n root.mainloop()", "def snake():\n return render_template('snake.html')", "def draw(self):\n if not self.pressed:\n #draw info prompt in room\n arcade.draw_rectangle_filled(self.center_x, self.center_y, 20, 20, arcade.color.ANTIQUE_BRASS)\n arcade.draw_text(\"?\", self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")\n else:\n #draw info to top of screen when clicked\n arcade.draw_text(self.text, 10, settings.HEIGHT - 10, arcade.color.BLACK, anchor_x=\"left\", anchor_y=\"top\")", "def draw_laser(self):\n pygame.draw.rect(self.screen, self.color, self.rect)", "def update(self, pressed_keys):\r\n # read key presses in event log and change 
position accordingly\r\n if pressed_keys[K_UP]:\r\n if self.direction == \"down\":\r\n pass\r\n else:\r\n self.yChange = -block\r\n self.xChange = 0\r\n self.direction = \"up\"\r\n self.surf = pygame.transform.scale(self.image[0], (block, block))\r\n if pressed_keys[K_DOWN]:\r\n if self.direction == \"up\":\r\n pass\r\n else:\r\n self.yChange = block\r\n self.xChange = 0\r\n self.direction = \"down\"\r\n self.surf = self.imgD\r\n if pressed_keys[K_LEFT]:\r\n if self.direction == \"right\":\r\n pass\r\n else:\r\n self.xChange = -block\r\n self.yChange = 0\r\n self.direction = \"left\"\r\n self.surf = self.imgL\r\n if pressed_keys[K_RIGHT]:\r\n if self.direction == \"left\":\r\n pass\r\n else:\r\n self.xChange = block\r\n self.yChange = 0\r\n self.direction = \"right\"\r\n self.surf = self.imgR\r\n\r\n # when snake passes the boundaries of the screen it will loop through to the opposite side\r\n if self.x >= dis_width:\r\n self.x = 0\r\n if self.x < 0:\r\n self.x = dis_width\r\n if self.y >= dis_height:\r\n self.y = 0\r\n if self.y < 0:\r\n self.y = dis_height\r\n\r\n # add the direction change based on button press\r\n self.x += self.xChange\r\n self.y += self.yChange\r\n\r\n self.head = []\r\n self.head.append(self.x)\r\n self.head.append(self.y)\r\n self.head.append(self.direction)\r\n self.list.append(self.head)\r\n\r\n #if list has more items than the length of snake delete first item in list\r\n if len(self.list) > self.length:\r\n del self.list[0]", "def draw_s(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(20)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)", "def main():\n arcade.open_window(WINDOW_WIDTH, WINDOW_HEIGHT, \"Snake.exe\")\n # Set the window background colour\n arcade.set_background_color(light_green)\n\n # Calls the on_draw method every 1/3(20 seconds) of a second\n arcade.schedule(on_draw, 1/3)\n # Keeps the window open until closed by the user\n arcade.run()", "def setup(x, y, w, h, t):\n filldraw_rectangle(x,y,w,h,t,\"red\")\n \n for y in range(10):\n if (y % 2) != 0:\n pos = (10*y)+10\n else:\n pos =((10*y)-9)+10 \n for x in range(10):\n filldraw_rectangle(x,y,1,1,t,\"red\")\n if pos in snake1 or pos in snake2: \n filldraw_rectangle(x,y,1,1,t,\"green\")\n if pos in ladder1 or pos in ladder2: \n filldraw_rectangle(x,y,1,1,t,\"yellow\")\n write_num(x,y,t,pos) \n if (y % 2) != 0:\n pos = pos - 1\n else:\n pos = pos + 1", "def draw(self):\r\n self.__screen.draw_asteroid(self, self.__x, self.__y)", "def on_draw(self):\n arcade.start_render()\n self.snake.draw()\n self.apple_list.draw()\n if self.state not in (2,3): #if not paused or ended, it shows score and lives\n stats_overlay(self.width, self.height-20, self.score, self.lives)\n if self.state == 2:\n pause_overlay(self.width//2, 7*self.height//12) #shows that it is paused\n if self.state == 3: #shows game over\n game_over_overlay(self.width//2, 7*self.height//12, self.score)", "def draw_piece(self):\n self.screen.blit(self.image, self.rect)", "def redrawWindow(surface):\r\n global rows, width, s, snack\r\n surface.fill((0,0,0)) # fill whole window with black colour\r\n s.draw(surface) #put snake in window\r\n snack.draw(surface) #put snack in window\r\n drawGrid(width,rows, surface) #draw grid structure\r\n pygame.display.update() #displays updated window\r", "def display_card(self):\n # draw the background\n bg_image = 
pygame.image.load(self.background)\n pygame.display.flip()\n # draw the greeting\n self.draw_greeting()\n # draw the snowman\n frosty = Snowman(self.snowman, self.screen)\n allsprites = pygame.sprite.RenderPlain((frosty,))\n allsprites.update()\n clock = pygame.time.Clock()\n # play the music\n pygame.mixer.music.play(-1)\n # main game loop\n while True:\n # update the state of the various game \"assets\" in this loop\n self.update_state(clock, bg_image, allsprites)\n # handle any input from the user\n self.handle_input(pygame.event.get())\n # display the result\n pygame.display.flip()", "def setup_screen():\n screen = Screen()\n screen.setup(width=600, height=600)\n screen.bgcolor(\"black\")\n screen.title(\"My Snake Game\")\n screen.tracer(0)\n return screen", "def main():\n field = Field(10, 10)\n snake = Snake((0, 0))\n game = Game(field, snake)\n game.start()", "def draw(snake, head_pos, fruit_pos, game_field_dimensions=GAME_FIELD,\n game_field_sym=' ', snake_sym='*', fruit_sym='F', border_sym='#', \n head_sym='@',game_message=''):\n \n game_field_width = game_field_dimensions[0]\n game_field_height = game_field_dimensions[1]\n \n head_x = head_pos[0]\n head_y = head_pos[1]\n \n fruit_x = fruit_pos[0]\n fruit_y = fruit_pos[1]\n \n for i in range(game_field_height):\n for j in range(game_field_width):\n # Draw the game field borders ########\n if i == 0 or i == game_field_height-1: # Vertical borders\n if j == game_field_width-1: # Right border\n print(border_sym)\n else: # Left border\n print(border_sym, end='')\n else: # Horizontal borders\n if j == game_field_width-1: # Bottom border\n print(border_sym)\n elif j == 0: # Top border\n print(border_sym, end='')\n ##### End draw borders################\n \n # Draw snake's head ##################\n elif j==head_x and i==head_y:\n print(head_sym, end='') \n ##### End draw snake's head###########\n \n # Draw snake's body ##################\n elif check_point([j,i], snake):\n print(snake_sym,end='')\n ##### End draw snake's body ##########\n \n # Draw fruit ######################### \n elif j==fruit_x and i==fruit_y:\n print(fruit_sym, end='') \n ##### End draw fruit #################\n \n # Fill the game field ################\n else:\n print(game_field_sym, end='')\n \n ##### End fill the game field ########\n \n # Draw the game message\n if game_message:\n print(game_message)\n \n # ~ if DEBUG == True:\n # ~ print('head x =', head_x)\n # ~ print('head y =', head_y)\n \n # ~ print('fruit x =', fruit_x)\n # ~ print('fruit y =', fruit_y)\n \n \n # ~ print(snake) ", "def display_loop(self):\n from time import sleep\n self.displaying = True\n while self.displaying:\n print self\n sleep(.083)\n print loc(self.y, self.x) + ' '*self.size", "def draw(self):\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.high_score_image, self.high_score_rect)\n self.screen.blit(self.level_image, self.level_rect)\n self.ships.draw(self.screen)", "def update(self):\n head = (\n (self.snake[0][0] + self.xv * self.ac),\n (self.snake[0][1] + self.yv * self.ac),\n )\n self.snake.insert(0, head)\n\n if head == self.apple:\n self.score += 10\n self.apple = None\n while not self.apple:\n xy = random.choice(MAP)\n self.apple = xy if xy not in self.snake else None\n else:\n self.snake.pop()\n\n if (\n head[0] < 0\n or head[0] >= WIN_W\n or head[1] < BLOCK_H\n or head[1] >= WIN_H\n ):\n self.alive = False\n\n if head in self.snake[1:]:\n self.alive = False\n\n if self.fps % 100 == 0:\n self.fps += 5", "def draw(self): # called to draw this 
piece on the board\r\n \r\n pygame.draw.circle(screen, self.color, (self.x, self.y), 23)\r\n if self.isKing:\r\n screen.blit(king_text, (self.x-12, self.y-8))", "def display_ship(self):\r\n self.screen.blit(self.ship, self.rect)", "def gameUpdate():\n score_txt = font.render('Score: ' + str(score), True, white)\n hscore_txt = font.render('High Score: ' + str(high_score), True, white)\n pg.draw.rect(game_disp, red, food_pos)\n game_disp.blit(score_txt, (0,0))\n game_disp.blit(hscore_txt, (canv_w - 150, 0))\n snakePrint()\n screen.blit(game_disp, (0,0))\n pg.display.update()", "def draw():", "def draw_screen(self):\n\t\tself.current_screen.draw_screen(self.master_screen)", "def draw_snake(snake, pad):\n\n ps = [None] + snake + [None]\n for p in zip(ps[:-2], ps[1:-1], ps[2:]):\n d0 = None if p[0] is None else ((p[1] - p[0])[0], (p[1] - p[0])[1])\n d1 = None if p[2] is None else ((p[1] - p[2])[0], (p[1] - p[2])[1])\n\n c = {\n None: {None: None, (-1, 0): '‥', (1, 0): '¨', (0, -1): ':', (0, 1): ':'},\n (-1, 0): {None: '╽', (-1, 0): None, (1, 0): '║', (0, -1): '╔', (0, 1): '╗'},\n (1, 0): {None: '╿', (-1, 0): '║', (1, 0): None, (0, -1): '╚', (0, 1): '╝'},\n (0, -1): {None: '╼', (-1, 0): '╔', (1, 0): '╚', (0, -1): None, (0, 1): '═'},\n (0, 1): {None: '╾', (-1, 0): '╗', (1, 0): '╝', (0, -1): '═', (0, 1): None},\n }[d0][d1]\n\n pad.addch(p[1][0], p[1][1], c)", "def display(self, s):\n pygame.display.flip()\n time.sleep(s)\n self.screen.fill(self._background)", "def interact():\n env = SnakeEnv()\n done = False\n r = 0\n action = random.randrange(4)\n delay_time = 0.2\n\n # After the first run of the method env.render()\n # env.renderer.viewer obtains an attribute 'window'\n # which is a pyglet.window.Window object\n env.render(mode='human')\n # Use the arrows to control the snake's movement direction\n @env.renderer.viewer.window.event\n def on_text_motion(motion):\n \"\"\"\n Events to actions mapping\n \"\"\"\n\n nonlocal action\n if motion == MOTION_UP:\n action = 0\n elif motion == MOTION_DOWN:\n action = 2\n elif motion == MOTION_LEFT:\n action = 3\n elif motion == MOTION_RIGHT:\n action = 1\n\n while not done:\n time.sleep(delay_time)\n obs, reward, done, info = env.step(action)\n env.render(mode='human')\n if reward:\n r += reward\n # Speeding up snake after eating food\n delay_time -= 1/6 * delay_time\n\n return r", "def draw(self):\n\n self.squares.draw(self.screen)\n if not self.hide_grid:\n self.draw_grid()\n self.fleas.draw(self.screen)\n pygame.display.flip()", "def draw(self):\n\n State.screen.draw()", "def draw(self, screen):\n for i in range(self.tiles_len):\n x, y = self.tilepos[i]\n screen.blit(self.images[i], (x, y))\n self.draw_text(screen, \"Moves : \" + str(self.nb_move), 40, 500, 10, 255, 255, 255, False)", "def draw(screen):\n MY.restart_button.draw(screen)\n MY.display_text.draw(screen)", "def move(self):\r\n piece = []\r\n if self.direction == \"UP\":\r\n piece = [self.body[0][0], self.body[0][1] - self.POS_CHANGE] # create piece at new coordinates\r\n elif self.direction == \"DOWN\":\r\n piece = [self.body[0][0], self.body[0][1] + self.POS_CHANGE]\r\n elif self.direction == \"LEFT\":\r\n piece = [self.body[0][0] - self.POS_CHANGE, self.body[0][1]]\r\n elif self.direction == \"RIGHT\":\r\n piece = [self.body[0][0] + self.POS_CHANGE, self.body[0][1]]\r\n\r\n if piece:\r\n if piece in self.body: # Lose game if snake touches itself\r\n self.alive = False\r\n else:\r\n self.body.insert(0, piece) # insert new piece at head of snake\r\n if len(self.body) > self.length:\r\n 
self.body.pop() # delete last piece of snake, if length isnt increased\r\n\r\n self.draw_snake()", "def draw(self):\n self.game.screen.blit(self.image, self.game.off(self.pos))", "def show_score(self):\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.high_score_text_image, self.high_score_text_rect)\n self.screen.blit(self.high_score_image, self.high_score_rect)\n self.screen.blit(self.wave_image, self.wave_rect)\n self.spaceships.draw(self.screen)", "def display(self):\n mg_w = self.width\n mg_h = self.height\n str_to_prt = \"\\n\" * self.y + (\" \" * self.x + \"#\" * mg_w + '\\n') * mg_h\n print(str_to_prt[:-1])", "def show(self, screen):\n x_display = self.xy_position[0] * constants.CELL_SIZE\n y_display = self.xy_position[1] * constants.CELL_SIZE\n screen.blit(self.image, (x_display, y_display))", "def draw(self, screen):\n lines = self.text.strip().split('\\n')\n y = self.y\n for line in lines:\n self.ui.show_text(line, (self.x, y), 30)\n y += 32", "def draw(self):\r\n self.scr.fill(SCREEN_COLOR)\r\n self.label.draw()\r\n pygame.display.flip()", "def launchGame(): \n # On rejoint la partie\n game.join()\n\n #On affecte le nom\n game.player.setName(options.name)\n\n #On créer une nouvelle fenetre\n win = createNewWin(curses)\n\n #On creer notre premiere pomme...\n win.addch(game.apple.coordx, game.apple.coordy, 'O', curses.color_pair(3))\n\n #On indique la direction par defaut du serpent, il ira par defaut a droite\n key = curses.KEY_RIGHT\n\n #On effectue une boucle infinie tant que la touche Echap (27) n'est pas\n #pressée.\n while key != 27:\n #On ajoute le score a la ligne 0, colonne 2\n #Le score est calcule en recuperant la longueur du serpent actuel\n #et en retirant 2 (sa valeur initiale)\t\n win.addstr(0,2,' Joueur : %s Score : %s ' %(game.player.name, str(game.player.score)), curses.color_pair(1))\n\n #On calcul un mouvement de ralentissement dependant de la longueur du\n #serpent\n win.timeout(180+ ( (len(game.snake.oSnake)-2) % 10- (len(game.snake.oSnake)-2) ) * 3 )\n\n #On 'hook' les touches\n getkey = win.getch()\n\n #On recupere la valeur de la touche par defaut\n key = key if getkey==-1 else getkey\n\n #Suivant la touche pressée, on modifie les positions de notre serpent\n game.snake.move(key)\n\n #On supprime les derniers elements sur lequel le Snake passe\n win.addch(game.snake.oSnake[len(game.snake.oSnake)-1][1],\n game.snake.oSnake[len(game.snake.oSnake)-1][0],' ')\n\n #On supprime un element du snake pour eviter la collision\n if win.inch(game.snake.oSnake[0][1], game.snake.oSnake[0][0]) & 255 == 32:\n game.snake.oSnake.pop()\n\n #Si on passe sur un element O\t\n elif win.inch(game.snake.oSnake[0][1],game.snake.oSnake[0][0]) & 255 == ord('O'):\n #On ajoute 1 point a notre Joueur\n game.player.addPoint()\n\n #On recalcule des nouvelles coordonnees pour la pomme\n game.apple.newApple()\n #On verifie les nouvelles coordonnees\n while game.apple.checkApple(game.snake.oSnake) != True:\n game.apple.newApple()\n\n #On l'affiche a l'ecran\n win.addch(game.apple.coordx, game.apple.coordy, 'O', curses.color_pair(3))\n\t\t\n else:\n break\n\n #On affiche une partie de notre Snake\n win.addch(game.snake.oSnake[0][1],game.snake.oSnake[0][0],'X', curses.color_pair(2))\n\n\n #Si on sort de la boucle (GameOver), alors on\n #détruit les fenetres\n destroyWin()\n\n #A la fin de la partie (game over), on affiche l'écran \n showGameOver()", "def render():\n screen.blit(background ,[0,0]) # paint the background with white colour\n for position in 
wall_positions:\n screen.blit(wall_surface , position)\n for position in hole_positions:\n screen.blit(hole_surface , position) \n for position in food_positions: \n screen.blit(food_surface, position)\n for position in goodfood_positions: \n screen.blit(goodfood_surface, position)\n for position in badfood_positions: \n screen.blit(badfood_surface, position)\n for position in xfood_positions: \n screen.blit(xfood_surface, position)\n for position in snake_body_positions: \n screen.blit(snake_surface, position)\n text_screen('Score: %s speed: %s Life: %d' %(str(score),str(speed),life),20,10) \n pygame.display.flip()", "def draw(self):\n\n self.state_stack.peek().draw(self.screen)", "def draw(self, screen):\n self.draw_left_zone(screen)\n self.draw_middle_zone(screen)\n self.draw_right_zone(screen)", "def _update_screen(self):\n self.screen.fill(self.settings.bg_color)\n self.sideways_ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n pygame.display.flip()", "def display_pygame():\n sprite_group.clear(screen, eraser_image)\n sprite_group.draw(screen)\n pygame.display.update()", "def draw(self, force=False):\n self.display.draw(force)", "def game_loop(): \n # prev auto quit\n game_over = False\n game_close = False\n \n # display\n x1 = dis_width / 2\n y1 = dis_height / 2\n x1_change = 0\n y1_change = 0\n\n snake_list = []\n snake_length = 1\n \n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n \n while not game_over:\n # wait game \n while game_close == True:\n dis.fill(white)\n your_score(snake_length - 1)\n rules(\"gather food without hitting the border or your snake body\", black)\n controls(\"WASD or arrow keys\", red)\n message(\"Press Q-Quit or E-Play\", red)\n pygame.display.update()\n \n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_e: # play again\n game_loop()\n if event.key == pygame.K_q: # quit\n game_over = True\n game_close = False\n # in game\n for event in pygame.event.get():\n # close on x\n if event.type == pygame.QUIT:\n game_over = True\n # move snake\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\n x1_change = -snake_block\n y1_change = 0\n elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n x1_change = snake_block\n y1_change = 0\n elif event.key == pygame.K_UP or event.key == pygame.K_w:\n x1_change = 0\n y1_change = -snake_block\n elif event.key == pygame.K_DOWN or event.key == pygame.K_s:\n x1_change = 0\n y1_change = snake_block\n \n # border block\n if x1 >= dis_width or x1 < 0 or y1 >= dis_height or y1 < 0:\n game_close = True\n \n x1 += x1_change\n y1 += y1_change\n dis.fill(white)\n pygame.draw.rect(dis, blue, [foodx, foody, snake_block, snake_block])\n snake_head = []\n snake_head.append(x1)\n snake_head.append(y1)\n snake_list.append(snake_head)\n\n if len(snake_list) > snake_length:\n del snake_list[0]\n\n for x in snake_list[:-1]:\n if x == snake_head:\n game_close == True\n \n our_snake(snake_block, snake_list)\n your_score(snake_length - 1)\n pygame.display.update()\n \n if x1 == foodx and y1 == foody:\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n snake_length += 1\n clock.tick(snake_speed)\n \n pygame.quit()\n quit()", "def Draw(self):\n print ( 10*\"*\")\n print (\"Player \" + 
self.character + \" says:\")\n print (\"It's a Draw\")\n print ( 10*\"*\")", "def draw(self, screen):\n screen.blit(self.surface, self.rect)", "def __draw_lostscreen(self) -> None:\n self.__draw_background()\n line1 = LOOSE_FONT.render(\"You loose!! Highscore: \" + str(round(self.highscore)), 1, RED)\n line2 = LOOSE_FONT.render(\"Press enter to play again\", 1, RED)\n self.win.blit(line1, (round((WIDTH/2) - line1.get_width()/2), round(HEIGHT/2 - line1.get_height()/2)))\n self.win.blit(line2, (round((WIDTH/2) - line2.get_width()/2), round(HEIGHT/2 - line1.get_height()/2) + line1.get_height() + 5)) \n pygame.display.update()", "def draw(self):\n self.screen.blit(self.image, self.rect)", "def draw(self):\n self.strip.show()", "def show_score(self):\n\t\tself.screen.blit(self.score_image, self.score_rect)\n\t\tself.screen.blit(self.high_score_image, self.high_score_rect)\n\t\tself.screen.blit(self.level_image, self.level_rect)\n\t\t#Draw the ships to the screen.\n\t\tself.ships.draw(self.screen)", "def display_debug(self, screen: pygame.Surface):\n\t\tfor p1, p2 in self.__calculate_points():\n\t\t\tpygame.draw.line(screen, Color(255).get(), p1.get_int(), p2.get_int(), 2)\n\t\t\tpygame.draw.circle(screen, Color(255, 0, 0).get(), p1.get_int(), 2)\n\n\t\tpygame.draw.circle(screen, Color(0, 255, 0).get(), self.__target.get_int(), 2)", "def on_show_view(self):\n self.setup()\n arcade.set_background_color(arcade.color.BLACK)\n arcade.set_viewport(0, constants.SCREEN_WIDTH - 1, 0, constants.SCREEN_HEIGHT - 1)", "def display(self):\n prow = self.__width * '#'\n nstr = self.y * \"\\n\"\n for x in range(self.__height):\n nstr += self.x * \" \"\n nstr += prow\n if x == (self.__height - 1):\n break\n nstr += \"\\n\"\n print(nstr)", "def draw(self):\r\n\r\n self.screen.fill((0,0,0))\r\n self.sprite_group.draw(self.screen)\r\n pygame.display.flip()", "def goto(self, x, y):\n # note that the snake can get outside of the canvas!\n if(self._gridmode):\n self._x = round(x)\n self._y = round(y)\n else:\n self._x = round(x, 2)\n self._y = round(y, 2)\n \n self._appendCurrentState()", "def draw(self):\n self.screen.blit(self.image, (self.x_pos1, self.y_pos))\n self.screen.blit(self.image, (self.x_pos2, self.y_pos))", "def show_score(self):\r\n\t\tself.screen.blit(self.score_image, self.score_rect)\r\n\t\tself.screen.blit(self.high_score_imgae, self.high_score_rect)\r\n\t\tself.screen.blit(self.level_image, self.level_rect)\r\n\t\t#Draw ships.\r\n\t\tself.ships.draw(self.screen)", "def create_snake(self):\n for position in SNAKE_STARTING_POSITIONS:\n self.add_segment(position)", "def get_new_snake():\n global direction, snake, X_start, Y_start\n X = [x for x in range(40, WINDOWWIDTH - 80, 20)] #multiplier list 20\n Y = [y for y in range(40,WINDOWHEIGHT - 80, 20)]#multiplier list 20\n X_start = random.choice(X)#random multiplier of 20\n Y_start = random.choice(Y)#random multiplier of 20\n direction = \"right\"\n snake = [[X_start, Y_start], [X_start - 20, Y_start], [X_start - 40, Y_start]] #first 3 cells of snake", "def draw_kame(self):\r\n #pygame.draw.rect(self.screen, self.color, self.rect, self.image)\r\n self.screen.blit(self.image, self.rect)", "def reset(self, **kwargs):\n pg.display.set_caption('Snake')\n\n self._surface = pg.display.set_mode(\n (self.length * self.cell_size, self.height * self.cell_size),\n pg.HWSURFACE\n )\n self.grid = Grid(\n cell_size=self.cell_size,\n length=self.length,\n height=self.height\n )\n self.snake_cell_image = pg.image.load(__SNAKE_CELL_PATH__).convert()\n 
self.snake_cell_image = pg.transform.scale(\n self.snake_cell_image,\n (self.cell_size, self.cell_size)\n ).convert()\n\n self.apple_cell_image = pg.image.load(\n __APPLE_CELL_PATH__\n ).convert()\n\n self.apple_cell_image = pg.transform.scale(\n self.apple_cell_image, (self.cell_size, self.cell_size)).convert_alpha()\n\n self.score = 0\n self.actions = 0\n self._exit = False", "def _update_screen(self):\n\t\tself.screen.fill(self.settings.bg_color)\n\t\tself.pigeon.blitme()\n\t\tfor dropping in self.droppings.sprites():\n\t\t\tdropping.draw_dropping()\n\t\tself.autos.draw(self.screen)\n\n\t\t# Draw the score information.\n\t\tself.sb.show_score()\n\n\t\t# Draw the play button if the game is inactive.\n\t\tif not self.stats.game_active:\n\t\t\tself.play_button.draw_button()\n\n\t\t# Make the most recently drawn screen visible.\n\t\tpygame.display.flip()", "def draw(self):\n if self.state == 'alive':\n for i in range(len(self.tail)):\n pygame.draw.rect(display, black, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, black, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))\n\n else:\n for i in range(len(self.tail)):\n pygame.draw.rect(display, red, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, red, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))", "def step(self):\n if not self._is_game_over:\n self._move_snake()\n self._is_game_over = self.is_snake_collides()", "def draw(self):\n self.scene.draw(self.screen)", "def draw(self):\n if not self._move:\n px = self.get_pos_in_pixels()\n self.tile.draw(px.x, px.y, 32, 32)\n else:\n self._move.draw()", "def display_maze(self):\n for y, line in enumerate(self.maze):\n for x, case in enumerate(line):\n if case == \"#\":\n self.screen.blit(source=self.wall, dest=(x * 40, y * 40))\n elif case == \"m\":\n self.screen.blit(source=self.macgyver, dest=(x * 40, y * 40))\n elif case == \" \":\n self.screen.blit(source=self.floor, dest=(x * 40, y * 40))\n elif case == \"L\":\n self.screen.blit(source=self.floor, dest=(x * 40, y * 40))\n self.screen.blit(source=self.needle, dest=(x * 40, y * 40))\n elif case == \"J\":\n self.screen.blit(source=self.floor, dest=(x * 40, y * 40))\n self.screen.blit(source=self.ether, dest=(x * 40, y * 40))\n elif case == \"K\":\n self.screen.blit(source=self.floor, dest=(x * 40, y * 40))\n self.screen.blit(source=self.seringue, dest=(x * 40, y * 40))\n elif case == \"G\":\n self.screen.blit(source=self.guardian, dest=(x * 40, y * 40))\n\n pygame.display.flip()", "def draw(self):\n\t\tself.screen.fill(pygame.Color('black'))\n\t\tfor column in self.model.blocks:\n\t\t\tfor block in column:\n\t\t\t\tr = pygame.Rect(block.left,\n\t\t\t\t\t\t\t\tblock.top,\n\t\t\t\t\t\t\t\tblock.size,\n\t\t\t\t\t\t\t\tblock.size)\n\t\t\t\tpygame.draw.rect(self.screen, block.color,r)\n\t\tpygame.display.update()", "def draw(self):\n if not self.pressed:\n #draw dialogue prompt\n arcade.draw_rectangle_filled(self.center_x, self.center_y, 20, 20, arcade.color.ALABAMA_CRIMSON)\n arcade.draw_text(\"!\", self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")\n else:\n #draw dialogue box\n arcade.draw_rectangle_filled(self.center_x, self.center_y, self.width, self.height, self.color)\n arcade.draw_text(self.text, 
self.center_x, self.center_y, arcade.color.BLACK, anchor_x=\"center\", anchor_y=\"center\")", "def draw(self):\n self.screen_surf.fill(BKGD_COLOUR)\n self.all_tiles.draw(self.screen_surf) # Tiles before other sprites.\n self.nests.draw(self.screen_surf) # Nests before chipmunks.\n self.chipmunks.draw(self.screen_surf)\n self.acorns.draw(self.screen_surf)\n self.screen_surf.blit(self.acorn_surf, self.acorn_surf.get_rect())\n self.screen_surf.blit(self.timer_surf, self.timer_rect)", "def _update_screen(self):\n # Redraw the screen during each pass of the loop\n self.screen.fill(self.bg_color)\n self.ship.blitme()\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.aliens.draw(self.screen)\n if not self.stats.game_active:\n self.play_button.draw_button()\n\n #Draw the scoreboard\n self.sb.show_score()\n\n # Make the most recently drawn screen visible\n pygame.display.flip()", "def _update_screen(self):\n self.screen.fill(self.settings.bg_colour)\n # Draw ship on the screen\n self.ship.blitme()\n # Draw all bullets in the sprites group on the screen\n for bullet in self.bullets.sprites():\n bullet.draw_bullet()\n self.stars.draw(self.screen)\n self.sb.show_score()\n if not self.stats.game_active:\n self.play_button.draw_button()\n pygame.display.flip()", "def draw(self):\n self.screen.fill(LIGHT_GRAY)\n\n r = pygame.Rect(self.model.slider.left,\n self.model.slider.top,\n self.model.slider.width,\n self.model.slider.height)\n pygame.draw.rect(self.screen, BLUE, r)\n pygame.display.update()", "def updateScreen(self) -> None:\n\n # fill game display black\n self.surface.fill(Colors.Black)\n\n # draw players and ball\n self.drawImageOnSurface(self.player_one)\n self.drawImageOnSurface(self.player_two)\n self.drawImageOnSurface(self.ball)\n\n # draw all the spacer images\n for image in self.spacers:\n self.drawImageOnSurface(image)\n\n # draw scores and format the scores in byte representation\n self.drawTextOnSurface(format(self._score[0], \"04b\"),\n (Configuration.windowWidth / 4, Configuration.windowHeight / 2), Colors.ByteGreen,\n font=self.font)\n self.drawTextOnSurface(format(self._score[1], \"04b\"),\n (3 * Configuration.windowWidth / 4, Configuration.windowHeight / 2), Colors.ByteGreen,\n font=self.font)\n\n super().updateScreen() # call the parent method to update the screen" ]
[ "0.79485714", "0.7818384", "0.7750519", "0.76774824", "0.75454086", "0.75067466", "0.7198423", "0.7196283", "0.70794916", "0.7040218", "0.7037165", "0.69755983", "0.6951785", "0.6936071", "0.6926142", "0.69155216", "0.68954194", "0.6849644", "0.6842827", "0.6826483", "0.67571723", "0.6735503", "0.6703214", "0.6685514", "0.65376025", "0.652428", "0.649727", "0.64629567", "0.6458263", "0.64553237", "0.6426386", "0.6412605", "0.64081246", "0.6397313", "0.6395004", "0.63910675", "0.63713306", "0.6367356", "0.63560534", "0.6331086", "0.63252664", "0.62898463", "0.6289373", "0.62893474", "0.6289227", "0.62891406", "0.62852037", "0.6271473", "0.62706476", "0.6263207", "0.62500274", "0.62328476", "0.6229655", "0.6216748", "0.6215251", "0.6212102", "0.62040246", "0.6200898", "0.61998785", "0.61946636", "0.61856943", "0.6180123", "0.6171019", "0.61570835", "0.6155742", "0.6147972", "0.6136385", "0.613186", "0.6128511", "0.6127722", "0.61267936", "0.6119083", "0.6117566", "0.6116766", "0.6115285", "0.6112787", "0.6109321", "0.61074877", "0.61069006", "0.61021996", "0.61013323", "0.60925543", "0.6081111", "0.60804904", "0.6071981", "0.6068251", "0.6043417", "0.6041621", "0.6031845", "0.6031694", "0.6029354", "0.60232544", "0.6013848", "0.6012258", "0.6011548", "0.60113543", "0.6011202", "0.60100466", "0.60071933", "0.5999876" ]
0.70201904
11
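
The negatives above share one pygame render pattern: clear the surface, draw each snake segment and the food as rectangles, then flip the display. A minimal self-contained sketch of that pattern follows; the constants, colors, and the draw_frame name are illustrative assumptions, not taken from any single snippet.

import pygame

# Assumed layout constants -- illustrative only.
CELL = 20
GRID_W, GRID_H = 30, 20

def draw_frame(surface, snake, food):
    """Clear the surface, draw every snake segment and the food, then flip."""
    surface.fill((0, 0, 0))                      # black background
    for x, y in snake:                           # each segment occupies one grid cell
        pygame.draw.rect(surface, (0, 255, 0),
                         (x * CELL, y * CELL, CELL, CELL))
    fx, fy = food
    pygame.draw.rect(surface, (255, 0, 0),
                     (fx * CELL, fy * CELL, CELL, CELL))
    pygame.display.flip()                        # make the new frame visible

if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((GRID_W * CELL, GRID_H * CELL))
    draw_frame(screen, snake=[(5, 5), (4, 5), (3, 5)], food=(10, 8))
    pygame.time.wait(1000)                       # hold the frame for a second
    pygame.quit()
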
This Function Displays the Food
def fooddraw(foodx, foody):
    dis.blit(Food, (foodx, foody))
    pygame.display.update()
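
The positive document references two globals it never defines: dis, the display surface, and Food, presumably a sprite loaded with pygame.image.load in the surrounding game. A minimal sketch of that surrounding setup, substituting a plain red surface for the loaded image:

import pygame

pygame.init()
dis = pygame.display.set_mode((600, 400))   # the global display surface

# Stand-in for the loaded food sprite; the original game presumably
# builds Food from an image file instead.
Food = pygame.Surface((10, 10))
Food.fill((255, 0, 0))

def fooddraw(foodx, foody):
    dis.blit(Food, (foodx, foody))
    pygame.display.update()

fooddraw(300, 200)                          # draw one food item
pygame.time.wait(1000)
pygame.quit()
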
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_food(self):\n for dish in self.food:\n print(dish.get_name())", "def food_eaten(self):\r\n # get values from GUI\r\n\r\n foodList = \"\"\r\n foodCost=0\r\n if self.is_eggs.get():\r\n foodList += \"eggs $2.00\\n\"\r\n foodCost+=2\r\n if self.is_bacon.get():\r\n foodList += \"bacon $4.00\\n\"\r\n foodCost += 4\r\n if self.is_sausage.get():\r\n foodList += \"sausage $4.00\\n\"\r\n foodCost += 4\r\n if self.is_oj.get():\r\n foodList += \"OrangeJuice $3.00\\n\"\r\n foodCost += 3\r\n foodCost = ('%.2f' % foodCost)\r\n\r\n # Create the output to screen of foodlist\r\n story = (\"\\nThank you for joining us here at Order Up!\\n\\nThe foods that you ate are as follows:\\n\\n\\n\"+foodList+\"\\nThe total amount owed is: $\"+foodCost)\r\n # display the summary\r\n self.story_txt.delete(0.0, END)\r\n self.story_txt.insert(0.0, story)", "def substitute_display(self, category_id, food_id):\n ref = category_id, food_id\n self.cursor.execute(\"\"\" SELECT food.name, store.name,\n food.link_openffacts,\n food.nutriscore, food.description, food.id\n FROM food\n INNER JOIN store_food\n ON food.id = store_food.food_id\n INNER JOIN store\n ON store_food.store_id = store.id\n WHERE food.id IN (SELECT category_food.food_id\n FROM category_food\n WHERE category_food.category_id = %s)\n AND food.id != %s\n ORDER BY food.nutriscore\n LIMIT 1 OFFSET 0\"\"\", ref)\n row = self.cursor.fetchone()\n print(\"Voici un subistitut de votre choix initial : \")\n print(\"Nom du produit : \" + row[0])\n print(\"Grade nutriscore : \" + row[3])\n print(\"Lien OpenFoodFacts : \" + row[2])\n print(\"Magasin(s) : \" + row[1])\n print(\"Description du produit : \" + row[4])\n return row[5]", "def food_item(request, food_id):\n\n food = get_object_or_404(Nutrition, pk=food_id)\n\n context = {\n 'food': food,\n }\n\n return render(request, 'nutrition/food.html', context)", "def get(self):\n items = self.request.get_all(\"food\")\n # pases the value in item variable into the jinja2 template\n self.render(\"shopping_list.html\", items=items)", "def getFood(self):\n return self.data.food", "def printing_food_and_calories(food_item_names: list, total_calories: int) -> None:\n avg_calories = total_calories / len(_calories)\n print(\"\\nFood Items:\", sorted(food_item_names))\n print(\"Total Calories:\", total_calories,\n \"Average Calories: %0.1f\\n\" % avg_calories)", "def describe_restaurant(self):\r\n print(\"\\n==========This is our restaurant \" + self.restaurant.title() + \"===============\")\r\n print(\"We serve you amazing \" + self.cuisine + \" 's cusine\")", "def show_pet(self):\n pet = self.pet_factory.get_pet()\n\n print(\"this is a lovely \", pet)\n print(\"It says \", pet.speak())\n print(\"It eats \", self.pet_factory.get_food())", "def food_selected(self, arg):\n\t\tfood = fooditemdao.retrieve_food(self.selected_food.get())\n\t\tself.lbl_unit.config(text=food.info['unit'])", "def show_pet(self):\n pet = self.pet_factory.get_pet()\n print \"We have a lovely {}\".format(pet)\n print \"It says {}\".format(pet.speak())\n print \"We also have {}\".format(self.pet_factory.get_food())", "def describe_restaurant(self):\n msg = f\"{self.name} serves wonderful {self.cuisine_type}.\"\n print(f\"\\n{msg}\")", "def describeRestaurant(self):\n print (f\"{self.name} has the best {self.cuisineType}\")", "def create_food(self):\n self.penup()\n self.shape(\"circle\")\n self.color(\"green\")\n self.x_cordinates = random.randint(-210, 210)\n self.y_cordinates = random.randint(-210, 210)\n self.goto(self.x_cordinates, 
self.y_cordinates)\n print(f\"This Is Food {self.x_cordinates} and {self.y_cordinates}\")\n # self.stamp()", "def display_fruit(self):\n self.window.addch(self.fruit_position[1], self.fruit_position[0], self.FRUIT_CHAR)", "def feed(self):\n if not self.food_bag or sum(self.food_bag.values()) == 0:\n # if the food_bag is empty:\n print('You ran out of food! Go back to the shop to buy new animal food')\n else:\n print('You currently have:\\n')\n for k, v in self.food_bag.items():\n print(\"{:<8} {:<10}\".format(v,k))\n print(\"\\n\")\n response = input(f'What would you like to feed the {self.animals[self.park_location]}?')\n while response not in self.food_bag.keys():\n response = input(f\"Please enter one of:B {' '.join(list(self.food_bag.keys()))}:\")\n print(f\"The {self.animals[self.park_location]} are eating your {response}.\")", "def display(self):\n print(\n f'\\t\\t {self.name.upper()} {self.potency[0]}{self.potency[1]}\\t\\t'\n f' {self.dose_qty[0]} {self.dose_qty[1]} {self.dose[0]} {self.dose[1].upper()}')", "def main():\n\n restaurant = Restaurant(\n name=\"Table By Basant\",\n description=\"North India, Italian, Chinese\",\n address=\"Redwood Shores\",\n operating_hours=\"11AM to 11PM\",\n ratings=4.8,\n menu=Menu(\n title=\"Delights\",\n num_of_dishes=5,\n dishes=[\n Dish(name=\"Gajrela\", price=300, discount=0.50, description=\"indian sweet\", ratings=4.5),\n Dish(name=\"Noodles\", price=200, discount=0.20, description=\"chinese\", ratings=4.3),\n Dish(name=\"Burger\", price=100, discount=0.40, description=\"western\", ratings=5.0),\n Dish(name=\"Pizza\", price=500, discount=0.10, description=\"italian\", ratings=4.8),\n Dish(name=\"Samosa\", price=30, discount=0.0, description=\"indian snacks\", ratings=5.0)\n ]\n )\n\n )\n\n restaurant.show_restaurant()", "def describe_restaurant(self):\n\t\tprint(f\"{self.restaurant_name.title()} serves {self.cuisine_type}.\")", "def describe_restaurant(self):\r\n\t\tprint(\"Our restaurant is \" + self.restaurant_name.title() + \".\")\r\n\t\tprint(\"We are known for our \" + self.cuisine_type.title())", "async def bestiary(self, ctx: commands.Context, *, fish_name: str = None):\r\n\r\n # See if we want to list all of the fish\r\n if not fish_name:\r\n fields = []\r\n embed = discord.Embed(title=\"All Fish\")\r\n for rarity, fish_types in self.bot.fish.items():\r\n fish_string = [f\"**{' '.join(fish_type.split('_')).title()}**\" for fish_type, fish_info in fish_types.items()]\r\n fields.append((rarity.title(), \"\\n\".join(fish_string)))\r\n return await utils.paginate(ctx, fields, ctx.author, \"**Bestiary**\\n\")\r\n\r\n # Find the fish they asked for\r\n selected_fish = None\r\n for rarity, fish_types in self.bot.fish.items():\r\n for _, fish_info in fish_types.items():\r\n if fish_info[\"name\"] == str(fish_name.title()):\r\n selected_fish = fish_info\r\n break\r\n if selected_fish:\r\n break\r\n else:\r\n return await ctx.send(\"That fish doesn't exist.\")\r\n\r\n # Make and send an embed\r\n embed = discord.Embed(title=selected_fish[\"name\"])\r\n embed.set_image(url=\"attachment://new_fish.png\")\r\n embed.add_field(name='Rarity:', value=f\"{selected_fish['rarity']}\", inline=False)\r\n embed.add_field(name='Base Sell Price:', value=f\"{int(int(selected_fish['cost']) / 2)} <:sand_dollar:877646167494762586>\", inline=False)\r\n embed.add_field(name='Size:', value=f\"{selected_fish['size']}\", inline=False)\r\n embed.color = {\r\n \"common\": 0xFFFFFE, # White - FFFFFF doesn't work with Discord\r\n \"uncommon\": 0x75FE66, # Green\r\n 
\"rare\": 0x4AFBEF, # Blue\r\n \"epic\": 0xE379FF, # Light Purple\r\n \"legendary\": 0xFFE80D, # Gold\r\n \"mythic\": 0xFF0090, # Hot Pink\r\n }[selected_fish['rarity']]\r\n fish_file = discord.File(selected_fish[\"image\"], \"new_fish.png\")\r\n await ctx.send(file=fish_file, embed=embed)", "def func(self):\n from commands.base_commands.guest import census_of_fealty\n\n fealties = census_of_fealty()\n table = PrettyTable([\"{wFealty{n\", \"{w#{n\"])\n for fealty in fealties:\n table.add_row([fealty, fealties[fealty]])\n self.msg(table)", "def first_entrance(self):\n for item in self.items:\n if issubclass(item.__class__, items.Drink):\n drink_name = item.name[0].lower()\n break\n\n print(\"Location: \" + self.name)\n print(\"A simple room consisting of two couches. Each is accompanied by a potted plant and a table. \"\n \"On one sits a \", end=\"\")\n if drink_name == \"coffee\" or drink_name == \"tea\":\n print(\"cup of \" + drink_name + \".\")\n elif drink_name == \"water\":\n print(\"glass of water.\")\n elif drink_name == \"coke\":\n print(\"cold can of Coke.\")\n else:\n print(drink_name)\n print(\"pile of dust.\")", "def dish_get_info() -> Dish:\r\n return Dish(input(\"Please enter the dish's name: \"),\r\n float(input(\"Please enter the price of the dish: \")),\r\n int(input(\"Please enter the number of calories of the dish: \")))", "def describe_restaurant(self):\n print(self.name.title() + \" is known for it's \" + self.cuisine.title() + \".\")", "def favourite_screen(self):\n\n self.cursor.execute(\"\"\" SELECT *\n FROM favourite\n ORDER BY id \"\"\")\n rows = self.cursor.fetchall()\n print(\"Voici vos recherches sauvegardées: \\n\")\n for row in rows:\n ref = row[1], row[2]\n self.cursor.execute(\"\"\" SELECT name\n FROM food\n WHERE id = %s\n UNION\n SELECT name\n FROM food\n WHERE id = %s \"\"\", ref)\n food_names = self.cursor.fetchall()\n i = 0\n for element in food_names:\n if i == 0:\n print(\"Produit initial : \" + element[0].upper(), end=\"\")\n i += 1\n else:\n print(\" substitué par : \" + element[0].upper())\n print(\"----------------------------------------------------------\")", "def show_list():\n clear_screen()\n print(\"Here is your list: \")\n\n for index, item in enumerate(shopping_list, start = 1):\n print(\"{}. {}\".format(index, item))\n\n print(\"-\" * 10)", "def price_food():\r\n try:\r\n price = input(\"What is your budget? 
Enter a dollar amount(5.50) >\")\r\n price = price.replace('$', '')\r\n price = float(price)\r\n print('\\n')\r\n if price < 6:\r\n if price >= 5:\r\n print(\"Canteen 1 and Quad Cafe have food under 5 dollars\\n\\n\")\r\n\r\n if price >= 4:\r\n print(\"Canteen 2, Canteen 11, Canteen 16 and North Spine Food Court have food under 4 dollars\\n\\n\")\r\n\r\n if price >= 3:\r\n print(\"Canteen 9, Canteen 13, Canteen 14 and South Spine Food Court have food under 3 dollars\\n\\n\")\r\n\r\n else:\r\n print(\"Price is too low, please try another value.\\n\\n\")\r\n price_food()\r\n\r\n else:\r\n print(\"Any canteen's available for you!\\n\\n\")\r\n print('\\n')\r\n\r\n except ValueError:\r\n print(\"Please enter a dollar value\\n\\n\")\r\n price_food()", "def show_info(self): \n color= Fore.WHITE\n print(f\"\"\" {color} \nNombre: {self.name} \nRuta: {self.route }\nFecha de salida: {self.departure_date}\"\"\")\n print(\"<\"*8, \">\"*8)\n print(\"El precio por habitacion es:\")\n for key, value in self.prize.items():\n color_value= (Fore.GREEN + str(value))\n color_key= Fore.WHITE + \"Habitacion\" + \" \" + key\n print(f\"\"\" {color_key} : {color_value}$ \"\"\")\n \n print(Fore.WHITE + \"<\"*8, \">\"*8)\n for floor, info in self.floors_info.items():\n piso=(Fore.WHITE + floor)\n print(f\" {piso}:{info} \")\n \n \n print(\"<\"*8, \">\"*8)\n print(\"Capacidad por tipo de habitacion: \")\n for key, value in self.room_capacity.items():\n print(f\"Habitacion {key}: {value} personas \",\"\\t\")\n return \"\"", "def buy_animal_food(self):\n if self.location == \"Shop\":\n response = input(\"How many bananas do you want to buy?\")\n while response not in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]:\n response = input(\"Please specify the number of beers\")\n money = self.money - int(response)\n if money >= 0:\n self.food_bag['Bananas'] += int(response)\n self.money = money\n print(f'You now got {response} more bananas than before, be careful not to drop one - you might slip!')\n else:\n print(\"You idiot apparently spent all your money on beers! 
You can\\'t buy animal food anymore - \"\n \"better go gambling!\")\n else:\n print('Your are not at the Shop right now, hard to find animal food here.')", "def display_flavor(self):\n print(\"\\nWe currently count with the next flavors:\")\n for flavor in self.flavors:\n print(f\"{flavor}\")", "def show_data(self, ):\r\n return print('society_name : {}\\n'\r\n 'flat : {}\\n'\r\n 'house_no : {}\\n'\r\n 'no_of_members : {}\\n'\r\n 'income : {}\\n '\r\n .format(self.society_name, self.flat, self.house_no, self.no_of_members, self.income))", "def describe_restaurant(self):\n print(f\"\\nRestaurant name: {self.restaurant_name}\")\n print(f\"Cuisine type: {self.cuisine_type}\")", "def view(self):\n\n print('Here\\'s your expense and income records:\\n'+' '*3+'Category'+' '*7+\\\n 'Description'+' '*4+'Amount\\n'+'='*40)\n line = 1\n amount = self._initial_money\n for n in self._records:\n m = n.split() # m is a list in the form ['category', 'item', 'money']\n print(f'{line:<3}{m[0]:<15}{m[1]:<15}{m[2]}')\n amount += int(m[2])\n line += 1\n print('='*40 + f'\\nNow you have {amount} dollars.')", "def food_table(request):\n t = loader.get_template('family_info/food_table.html')\n c = RequestContext(request, {\n 'all_families': Family.objects.all(),\n })\n return HttpResponse(t.render(c))", "def describe_restaurant(self):\n\t\tdetails = f\"{self.restaurant_name} is a {self.cuisine_type} restaurant.\"\n\t\tprint(f\"\\n{details}\")", "def calories() -> None:\n new_item = input(\"Enter food item to add, or ’q’ to exit: \")\n while new_item != \"q\":\n insert_calorie_value(new_item)\n total_calories = 0\n total_calories = adding_total_calories(total_calories)\n food_item_names = []\n appending_food_item_names(food_item_names)\n printing_food_and_calories(food_item_names, total_calories)\n new_item = input(\"Enter food item to add, or ’q’ to exit: \")", "def show_inventory(table):\r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title by: Artist\\n')\r\n for cd in table:\r\n print(cd)\r\n\r\n print('======================================')", "def display(animal):\n for name, valeur in animal.items(): # boucle contenant deux variables pour le nom et la valeur de chaque clef dans le dictionaire\n print(\"donnée de votre animal: {} : {}\".format(name,valeur))", "def show(self, screen):\n\n while True:\n screen.clear()\n self._draw_header(screen, 'Beverages')\n self.set_items(self._create_menu())\n selection = FormBase.get_menu_input(self, screen)\n\n if selection.key == MenuKey.quit:\n return\n elif selection.key == MenuKey.add:\n self.editform.show(screen, {'name': 'New', 'price': None})\n else:\n self.editform.show(screen, selection.item)", "def display_fav_cats():\n\n #display cat general info from db\n #have a delete button/option\n #commit changes to the db\n #if the user clicks the cat for more details, redirect to /more-details/<int:cat_id>", "def bonus_food(self):\n self.penup()\n self.shape(\"turtle\")\n self.color(\"red\")\n self.x_cordinates = random.randint(-210, 210)\n self.y_cordinates = random.randint(-210, 210)\n self.goto(self.x_cordinates, self.y_cordinates)\n print(f\"This Is Bonus Food {self.x_cordinates} and {self.y_cordinates}\")", "def show_inventory(self):\n\t\tclear_screen()\n\n\t\tprint(\"# INVENTORY #\\n\")\n\t\tprint(\"Weapon{:.>15} \".format(self.inventory['Weapon']))\n\t\tprint(\"Clothing{:.>13} \".format(self.inventory['Clothing']))\n\t\tprint(\"Items{:.>16} \".format(self.inventory['Items']))\n\n\t\tpress_enter()", "async def fish(self, ctx):\n board = 
self._sort_leaderboard(json.loads(await self._query(ctx, \"fish\")))\n player = \"\"\n fish = \"\"\n place = \"\"\n i = 1\n for x in board:\n player += x['player'] + \"\\n\"\n fish += str(x['fish']) + \"\\n\"\n place += str(i) +\"\\n\"\n i += 1\n embed: discord.Embed = discord.Embed(\n color = discord.Color.blue()\n )\n embed.add_field(name = \"Place\", value =place, inline=True)\n embed.add_field(name = \"Player\", value=player, inline=True)\n embed.add_field(name = \"Fish\", value=fish, inline=True)\n await ctx.send(embed = embed)", "def view_delivery() -> str:\r\n #List with amount of bottles ready for delivery for each lsit\r\n delivery_amounts = []\r\n delivery_amounts.append(delivery_information[\"Organic Red Helles\"])\r\n delivery_amounts.append(delivery_information[\"Organic Pilsner\"])\r\n delivery_amounts.append(delivery_information[\"Organic Dunkel\"])\r\n return render_template(\"view_delivery.html\",\r\n delivery_displays=delivery_amounts)", "def build_menu(names,values,calories):\n menu=[]\n for i in range(len(names)):\n menu.append(Food(values[i],calories[i],names[i]))\n\n return menu", "def food_page():\n fridge_ingredients = request.args.get('b', 0, type=str) #raw input from HTML form\n global current_user\n current_user.fridge.make_fridge(fridge_ingredients) #uses function imported from recipe_program\n recipe_dictionaries = current_user.get_timed_recipes(time_global) #uses function imported from recipe_program, time global set in timed_recipes()\n #initalizing lists\n recipe_names = []\n recipe_ids = []\n recipe_pics = []\n cooktimes = []\n new_pics = []\n for i in range(len(recipe_dictionaries)): #created lists of current recipe links, title, pictures, etc\n recipe_names.append(recipe_dictionaries[i]['recipeName'].encode('ascii','ignore')) #recipe name list\n recipe_ids.append(recipe_dictionaries[i]['id'].encode('ascii','ignore')) #recipe id list to generate links\n recipe_pics.append(recipe_dictionaries[i]['imageUrlsBySize']['90'].encode('ascii','ignore')) #recipe image links\n cooktimes.append(int(recipe_dictionaries[i]['totalTimeInSeconds']/60.0)) #recipe cooktime list\n for i in range(len(recipe_pics)):\n new_pics.append(recipe_pics[i][:len(recipe_pics[i])-4]+'250-c') #this calls an image that is 300x300 px\n return jsonify(names = recipe_names, ids = recipe_ids, pics = new_pics, times = cooktimes); #returns lists used to generate html page", "def PrintWeather(Weather):\n print('Temperature : {}°C'.format(Weather[0]))\n print('Humidity : {} %'.format(Weather[1]))\n print('Description : {}'.format(Weather[2])+'\\n')\n return 1", "def get_furniture():", "def describe_restaurant(self):\n\t\tprint(\"name of the restaurant is \" + self.restaurant_name)\n\t\tprint(\"cuisine type is \" + self.cuisine_type)", "def display_inventory(self):\n header = \"Carrying:\\n\"\n nothing_func = lambda *args: None\n action_list = [(item, nothing_func) for item in self.inventory]\n if len(action_list) == 0:\n header += \"Nothing at all\"\n events.trigger_event(\"print_list\", action_list, header=header)", "def display(self):\r\n\t\tfor each_item in self.items:\r\n\t\t\teach_item.display()", "def plants (plant_name, plant_type):\n print (f\"\\n{plant_name.title()} is a {plant_type}. 
\\n\")", "def post(self):\n #Obtain the dessert name from recipe.html\n food=request.form['Dessert']\n #To grab the recipe details of the given dessert\n foodname = self.findrecipe(food)\n #render recipe details in recipe.html\n return render_template('recipe.html',foodname=foodname)", "def show(list_of_dicts, key):\n print(\"\\nHere are the stocks I have considered for you:\")\n for i in list_of_dicts: # iterates through list_of_dicts and prints Name and Market Cap\n print(f\" - {i['Name']} - {key} is {i[key]} \")", "def show_products():\n\n print \"These are the products in sale\"\n for key, value in ADD_PRODUCTS.iteritems():\n print \"%s: Q%.2f\" % (key, value)", "def use(self):\n return_string = ''\n item = input(f\"What do you want to use?\\n>\")\n if item in self.backpack:\n if self.backpack[item].type is \"Food\":\n if (self.health + self.backpack[item].heal_amount) > standard_health:\n self.health = standard_health\n else:\n self.health += self.backpack[item].heal_amount\n self.backpack[item].charges -= 1\n return_string = f\"You ate {self.backpack[item].name}. {self.backpack[item].heal_amount} health restored\"\n if self.backpack[item].charges == 0:\n del self.backpack[item]\n return return_string\n else:\n return \"You cant eat this\"\n else:\n return \"You dont have this\"", "def hfg():\r\n fruit = random.choice(pycorpora.foods.fruits[\"fruits\"])\r\n bread = random.choice(pycorpora.foods.breads_and_pastries[\"breads\"])\r\n animal = random.choice(pycorpora.animals.common[\"animals\"])\r\n beer = random.choice(pycorpora.foods.beer_styles[\"beer_styles\"])\r\n sausage = random.choice(pycorpora.foods.sausages[\"sausages\"])\r\n \r\n return(\"A %s %s, with a %s chutney upon %s.\" % (animal, sausage, fruit, bread))", "def describe_restaurant(self):\n print(f\"{self.restaurant_name} is a new restaurant opening on Main Street!\")\n print(f\"The restaurant specializes in {self.cuisine_type}-style food.\")", "def describe_restaurant(self):\n print(\"The Restaurant is called {} and offers {} cuisine.\".format(self.restaurant_name, self.cuisine_type))\n print(\"It has served {} clients.\".format(self.number_served))", "def generate_fish_report(self):\n if len(self.fish) == 0:\n print('No fish in here, come back later')\n\n for species, count in self.fish.items():\n print(f'{species}: {count}')", "def display_flavors(self):\r\n print(\"We have the following flavors\"\"\")\r\n for flavor in self.flavors:\r\n print(\" ...\" + str(flavor.title()))", "def foodtruck_tile(request):\n try:\n menu_date = Menu.objects.get(date__gte=timezone.now(),\n date__lte=timezone.now() + datetime.timedelta(days=6)).date.strftime(\"%d.%m.%Y\")\n menu = Food.objects.filter(menu_item__date__gte=timezone.now(),\n menu_item__date__lte=timezone.now() + datetime.timedelta(days=6))\n\n context = dict(menu=menu, menu_date=menu_date)\n except ObjectDoesNotExist:\n print(\"No appropriate food truck items or menus found for the current week!\")\n context = dict(menu=None, hidden=True)\n\n return render(request, 'tiles/foodtruck.html', context)", "def _show_ingredient_list(self):\n if self._ingredients_view:\n self._ingredients_view.destroy()\n\n username = self.food_service.get_user().get_username()\n ingredients = self.food_service.list_added_ingredients(username, expire=True)\n self._ingredients_view = IngredientsView(\n self._ingredients_frame,\n ingredients,\n self._handle_mark\n )\n\n self._ingredients_view.pack()", "def restaurant():\n\n # Initialize variables.\n menu = {'egusi': 150, \n 'akpu': 150, \n 'onugbu': 
200, \n 'okro': 150, \n 'garri': 150, \n 'nsala': 300, \n 'rice': 150, \n 'stew': 150, \n 'isiewu': 1000\n }\n total = 0.0\n\n print()\n # Request input from user. Exit program if blank line is entered.\n while True:\n order = input(\"Order: \").strip().lower()\n if not order:\n break\n \n # Check if customer order is available in the menu. Increment total\n # if order is available and display appropriate message.\n if order in menu:\n total += menu[order]\n print(f'{order} cost {menu[order]}, total is {total}')\n else:\n print(f'Sorry, we are fresh out of {order} today.')\n\n # print(f'Your total is {total}')\n\n return total", "def show_inventory(table):\r\n if (table):\r\n print('======= The Current Inventory: =======')\r\n print('ID\\tCD Title (by: Artist)\\n')\r\n for row in table:\r\n print('{}\\t{} (by:{})'.format(*row.values()))\r\n print('======================================')\r\n else:\r\n print ('Inventory is empty.\\n')\r\n # return None\r", "def show_catalogue(self):\n\n data = cur.execute(\"\"\"SELECT productid, productname, unitcost, stock, location \n FROM catalogue WHERE vendorname = ?\"\"\", (self.vendorname,)).fetchall()\n print(tabulate(data, headers=[\"Product ID\", \"Name\", \"Unit Cost\", \"Stock\", \"Location\"]))", "def describe_pet(animal_type, pet_name):\r\n print(f\"\\nI have a {animal_type}.\")\r\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")", "def Restaurant_get_info() -> Restaurant:\r\n name = input(\"Please enter the restaurant's name: \")\r\n cuisine = input(\"Please enter the kind of food served: \")\r\n phone = input(\"Please enter the phone number: \")\r\n menu = menu_enter()\r\n return Restaurant(name, cuisine, phone, menu)", "def describe_pets(animal_type, pet_name):\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}\")", "def describe_pets(animal_type, pet_name):\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}\")", "def describe_pet(animal_type, pet_name):\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")", "def describe_pet(animal_type, pet_name):\n print(f\"\\nI have a {animal_type}.\")\n print(f\"My {animal_type}'s name is {pet_name.title()}.\")", "def show_data():", "def get_food(self, game_state):\n if self.red:\n return game_state.get_blue_food()\n else:\n return game_state.get_red_food()", "def display(self):\r\n\t\ts = self.options['space']\r\n\t\tv = self.level\r\n\t\tt = self.options['tab']\r\n\t\tp = self.options['sep']\r\n\t\tb = self.options['bullet']\r\n\t\tprint(v*t+b+s+self.text)\r\n\t\tfor each_item in self.items:\r\n\t\t\teach_item.display()", "def display_all(self):\n print(\"Price: \" + str(self.price))\n print(\"Speed: \" + str(self.speed) + \"mph\")\n print(\"Fuel: \" + self.fuel)\n print(\"Mileage: \" + str(self.mileage) + \"mpg\")\n print(\"Tax: \" + str(self.tax))\n return self", "def cat_details(cat_id, shelter_id):\n\n shelter = petfinder.shelter_data_map(shelter_id)\n shelter = list(shelter.values())\n cat = petfinder.cat_data_map(cat_id)\n cat = list(cat.values())\n\n return render_template('more_details.html',\n shelter=shelter,\n cat=cat)\n\n #if user selects <3 to favorite a cat then redirct to the login page", "def checkFood(self, food):\n pass", "def describe_pet(animal_type,pet_name):\n print(\"\\nI have a \"+animal_type+\".\")\n print(\"\\tMy \"+animal_type+\"'s name is \"+pet_name+\".\")", "def feed(self):\n self.health += random.randrange(1, 3) # food 
increases heath by (1, 3)\n if self.health > self.MAX_HEALTH: # if health exceeds the max, set it to the max\n self.health = self.MAX_HEALTH\n print(self.name, \"ate food\")\n self.show_health()", "def __str__(self):\n return f'{self._name} has {self._calories} calories, {self._carbohydrates}' +\\\n f'g. carbohydrates, {self._fat}g. of fat and {self._proteins}g. of proteins'", "def display(self):\n for i in range(0, len(self.__drawn)):\n if self.__drawn[i]:\n print(str(i+1) + \". You drew a short straw!\")\n else:\n print(str(i+1) + \". You're okay.\")", "def generate_food() -> FoodItem:\n presets = random.choice(FOOD_BANK)\n return FoodItem(presets['name'], presets['hp'], presets['msg'])", "def describe_restaurant(self):\n print(f\"{self.restaurant_name} is a new restaurant opening on Main Street!\")\n print(f\"The restaurant specializes in {self.cuisine_type}.\")", "def describe_pet(animal, name):\r\n print(\"\\nI have a \" + animal + \".\")\r\n print(\"Its name is \" + name + \".\")", "def caloriesCmd(bot, trigger):\n replyFmt = '\"%s\" has %i kcal per 100 g'\n lookup = lookupFoodAndReply(bot, trigger, replyFmt, nutrient=CALORIE)", "def print_inventory(self):\n print(\"Backpack:\")\n # Loop for each item in the players inventory\n for item in self.inventory:\n print('* ' + str(item))\n # Assigns the best weapon\n best_weapon = self.most_powerful_weapon()\n # print statement telling the best weapon in inventory\n print(\"Your best weapon is your {}\".format(best_weapon))", "def checkforfood(curpos):\n if DARK and not HAS_FLASHLIGHT:\n printmessage(\"But you can't see a thing!\", 5, MAGENTA, 2) # was 2\n return\n\n if FOOD_LIST[curpos] != 6:\n printmessage(\"You found some %s here.\" % FOODTYPES[FOOD_LIST[curpos]], 5, MAGENTA, 0) # was 2\n addnourishment(FOOD_LIST[curpos])\n FOOD_LIST[curpos] = (int(len(FOODTYPES) - 1)) \n pause_for_keypress()\n else:\n printmessage(\"You scrounge around for food, but there is nothing edible here.\", 5, CYAN, 2) # was 2", "def dish_str(n:Dish):\r\n return (n.name + \" $\" + str(n.price) + \" \" + str(n.calories) + \" cal\")", "def displayFolowers(database):\n firstname=str(input(\"who do you want to display followers :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(f\"{usr.firstname} {usr.lastname} is folowed by:\")\n for folower in usr.folowed:\n print(folower)", "def show_category(update, context):\n query = update.callback_query\n bot = context.bot\n # loads json received from callback_data into dictionary\n ids = json.loads(query.data)\n category_id = ids['category_id']\n keyboard = []\n\n products = menu[category_id]['products']\n # iterates over all products in selected category\n for product_id in products.keys():\n product = products[product_id]\n # add each product to keyboard with id information as callback_data\n keyboard.append([InlineKeyboardButton(product['name'] + \" \" + product['price'],\n callback_data=json.dumps({\"category_id\": category_id,\n \"product_id\": product_id}))])\n\n reply_markup = InlineKeyboardMarkup(keyboard)\n # edits last message to keyboard with all products from category\n bot.edit_message_text(chat_id=query.message.chat_id,\n message_id=query.message.message_id,\n text='Select desired food in ' + menu[category_id]['category'] + \":\",\n reply_markup=reply_markup)\n # notify ConversationHandler of THIRD stage\n return THIRD", "def getFood(self, gameState):\n\n if (self.red):\n return gameState.getBlueFood()\n else:\n return gameState.getRedFood()", "def display_flavors(self):\n for flavor in 
self.flavors:\n print(f\"- {flavor}\")", "def show(self, opc=True):\n self.ventana.show_all()\n if opc:\n #mostrar la ventana\n self.ventana.show()\n else:\n #ocultarla\n self.ventana.hide()", "def eat(self):\n print(self.name + ' is eating.')", "def graf_F(self):\n vert_funktion(self, typ='D', titel='$Empirische\\; Verteilungsfunktion$' + '\\n ')", "def getDetail(self):\n\t\t\n\t\treturn (super().setParameters(0,self.getDefense(),0))\n\t\t\n\t\t#return \"\\n#########################################################\\n\"+\"\\nItem of Defense, Name of item:\"+self.getName()+\"\\nCapacity of defense:\"+str(self.getDefense())+\"\\nCapacity of attack:0 \\n Capacity of heal:0 \\n\"+\"#########################################################\\n\"", "def show_orders():\n return 'hehe'", "def get_one_meal():" ]
[ "0.77116036", "0.7256722", "0.68620783", "0.68321484", "0.6552086", "0.654516", "0.65346205", "0.6491626", "0.64828205", "0.64672315", "0.63990873", "0.63842154", "0.63528955", "0.6261036", "0.6243647", "0.62275267", "0.6203672", "0.61670315", "0.6158984", "0.60892135", "0.60569364", "0.6047044", "0.6043429", "0.6037669", "0.6035086", "0.6034373", "0.59953874", "0.5987746", "0.59774846", "0.59700847", "0.5961451", "0.5955321", "0.595324", "0.5910892", "0.59090936", "0.5906295", "0.58946204", "0.5884312", "0.58634573", "0.5837577", "0.58148974", "0.5813524", "0.58089167", "0.58084184", "0.58053887", "0.5804619", "0.57990384", "0.5770099", "0.57664794", "0.5765908", "0.57512033", "0.5746795", "0.5733028", "0.5731454", "0.5727823", "0.5711002", "0.57034963", "0.5698999", "0.56988764", "0.56776386", "0.56763005", "0.56748295", "0.566907", "0.5662416", "0.5650512", "0.56459045", "0.5641857", "0.56284213", "0.5626894", "0.5607834", "0.5607834", "0.5603621", "0.5603621", "0.5595288", "0.5595176", "0.55934995", "0.55893266", "0.55688053", "0.5551581", "0.5550527", "0.5543098", "0.5542486", "0.55416274", "0.5538383", "0.5510009", "0.55059934", "0.55027336", "0.55024815", "0.550223", "0.5496506", "0.5468529", "0.54670906", "0.5461634", "0.5460723", "0.5458771", "0.54563123", "0.544774", "0.5445631", "0.5443263", "0.5434345" ]
0.5860002
39
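A consistency note on the two score fields: on both rows visible here, the rank value equals the number of entries in the preceding score list that exceed the standalone document score, so the rank appears to be 0-indexed among the pooled candidates. A minimal check of that reading, assuming the fields have already been parsed into Python floats (the raw dump stores them as strings, so parse with float(...) first):

def rank_from_scores(document_score, negative_scores):
    # Count how many mined negatives outscore the positive document.
    return sum(score > document_score for score in negative_scores)

# Reproduces 39 for the row above (document_score 0.5860002)
# and 42 for the row that follows (document_score 0.70739853).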
This function shows a message on the screen
def message(msg, font_size, color, pos):
    font_style = pygame.font.SysFont("Times New Roman", font_size)
    mesg = font_style.render(msg, True, color)
    dis.blit(mesg, pos)
    pygame.display.update()
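The positive document above depends on two globals from its source file, an initialized pygame module and a `dis` display surface, neither of which is shown in the row. A self-contained sketch with that setup filled in as an assumption (the surface size and the example call are illustrative only):

import pygame

pygame.init()
dis = pygame.display.set_mode((600, 400))  # assumed stand-in for the original's global surface

def message(msg, font_size, color, pos):
    # Render msg at the given size/color and draw it at pos on the shared surface.
    font_style = pygame.font.SysFont("Times New Roman", font_size)
    mesg = font_style.render(msg, True, color)
    dis.blit(mesg, pos)
    pygame.display.update()

message("Game Over", 36, (255, 0, 0), (200, 180))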
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_message():", "def showMessage(self):", "def showMessage(self, message):\r\n print message", "def display_message(self, message):\n\t\tself.render('message.html', {'message': message})", "def display(self,message):\r\n \r\n print(message)", "def showme(message):\n print(message)", "def showMessage(self, message):\r\n util.raiseNotDefined()", "def display_message():\n\tmessage = \"I'm learning how to use function.\"\n\tprint(message)", "def display_message():\n msg = \"I'm learning to store code in functions.\"\n print(msg)", "def display_message(self, message):\n params = {\n 'message': message\n }\n self.render_template('message.html', params)", "def _display_message(message: str) -> None:\n print(message)", "def display_message(window, msg):\n v = create_output_panel(window, '')\n _append(v, msg)", "def show_message(self, message):\n self.sense.show_message(\n message,\n scroll_speed=self.SCROLL_SPEED,\n text_colour=self.TEXT_COLOUR\n )", "def show_message(message, col=c.r, update=False):\n g.content = generate_songlist_display()\n g.message = col + message + c.w\n\n if update:\n screen_update()", "def display_message(self, message):\n text = self.font.render(message, True,\n self.display_states[self.display_names[self.current_display_state]]['text'])\n temp_width = text.get_rect().width\n self.gameDisplay.blit(text, ((self.SCREEN_WIDTH / 2) - (temp_width/2), 100))", "def display_simple_message(message):\n lcd_display.clear()\n lcd_display.write(message, 1)", "def display_message():\n\tprint(\"In this chapter we will be learning how to write functions\")", "def disp_msg(msg):\n from x84.bbs import getterminal, echo\n term = getterminal()\n echo(u''.join((u'\\r\\n\\r\\n',\n term.bold_yellow('%s ' % (msg,),),\n term.yellow_reverse_bold(u'...'),)))", "def _showMessage(self, msg: str) -> None:\n\n raise NotImplementedError()", "def show_message(request):\n return render_to_response('woodstock/messages/view.html', {},\n context_instance = RequestContext(request))", "def display_message(self, message):\n context_id = self.status_bar.get_context_id(\"\")\n self.status_bar.show()\n self.status_bar.push(context_id, message)", "def display_message(self, message):\n params = {\n 'message' : message\n }\n self.render_template('/auth/message.html', params)", "def display_message_on_ableton(self, message):\n self._show_message(message)", "def show_message(message):\n print(message) # noqa: WPS421", "def showmessage(self):\n return self.message", "def showmessage(self):\n return self.message", "def display_starting_message(): # opening message\n starting_message = \"Is your cat plotting to kill you?? \\nLet's find out. \\n(Please note that this is merely a pythonic presentation of an app created by The Oatmeal. \\nI do not claim credit for its brilliance. 
I'm just trying to learn Python.)\"\n print(starting_message)", "def display_message():\n\tprint(\"Learnt to write functions, which are named blocks of code that are designed to do one specific job.\")", "def info(msg):\n click.secho(msg, fg='blue')", "def _message_display(string):\n print(\"========================================\")\n print(string)\n print(time.asctime(time.localtime(time.time())))\n print(\"========================================\")", "def display_message():\n message = \"I am learning about functions, function calls, parameters and \"\n message+= \"arguments.\"\n print(message)", "def showinfo(self, msg):\n tkinter.messagebox.showinfo('Information', msg)", "def showMessage(self, msg): \n QtGui.QMessageBox.information(None, \"Info\", msg)", "def showMessage(self, message, surface=None, bg_color=None, rect=None):\r\n if surface is None:\r\n surface = self.infoPanel \r\n if bg_color is None:\r\n bg_color = gu.INFO_PANEL_COLOR\r\n if rect is None:\r\n rect = gu.INFO_RECT\r\n \r\n surface.fill(bg_color) # clear from previous messages\r\n \r\n lines = message.split(\"\\n\")\r\n font = pygame.font.Font(None, 25)\r\n dy = 20\r\n for i, line in enumerate(lines):\r\n txt_surf = font.render(line, False, gu.WHITE)\r\n new_rect = txt_surf.get_rect().move(0, i*dy)\r\n surface.blit(txt_surf, new_rect)\r\n \r\n self.screen.blit(surface, rect)\r\n self.wait()\r\n pygame.display.update()", "def show_msg(self):\n if self.result and self.success_msg:\n print color_str('g', '\\n'.join(self.success_msg))\n elif self.result == False and self.fail_msg:\n print color_str('r', '\\n'.join(self.fail_msg))\n if self.stat_msg:\n print color_str('b', '\\n'.join(self.stat_msg))", "def show_info(title, message):\n\n pass", "def display_message(self, message, subtitle=None, arg=None):\n if message is None:\n # Display same message as the placeholder\n message = self.placeholder\n xml = alfred.xml([\n alfred.Item(\n title=message,\n subtitle=subtitle,\n attributes={\n 'uid': alfred.uid(0),\n 'arg': arg\n },\n icon='icon.png',\n )\n ]) # compiles the XML answer\n alfred.write(xml) # writes the XML back to Alfred\n exit()", "def showMessage(self, text, location, font, fontSize, colour=(255,255,255),\n input=False, secs=None):\n self.fill()\n self.text(text, location, font, fontSize, colour=colour)\n pygame.display.update()\n if input:\n currentEvent = self.input.input()\n while not self.input.checkInput(currentEvent):\n currentEvent = self.input.input()\n if not secs:\n self.timer.wait(secs)", "def showMessage(self, msg):\r\n super(SplashScreen, self).showMessage(\r\n msg, self.labelAlignment, QColor(_QtCore.Qt.white))\r\n QApplication.processEvents()", "def message(self, msg: str, time=None) -> None:\n bar = self.statusBar()\n if time is None:\n bar.showMessage(msg)\n else:\n bar.showMessage(msg, time)", "def display_message(self, message, level=\"information\"):\n box = QMessageBox(self.win)\n box.setText(message)\n box.setWindowTitle(QApplication.applicationName())\n if level == \"critical\":\n box.setIcon(QMessageBox.Critical)\n elif level == \"warning\":\n box.setIcon(QMessageBox.Warning)\n else:\n box.setIcon(QMessageBox.Information)\n box.exec_()", "def msg_show(self,msg):\r\n self.frame.Show()\r\n self.frame.Raise()", "def ShowMessage(self, title=u\"\", message=u\"\", msgType=INFOBAR_INFO):\n self.Title.SetLabel(title)\n self.Message.SetLabel(message)\n self.MessageType = msgType\n self.Show(True)", "def print_message(message):\n print(\"-------------------------\")\n print(message)", "def 
say(self, message):\r\n print message", "def message_display(text, loc, size, color=None):\n # gameDisplay = pygame.display.set_mode((width, height))\n largeText = pygame.font.Font('freesansbold.ttf', size)\n TextSurf, TextRect = text_objects(text, largeText, color)\n TextRect.center = (loc[0], loc[1])\n gameDisplay.blit(TextSurf, TextRect)\n\n pygame.display.update()", "def message(msg, color):\n mesg = font_style.render(msg, True, color)\n dis.blit(mesg, [dis_width / 3, dis_height / 2])", "def displayMessage(j, s):\n if j:\n j.DisplayMessage(\"info\", s.replace(\" \", \"&nbsp;\").replace(\"j.\", \"\", 1))\n print(\" \" + s)", "def info(self, message):\n self._clear()\n print(message)\n self._draw()", "def display_messages(self, layout):", "def logToScreen(message):\n\tif CyGame().isFinalInitialized():\n\t\tCyInterface().addImmediateMessage(escapeXml(message), \"\")", "def message(self, message):\n messagebox.showinfo(\n GT_('Menu'),\n message\n )", "def _show_message(self, message, message_color, background_color=(0, 0, 0)):\n\n # Need to be sure we revert any changes to rotation\n self._sense_hat.rotation = 0\n self._sense_hat.show_message(message, Config.SCROLL_TEXT_SPEED, message_color, background_color)", "def show_messages(self):\n self.masterlog.revealme()", "def showStatus(self, message):\n self.status_bar.showMessage(message)", "def print_outgoing_msg():\n\n print(\"\"\"\n A LOVELY little potential energy surface has been successfully generated by the\n Lim, Launder, and Moore auto-plotter (LLAMA) vers. 0.3!\n\n ############################################################################### \n LLAMA 0.3 written By:\n [a] Andrew Launder and Kevin Moore\n Center for Computational Quantum Chemistry, \n Dept. of Chemistry, Univ. of Georgia, Athens, GA, United States\n [b] Victoria Lim\n Dept. of Chemistry, Belmont University, Nashville, TN, United States\n ###############################################################################\n\n Thank you for very much for plotting with us today! 
Please do so again soon!\n \"\"\")\n\n return None", "def show_message(self, title: str, message: str):\n QMessageBox.about(self, title, message)", "def ShowMessage(msg, title=None, kind='info'):\n kind = kind.lower()\n if (kind.startswith('info')):\n if (title is None): title = 'Information'\n opts = wx.OK|wx.ICON_INFORMATION\n elif (kind.startswith('error')):\n if (title is None): title = 'Error'\n opts = wx.OK|wx.ICON_ERROR\n elif (kind.startswith('warn')):\n if (title is None): title = 'Warning'\n opts = wx.OK|wx.ICON_WARNING\n else:\n opts = wx.OK\n if (title is None):\n title = \"\"\n dial = wx.MessageDialog(None, msg, title, opts)\n dial.ShowModal()", "def error_mess():\n print(\"Sorry, I didn't understand that.\")", "def beware_msg(msg):\n print(\"\\n\\n\\n************************************************************\")\n print(\"!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\\n\\n\\n\")\n print(msg)\n print(\"\\n\\n\\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\")\n print(\"************************************************************\\n\\n\\n\")", "def show_message(messages):\n for message in messages:\n printed_message = f\"{message}\"\n print(printed_message)", "def message(self, string):\n print (string)", "def show_text(text, colour):\n message = font_style.render(text, True, colour)\n dis.blit(message, [game_size_x/2, game_size_y/2])", "def show_on_screen(self, string, location, font='Arial', font_size=20, colour=WHITE):\n msg = pygame.font.SysFont(font, font_size).render(str(string), True, colour)\n self.screen.blit(msg, location)", "def show_about():\r\n\tmsg = messagebox\r\n\tmsg.showinfo(\"\", '''Creator: Ellis, Kevin\r\nOrganization: n/a\r\nDescription: Retrieve the network information from a database\r\nDate: 2020208\r\nVersion: 1.4''')", "def do_message(self, message):\r\n \r\n if not self.display_game:\r\n return\r\n \r\n if SlTrace.trace(\"message\"):\r\n if (self.prev_message is None\r\n or len(message.text) > len(self.prev_message)\r\n or len(message.text) > SlTrace.trace(\"message_len\", default=25) > 25):\r\n SlTrace.lg(f\"{len(message.text)}: {message}\")\r\n self.prev_message = message.text\r\n message.text = message.text[0:SlTrace.trace(\"message_len\", default=25)]\r\n SlTrace.lg(\"do_message(%s)\" % (message.text), \"execute\")\r\n if not self.run:\r\n return\r\n \r\n if (self.mw is None or not self.mw.winfo_exists()\r\n or self.msg_frame_base is None\r\n or not self.msg_frame_base.winfo_exists()):\r\n return\r\n \r\n self.wait_message(message)\r\n if self.msg_frame is not None:\r\n self.msg_frame.destroy() # Remove all message frames\r\n self.msg_frame = None\r\n self.msg_frame = Frame(self.msg_frame_base)\r\n self.msg_frame.pack(side=\"top\", expand=NO, fill=NONE)\r\n text = f'{message.text:40}'\r\n color = message.color\r\n font_size = message.font_size\r\n if font_size is None:\r\n font_size=40\r\n time_sec = message.time_sec\r\n\r\n \r\n if (self.mw is None or not self.mw.winfo_exists()\r\n or self.msg_frame is None\r\n or not self.msg_frame.winfo_exists()):\r\n return\r\n \r\n if self.mw is not None and self.mw.winfo_exists():\r\n if self.cur_message is not None:\r\n self.cur_message.destroy()\r\n self.cur_message = None\r\n width = self.get_width()\r\n if width < 500:\r\n width = 500\r\n message.msg = Message(self.msg_frame, text=text, width=width) # Seems to be pixels!\r\n message.msg.config(fg=color, bg='white',\r\n anchor=S,\r\n font=('times', font_size, 'italic'))\r\n message.msg.pack(side=\"top\")\r\n 
###message.msg.pack(side=\"bottom\")\r\n self.cur_message = message\r\n if time_sec is not None:\r\n if self.speed_step >= 0:\r\n time_sec = self.speed_step # Modify for view / debugging\r\n end_time = datetime.now() + timedelta(seconds=time_sec)\r\n message.end_time = end_time", "def display_message(\n self, subject=\"Find My iPhone Alert\", message=\"This is a note\", sounds=False\n ):\n data = json.dumps(\n {\n \"device\": self.content[\"id\"],\n \"subject\": subject,\n \"sound\": sounds,\n \"userText\": True,\n \"text\": message,\n }\n )\n self.session.post(self.message_url, params=self.params, data=data)", "def __draw_message(self, message):\n x_offset = (curses.COLS - len(message)) // 2\n self.message_win.addstr(0, x_offset, message)", "def display_message(self, text, color, text_size, coordinates):\n\n if text_size == \"large\":\n text_size = 100\n elif text_size == \"normal\":\n text_size = 60\n \n text_style = pygame.font.Font(\"etc/Roboto-Regular.ttf\", text_size)\n text_surface, text_rect = self.text_objects(text, text_style, color)\n text_rect.center = coordinates\n self.game_display.blit(text_surface, text_rect)\n pygame.display.update()", "def modeMsgBox(self, messageText):\n self.createMessage(messageText)", "def echo(self, msg=None):\n return msg", "def screen(self, _strMessage=\"\"):\n self.edLogging.screen(_strMessage)", "def print_message(self, message, color):\n\n xpos = 20\n ypos = self.height\n\n size = common.FONT_SIZE\n\n arcade.draw_text(\n text=message,\n start_x=xpos,\n start_y=ypos,\n anchor_x=\"left\",\n anchor_y=\"top\",\n width=size*len(message),\n color=color,\n font_size=size,\n bold=True)", "def print_message(contact, message):\n print(f\"{contact}: {message}\")", "def Print(self, message):\n self.script.append('ui_print(\"%s\");' % (message,))", "def showMessage(self, msg):\n msgBox = QMessageBox()\n msgBox.setText(msg)\n #msgBox.setInformativeText(\"Do you want to save your changes?\")\n #msgBox.setStandardButtons(QMessageBox::Save | QMessageBox::Discard | QMessageBox::Cancel);\n #msgBox.setDefaultButton(QMessageBox::Save);\n ret = msgBox.exec();", "async def show_notification(self, message: str, timeout: int):\n self.notification_text = HTML(\n f'<style bg=\"white\" color=\"black\">[ {message} ]</style>'\n )\n await asyncio.sleep(timeout)\n self.notification_text = None", "def message(self,message,style=wx.OK | wx.ICON_INFORMATION):\n dlg = wx.MessageDialog(self, message, self.app.title, style)\n answer = dlg.ShowModal()\n dlg.Destroy()\n return answer", "def DisplayMsg(pstrMessage, pintLine, pintStart=0):\n Line = [0, 8, 16, 24, 32, 40, 48]\n if pintLine in Line:\n display.text(pstrMessage, pintStart, pintLine)", "def stand_by_msg(msg: str = \"\"):\n print(msg)\n input(\"Pressez une touche pour continuer...\")", "def show_statusmessage(self, message='', time=1, clear=False):\n self.parent.show_statusmessage(message, time, clear=clear)", "def show_greeting(self):\n self.output(' ------------------------ ')\n self.output('You are now playing ' + self.name)\n self.output(self.greeting)\n self.output(' ------------------------ ')", "def horde_message(self, message):", "def _flash(self,id,msg,duration=30.0):\n if duration>0:\n pass #gtk.timeout_add(duration,'')\n return self.statusbar.push(id,msg)", "def about(self):\n self.main_window.message(\n width=200, aspect=100, justify=tkinter.CENTER,\n text=\"Jeu de Ping\\n\\n\"\n \"(C) Maximin Duvillard, August 2022.\\nLicence = GPL\")", "def print_message(self, message):\n print(message)", "def 
print_message(self, message):\n print(message)", "def text_message(update: Update, _: CallbackContext) -> None:\n update.message.reply_text(\n f\"Thank you for sending: {update.message.text},\\n\" +\n f\"but I am waiting only for images...\")", "def print_message(message):\r\n return print(message)", "def show_help():\n messagebox.showinfo(title='How to Use', message=\"It's really easy.\")", "def handle_gui_example_one_intent(self, message):\n self.gui.show_text(\"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec placerat varius turpis porta scelerisque. Nam feugiat, lectus a ultricies tempus, mi sem tempor felis, vitae laoreet nisi ipsum vitae mauris.\")", "def greeting_message(name):\n print('My name is ' + name)", "def show_warning(title, message, print_message=False):\n\n pass", "def show_info_message(self, msg, msecs=3):\n\n message.PopupMessage.info(msg, parent=self, duration=msecs, closable=True)", "def show_lose_screen():\n print(\"\"\"\n \n _ _ __ _ _ __ __ ____ ____ _ _ \n( \\/ )/ \\ / )( \\ ( ) / \\ / ___)( __) (_)/ ) \n ) /( O )) \\/ ( / (_/\\( O )\\___ \\ ) _) _( ( \n(__/ \\__/ \\____/ \\____/ \\__/ (____/(____) (_)\\_) \n\"\"\")", "def __window_alert(self, text):\n print str(text)\n config.VERBOSE(config.VERBOSE_DEBUG, '[DEBUG] alertmsg: ' + str(text))", "def render(self, screen: pygame.Surface) -> None:\n win_msg = self.gamedata.fonts[\"menu\"].render(f'{self.lost_message}', True, (0, 0, 0))\n return_msg = self.gamedata.fonts[\"debug\"].render(f'{self.back_to_menu}', True, (0, 0, 0))\n\n screen.blit(win_msg, (100, 325))\n screen.blit(return_msg, (500, 470))", "def show_popup(cls, content, level):\n\n current_view = sublime.active_window().active_view()\n message = cls.get_message_template(content, level)\n\n current_view.show_popup(content=message, max_width=400)", "def showmessage(parent, message, title, flags = wx.OK):\n\tdlg = wx.MessageDialog(parent, message, title, flags)\n\tdlg.ShowModal()\n\tdlg.Destroy()", "def status_display(self, message, level=0, field=0):\n #print(message)\n self.statusbar_txt.set(message)" ]
[ "0.8671978", "0.8480241", "0.828352", "0.8071773", "0.80670726", "0.8040086", "0.78809476", "0.78725165", "0.77866286", "0.7779627", "0.77761686", "0.77727395", "0.7755101", "0.7697426", "0.7672498", "0.76241535", "0.75889695", "0.75355613", "0.7512984", "0.7485261", "0.74392503", "0.7420362", "0.73834467", "0.73754525", "0.7365098", "0.7365098", "0.7322507", "0.7295174", "0.7278314", "0.72732294", "0.7268072", "0.726515", "0.72647184", "0.7202371", "0.7174914", "0.71651363", "0.7143256", "0.7138393", "0.7107624", "0.70977294", "0.708445", "0.70759004", "0.70727056", "0.7011176", "0.6998086", "0.6990705", "0.6989323", "0.6973347", "0.6959496", "0.69429797", "0.6939244", "0.69069016", "0.6906838", "0.6902003", "0.68842465", "0.68741953", "0.6870061", "0.685886", "0.68518263", "0.6812579", "0.67998403", "0.6797154", "0.67728806", "0.6736736", "0.6727497", "0.67161834", "0.6712296", "0.6708642", "0.6707436", "0.6697897", "0.6682272", "0.6681032", "0.66764927", "0.667495", "0.66623443", "0.6657288", "0.6653533", "0.66521275", "0.6651762", "0.6648328", "0.66457355", "0.6628568", "0.66177434", "0.6612172", "0.66016406", "0.6601471", "0.6601471", "0.65954405", "0.65817463", "0.6581292", "0.65807796", "0.65799487", "0.6576404", "0.65690047", "0.6567963", "0.6567741", "0.6558171", "0.65575325", "0.6549446", "0.6542275" ]
0.70739853
42
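The metadata on these rows declares a single training objective, a ("query", "document", "negatives") triplet, so each record presumably expands into one anchor-positive-negative triple per mined negative. A hypothetical sketch of that expansion, assuming rows are parsed into plain dicts keyed by the field names shown here:

def row_to_triplets(row):
    # One (anchor, positive, negative) triple per mined negative,
    # following the row's metadata objective ["query", "document", "negatives"].
    return [(row["query"], row["document"], neg) for neg in row["negatives"]]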
This function is used to control the movement of the snake
def Movement():
    keys = pygame.key.get_pressed()
    if keys[pygame.K_LEFT] and not snake.ang == 90:
        snake.x_change = -snake.vel
        snake.y_change = 0
        snake.left = True
        snake.right = False
        snake.up = False
        snake.down = False
        snake.ang = -90
    elif keys[pygame.K_RIGHT] and not snake.ang == -90:
        snake.x_change = snake.vel
        snake.y_change = 0
        snake.left = False
        snake.right = True
        snake.up = False
        snake.down = False
        snake.ang = 90
    elif keys[pygame.K_UP] and not snake.ang == 0:
        snake.x_change = 0
        snake.y_change = -snake.vel
        snake.left = False
        snake.right = False
        snake.up = True
        snake.down = False
        snake.ang = 180
    elif keys[pygame.K_DOWN] and not snake.ang == 180:
        snake.x_change = 0
        snake.y_change = snake.vel
        snake.left = False
        snake.right = False
        snake.up = False
        snake.down = True
        snake.ang = 0
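The handler above mutates a global `snake` whose state the row never shows: a speed `vel`, per-axis deltas, four direction flags, and a heading angle `ang` (the `not snake.ang == ...` guards block an instant 180-degree reversal, e.g. LEFT is ignored while heading right at `ang == 90`). A minimal assumed stand-in for that state, just enough to make the handler callable:

class Snake:
    # Hypothetical state object; field names are taken from the handler above,
    # initial values are guesses.
    def __init__(self, vel=10):
        self.vel = vel
        self.x_change = 0
        self.y_change = 0
        self.left = self.right = self.up = self.down = False
        self.ang = 0  # per the handler: 0 down, 90 right, -90 left, 180 up

snake = Snake()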
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move(self):\r\n piece = []\r\n if self.direction == \"UP\":\r\n piece = [self.body[0][0], self.body[0][1] - self.POS_CHANGE] # create piece at new coordinates\r\n elif self.direction == \"DOWN\":\r\n piece = [self.body[0][0], self.body[0][1] + self.POS_CHANGE]\r\n elif self.direction == \"LEFT\":\r\n piece = [self.body[0][0] - self.POS_CHANGE, self.body[0][1]]\r\n elif self.direction == \"RIGHT\":\r\n piece = [self.body[0][0] + self.POS_CHANGE, self.body[0][1]]\r\n\r\n if piece:\r\n if piece in self.body: # Lose game if snake touches itself\r\n self.alive = False\r\n else:\r\n self.body.insert(0, piece) # insert new piece at head of snake\r\n if len(self.body) > self.length:\r\n self.body.pop() # delete last piece of snake, if length isnt increased\r\n\r\n self.draw_snake()", "def movement_handler(new_direction, snake):\n #Get the head of the snake\n new_head = snake[0].copy()\n #Update the new head position based on where the snake moved\n if new_direction == INPUT.LEFT:\n new_head[0] -= CELL_SIZE\n elif new_direction == INPUT.UP:\n new_head[1] -= CELL_SIZE\n elif new_direction == INPUT.RIGHT:\n new_head[0] += CELL_SIZE\n else: #new_direction == INPUT.DOWN:\n new_head[1] += CELL_SIZE\n\n #We will update the position of the snake's head\n new_x, new_y = new_head\n if len(snake) >= 2:\n old_x, old_y = snake[1]\n else:\n old_x, old_y = [-1, -1]\n\n #If the player is running into themselves, reverse their INPUTection\n if new_x == old_x and new_y == old_y:\n if new_direction == INPUT.LEFT:\n new_head[0] += CELL_SIZE*2\n elif new_direction == INPUT.UP:\n new_head[1] += CELL_SIZE*2\n elif new_direction == INPUT.RIGHT:\n new_head[0] -= CELL_SIZE*2\n else: #new_direction == INPUT.DOWN:\n new_head[1] -= CELL_SIZE*2\n return new_head", "def event_handler(event):\n if event.type == pygame.QUIT: # click close button at top corner of the screen\n pygame.quit()\n quit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP and snake_direction[Y] == 0: # occur when up key is pressed\n snake_direction[X] = 0\n snake_direction[Y] = UP\n elif event.key == pygame.K_DOWN and snake_direction[Y] == 0: \n snake_direction[X] = 0\n snake_direction[Y] = DOWN\n elif event.key == pygame.K_LEFT and snake_direction[X] == 0: \n snake_direction[X] = LEFT\n snake_direction[Y] = 0\n elif event.key == pygame.K_RIGHT and snake_direction[X] == 0: \n snake_direction[X] = RIGHT\n snake_direction[Y] = 0\n # implement other directions here", "def __snake_move(self):\n self.__eat_candy()\n # move tail and body\n n_snake = len(self.__snake)\n for i in range(1, n_snake):\n s2 = self.__snake[n_snake - i]\n s1 = self.__snake[n_snake - i - 1]\n s2.move(s1.pos())\n # move head\n pos = self.__snake[0].pos()\n tmp_snake = Snake(self, direction=self.__h_direction, position=pos)\n h_pos = self.__get_next_head_pos(tmp_snake)\n tmp_snake.remove()\n icon = f'resources/{self.__directions[self.__h_direction]}.svg'\n new_head = Snake(self, icon, self.__h_direction, self.__cell_edge, h_pos)\n old_head = self.__snake[0]\n self.__snake[0] = new_head\n old_head.remove()\n self.__crash_check()", "def movement(self):", "def move(self): \n # range(start, stop, step)\n for seg_num in range(len(self.segments) - 1, 0, -1):\n new_x_position = self.segments[seg_num - 1].xcor()\n new_y_position = self.segments[seg_num - 1].ycor()\n self.segments[seg_num].goto(new_x_position, new_y_position)\n\n # moving first snake's segment 20 spaces and updating last_direction\n self.head.forward(MOVE_DISTANCE)\n self.last_direction = 
self.head.heading()", "def movement(self):\n self.rect.left -= self.speedx #to move the asteroid to the left", "def snake_move(snake, direction):\n head = snake[0].copy()\n\n if direction == RIGHT:\n head[0] = head[0] + 1\n elif direction == LEFT:\n head[0] = head[0] - 1\n elif direction == UP:\n head[1] = head[1] - 1\n elif direction == DOWN:\n head[1] = head[1] + 1\n else:\n return snake\n \n snake.insert(0,head)\n snake.pop()\n \n return snake", "def move_player(self, pressed_keys):\n # Arrow-key movement\n if pressed_keys[K_UP]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_DOWN]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_LEFT]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_RIGHT]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n # WASD movement\n if pressed_keys[K_w]:\n self.player.rect.move_ip(0, -2)\n self.player.movement_check = True\n self.player.up_check = True\n self.player.down_check = False\n if pressed_keys[K_s]:\n self.player.rect.move_ip(0, 2)\n self.player.movement_check = True\n self.player.up_check = False\n self.player.down_check = True\n if pressed_keys[K_a]:\n self.player.rect.move_ip(-2, 0)\n self.player.movement_check = True\n self.player.direction_check = False\n self.player.up_check = False\n self.player.down_check = False\n if pressed_keys[K_d]:\n self.player.rect.move_ip(2, 0)\n self.player.movement_check = True\n self.player.direction_check = True\n self.player.up_check = False\n self.player.down_check = False\n #Boundary\n if self.player.rect.left < 0:\n self.player.rect.left = 0\n if self.player.rect.right > self.board.screen_width:\n self.player.rect.right = self.board.screen_width\n if self.player.rect.top <= 0:\n self.player.rect.top = 0\n if self.player.rect.bottom >= self.board.screen_height:\n self.player.rect.bottom = self.board.screen_height", "def step(self):\n if not self._is_game_over:\n self._move_snake()\n self._is_game_over = self.is_snake_collides()", "def snakeSetup(self,display):\n if display:\n self.screen = pygame.display.set_mode(windowSize)\n pygame.display.set_caption('Snake!')\n pygame.init()\n self.clock = pygame.time.Clock()\n self.dir = left #round(3 * random.random())\n self.s = snake(playerColor, unitSize,self.dir)\n self.setup = True", "def snake_move(self,param=(),duration = None,ignore_error_handle = False):\n message = {};\n step = 'draw a snake on device with ' + str(len(param)) + ' joint';\n try:\n touch_action = TouchAction(self.driver);\n touch_action.snake_move(param,duration).release().perform();\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def change_movement(self, action):\r\n if action == \"diagonal\" and self.movement != \"diagonal\":\r\n self.movement = \"diagonal\"\r\n self.x_speed = 3\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_diagonal)\r\n elif action == \"horizontal\" and self.movement != \"horizontal\":\r\n self.movement = \"horizontal\"\r\n self.x_speed = 3\r\n self.y_speed = 
0\r\n self.canvas.after(50, self.move_horizontal)\r\n elif action == \"vertical\" and self.movement != \"vertical\":\r\n self.movement = \"vertical\"\r\n self.x_speed = 0\r\n self.y_speed = 3\r\n self.canvas.after(50, self.move_vertical)\r\n elif action == \"inward_outward\":\r\n self.movement = \"inward_outward\"\r\n self.canvas.after(50, self.move_inward_outward)", "def control(self, keyCode):\n if (keyCode == DOWN and (self.on_left or self.on_right)):\n if self.on_left:\n self.x = self.maze.LEFT_VERT\n else:\n self.x = self.maze.RIGHT_VERT\n self.rot_begin = self.MOUTH_DOWN_BEGIN_ANGLE\n self.rot_end = self.MOUTH_DOWN_END_ANGLE\n self.x_add = 0\n self.y_add = self.velocity\n elif (keyCode == UP and (self.on_left or self.on_right)):\n if self.on_left:\n self.x = self.maze.LEFT_VERT\n else:\n self.x = self.maze.RIGHT_VERT\n self.rot_begin = self.MOUTH_UP_BEGIN_ANGLE\n self.rot_end = self.MOUTH_UP_END_ANGLE\n self.x_add = 0\n self.y_add = -(self.velocity)\n elif (keyCode == LEFT and (self.on_top or self.on_bottom)):\n if self.on_top:\n self.y = self.maze.TOP_HORIZ\n else:\n self.y = self.maze.BOTTOM_HORIZ\n self.rot_begin = self.MOUTH_LEFT_BEGIN_ANGLE\n self.rot_end = self.MOUTH_LEFT_END_ANGLE\n self.x_add = -(self.velocity)\n self.y_add = 0\n elif (keyCode == RIGHT and (self.on_top or self.on_bottom)):\n if self.on_top:\n self.y = self.maze.TOP_HORIZ\n else:\n self.y = self.maze.BOTTOM_HORIZ\n self.rot_begin = self.MOUTH_RIGHT_BEGIN_ANGLE\n self.rot_end = self.MOUTH_RIGHT_END_ANGLE\n self.x_add = self.velocity\n self.y_add = 0", "def move(self):\r\n\r\n # Randomizes movement after 40 steps and flips sprite \\\r\n # (if x-value of speed variable changes from positive to negative)\r\n if step == 40 and 0 < hunger < 205 and thirst < 175 and self.speed[0] not in range(-1000, 0):\r\n self.speed[0] = random.randint(-5, -1)\r\n self.speed[1] = random.randint(-7, 7)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Randomizes movement after 40 steps, but doesn't flip sprite because \\\r\n # x-value of speed variable doesn't change from positive to negative\r\n elif step == 40 and 0 < hunger < 205 and thirst < 175 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = random.randint(-5, -1)\r\n self.speed[1] = random.randint(-7, 7)\r\n\r\n # Randomizes movement after 80 steps and flips sprite \\\r\n # (if x-value of speed variable changes from negative to positive)\r\n if step == 80 and 0 < hunger < 205 and thirst < 175 and self.speed[0] not in range(0, 1000):\r\n self.speed[0] = random.randint(1, 5)\r\n self.speed[1] = random.randint(-7, 7)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Randomizes movement after 80 steps, but doesn't flip sprite \\\r\n # because x-value of speed variable doesn't change from positive to negative\r\n elif step == 80 and 0 < hunger < 205 and thirst < 175 and self.speed[0] in range(0, 1000):\r\n self.speed[0] = random.randint(1, 5)\r\n self.speed[1] = random.randint(-7, 7)\r\n\r\n # Flips the dino sprite when it hits the left or right side of the enclosure \\\r\n # and reverses dino's speed\r\n if self.rect.right > 818 or self.rect.left < 182:\r\n # Keeps sprite from getting stuck on wall in an endless cycle of flipping\r\n if step != 40 and step != 80 and 0 < hunger < 205 and thirst < 175:\r\n self.speed[0] = - self.speed[0]\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n\r\n # Reverses the dino's speed if it hits the top or bottom side of the enclosure\r\n if self.rect.top < 55 or self.rect.bottom > 542:\r\n # 
Keeps sprite from getting stuck on wall in an endless cycle of flipping\r\n if step != 40 and step != 80 and 0 < hunger < 205 and thirst < 175:\r\n self.speed[1] = - self.speed[1]\r\n\r\n # Causes dinosaur to go to the tree when hunger is high enough\r\n if hunger >= 205:\r\n if step != 40 and step != 80 and 0 < thirst < 175:\r\n if self.rect.left > 300 and self.speed[0] not in range(-1000, 0):\r\n # Speed must be rounded so that speed[0] and speed[1] is in the range functions above \\\r\n # (range function doesn't take decimal point numbers)\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left > 300 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n if self.rect.left < 300 and self.speed[0] not in range(1, 1000):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left < 300 and self.speed[0] in range(1, 1000):\r\n self.speed[0] = round((300 - self.rect.left)/30)\r\n self.speed[1] = round((340 - self.rect.top)/30)\r\n\r\n # Causes dinosaur to go to the pond when thirst is high enough\r\n if thirst == 175:\r\n if step != 40 and step != 80:\r\n if self.rect.left > 540 and self.speed[0] not in range(-1000, 0):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left > 540 and self.speed[0] in range(-1000, 0):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n if self.rect.left < 540 and self.speed[0] not in range(1, 1000):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n self.image = pygame.transform.flip(self.image, 1, 0)\r\n elif self.rect.left < 540 and self.speed[0] in range(1, 1000):\r\n self.speed[0] = round((540 - self.rect.left)/30)\r\n self.speed[1] = round((120 - self.rect.top)/30)\r\n\r\n # Sets rectangle surrounding dino sprite to new position based on its speed\r\n newpos = self.rect.move(self.speed)\r\n self.rect = newpos", "def reset(self):\r\n self.body = [[int(self.x_pos/2), int(self.y_pos/2)]] # initial snake starts at center of screen\r\n self.direction = \"UP\"\r\n self.length = 1\r\n self.alive = True\r\n self.speed = 10", "def interact():\n env = SnakeEnv()\n done = False\n r = 0\n action = random.randrange(4)\n delay_time = 0.2\n\n # After the first run of the method env.render()\n # env.renderer.viewer obtains an attribute 'window'\n # which is a pyglet.window.Window object\n env.render(mode='human')\n # Use the arrows to control the snake's movement direction\n @env.renderer.viewer.window.event\n def on_text_motion(motion):\n \"\"\"\n Events to actions mapping\n \"\"\"\n\n nonlocal action\n if motion == MOTION_UP:\n action = 0\n elif motion == MOTION_DOWN:\n action = 2\n elif motion == MOTION_LEFT:\n action = 3\n elif motion == MOTION_RIGHT:\n action = 1\n\n while not done:\n time.sleep(delay_time)\n obs, reward, done, info = env.step(action)\n env.render(mode='human')\n if reward:\n r += reward\n # Speeding up snake after eating food\n delay_time -= 1/6 * delay_time\n\n return r", "def move():\n Robot.move()", "def player_movement(self):", "def example_move(self):\n 
self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def left(event):\n if event.action == sense_hat.ACTION_RELEASED:\n snake.changeDirection(LEFT)", "def __new_snake(self):\n self._snake = self.Snake(Direction.RIGHT, Position(4, 4), Position(3, 4), Position(2, 4))", "def update(self, pressed_keys):\r\n # read key presses in event log and change position accordingly\r\n if pressed_keys[K_UP]:\r\n if self.direction == \"down\":\r\n pass\r\n else:\r\n self.yChange = -block\r\n self.xChange = 0\r\n self.direction = \"up\"\r\n self.surf = pygame.transform.scale(self.image[0], (block, block))\r\n if pressed_keys[K_DOWN]:\r\n if self.direction == \"up\":\r\n pass\r\n else:\r\n self.yChange = block\r\n self.xChange = 0\r\n self.direction = \"down\"\r\n self.surf = self.imgD\r\n if pressed_keys[K_LEFT]:\r\n if self.direction == \"right\":\r\n pass\r\n else:\r\n self.xChange = -block\r\n self.yChange = 0\r\n self.direction = \"left\"\r\n self.surf = self.imgL\r\n if pressed_keys[K_RIGHT]:\r\n if self.direction == \"left\":\r\n pass\r\n else:\r\n self.xChange = block\r\n self.yChange = 0\r\n self.direction = \"right\"\r\n self.surf = self.imgR\r\n\r\n # when snake passes the boundaries of the screen it will loop through to the opposite side\r\n if self.x >= dis_width:\r\n self.x = 0\r\n if self.x < 0:\r\n self.x = dis_width\r\n if self.y >= dis_height:\r\n self.y = 0\r\n if self.y < 0:\r\n self.y = dis_height\r\n\r\n # add the direction change based on button press\r\n self.x += self.xChange\r\n self.y += self.yChange\r\n\r\n self.head = []\r\n self.head.append(self.x)\r\n self.head.append(self.y)\r\n self.head.append(self.direction)\r\n self.list.append(self.head)\r\n\r\n #if list has more items than the length of snake delete first item in list\r\n if len(self.list) > self.length:\r\n del self.list[0]", "def play_step(self, action):\n self.players[0].moving_left = False\n self.players[0].moving_right = False\n if action == MOVE_LEFT:\n self.players[0].moving_left = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_left = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == MOVE_RIGHT:\n self.players[0].moving_right = True\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n self.players[0].moving_right = False\n if self.dead_player or not self.players[0].is_alive:\n return\n elif action == SHOOT:\n if self.dead_player or not self.players[0].is_alive:\n self.update(is_a_star=True)\n return\n if not self.players[0].weapon.is_active:\n self.players[0].shoot()\n for i in range(LOOP_AT_EACH_MOVE_UPDATE):\n self.update(is_a_star=True)\n if self.dead_player or not self.players[0].is_alive:\n break\n if self.dead_player or not self.players[0].is_alive:\n return", "def on_key_press(self, key, modifiers):\n if key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n elif key == arcade.key.UP:\n self.player.change_y 
= MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED", "def play_move(plateau, sens):\n if not (sens.upper() == \"B\" or sens.upper() == \"H\" or sens.upper() == \"D\" or sens.upper() == \"G\"):\n return \"Erreur !\"\n # mouvement des colonne\n if sens.upper() == \"B\":\n columns_move(plateau, 0)\n\n elif sens.upper() == \"H\":\n columns_move(plateau, 1)\n\n # mouvement des lignes\n elif sens.upper() == \"D\":\n lines_move(plateau, 0)\n\n elif sens.upper() == \"G\":\n lines_move(plateau, 1)", "def up(event):\n if event.action == sense_hat.ACTION_RELEASED:\n snake.changeDirection(UP)", "def move(self):\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_w]:\n self.y -= self.vel\n if keys[pygame.K_a]:\n self.x -= self.vel\n if keys[pygame.K_s]:\n self.y += self.vel\n if keys[pygame.K_d]:\n self.x += self.vel", "def automove(self):\n if self.x < self.end_cinematic_x_pos:\n self.x += self.SHIP_SPEED\n if self.x > self.end_cinematic_x_pos:\n self.x -= self.SHIP_SPEED\n if self.y < self.end_cinematic_y_pos:\n self.y += self.SHIP_SPEED\n if self.y > self.end_cinematic_y_pos:\n self.y -= self.SHIP_SPEED", "def on_key_press(self, key, modifiers):\r\n if key == arcade.key.UP:\r\n self.player.change_y = MOVEMENT_SPEED\r\n elif key == arcade.key.DOWN:\r\n self.player.change_y = -MOVEMENT_SPEED\r\n elif key == arcade.key.LEFT:\r\n self.player.change_x = -MOVEMENT_SPEED\r\n elif key == arcade.key.RIGHT:\r\n self.player.change_x = MOVEMENT_SPEED", "def on_key_press(self, key, modifiers):\r\n if key == arcade.key.UP:\r\n self.player.change_y = MOVEMENT_SPEED\r\n elif key == arcade.key.DOWN:\r\n self.player.change_y = -MOVEMENT_SPEED\r\n elif key == arcade.key.LEFT:\r\n self.player.change_x = -MOVEMENT_SPEED\r\n elif key == arcade.key.RIGHT:\r\n self.player.change_x = MOVEMENT_SPEED", "def snakes_move(self,param=(),duration = 1000,ignore_error_handle = False):\n message = {};\n step = 'draw ' + str(len(param)) + ' snakes on screen';\n multi_action = MultiAction(self.driver);\n try:\n snakes_count = len(param);# the count of the snakes\n for snakes_index in range(0,snakes_count):\n current_snake = param[snakes_index];\n current_snake_joints_count = len(current_snake);\n touch_action = TouchAction(self.driver);\n touch_action._add_action('press',touch_action._get_opts(None,current_snake[0]['x'],current_snake[0]['y'],duration));\n for current_snake_joints_index in range(1,current_snake_joints_count):\n current_joint = current_snake[current_snake_joints_index];\n touch_action._add_action('moveTo',touch_action._get_opts(None,current_joint['x'],current_joint['y'],duration));\n touch_action._add_action('wait',{'ms':duration});\n touch_action.release();\n multi_action.add(touch_action)\n multi_action.perform();\n message = self.feedback.feedback_action_ok(step);\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def play_game(self):\n\n while True:\n self.pixels = [clear] * 64\n\n # sense HAT controller\n for event in self.sense.stick.get_events():\n if event.action == \"pressed\":\n if event.direction == \"up\":\n self.set_direction(0)\n elif event.direction == \"right\":\n self.set_direction(1)\n elif event.direction == \"down\":\n self.set_direction(2)\n elif event.direction == \"left\":\n self.set_direction(3)\n # insert to the start of the array\n self.trail.insert(0, [self.trail[0][0] + self.direction[0], self.trail[0][1] + self.direction[1]])\n\n # one border cross in and the other 
off\n if self.trail[0][0] < 0:\n self.trail[0][0] = 7\n if self.trail[0][1] < 0:\n self.trail[0][1] = 7\n if self.trail[0][0] > 7:\n self.trail[0][0] = 0\n if self.trail[0][1] > 7:\n self.trail[0][1] = 0\n\n # we cover the situation, when the apple pos is a snake pos in this if statement\n if self.trail[0] == self.apple_pos:\n self.apple_pos = []\n while self.apple_pos == []:\n self.apple_pos = [random.randint(0, 7), random.randint(0, 7)]\n if self.apple_pos in self.trail:\n self.apple_pos = []\n self.length += 1\n # snake runs into itself\n elif self.trail[0] in self.trail[1:]:\n self.length = 1\n else:\n while len(self.trail) > self.length:\n # remove from the end ( \"like\" moving, but the length is correct)\n self.trail.pop()\n\n for pos in self.trail:\n # snake visualize on the pixel map (2d coord to 1d coord)\n self.pixels[pos[1] * 8 + pos[0]] = white\n\n # y * rowSize + x -> coordinate convert because of the pixel map\n self.pixels[self.apple_pos[1] * 8 + self.apple_pos[0]] = red\n # apple position (red led)\n self.sense.set_pixels(self.pixels)\n\n time.sleep(0.15)", "def set_control_commands(self, ref_state, ref_ind):\n if not self.at_dest:\n self.commands['speed'] = self.cruising_speed * (5. / self.traffic_level)\n else:\n self.commands['speed'] = 0.0\n dx = ref_state[0] - self.x\n dy = ref_state[1] - self.y\n dx_v = numpy.cos(self.yaw) * dx + numpy.sin(self.yaw) * dy\n\n # To overtake, move to the left a little bit and follow your original traj.\n stay_overtake = False\n if self.overtake:\n self.overtake_begin_ignore += 1\n else:\n self.overtake_begin_ignore = 0\n if self.overtake and len(self.radar_readings[0, :]) > 0:\n stay_overtake = numpy.min(self.radar_readings[0, :]) > 30\n rospy.logerr(self.overtake_begin_ignore)\n if self.overtake_begin_ignore < 3:\n stay_overtake = True\n if not stay_overtake:\n self.overtake = False\n self.overtake_begin_counter = 0\n self.commands['speed'] *= 0\n # rospy.logerr('chcek for stay overtaking: ' + str(stay_overtake))\n else:\n stay_overtake = True\n\n if self.overtake and stay_overtake:\n self.commands['speed'] *= 1.5\n dy_v = -numpy.sin(self.yaw) * dx + numpy.cos(self.yaw) * dy + 7.5\n else:\n dy_v = -numpy.sin(self.yaw) * dx + numpy.cos(self.yaw) * dy\n dyaw_v = ref_state[2] - self.yaw\n # Correct yaw difference. 
dyaw_v 0..pi\n while dyaw_v > numpy.pi:\n dyaw_v -= 2*numpy.pi\n while dyaw_v < -numpy.pi:\n dyaw_v += 2*numpy.pi\n # Calculate steering command from dy_v, dx_v and dyaw_v\n steering_command = dy_v + dyaw_v * 1.5 / (1 + dx_v)\n # Compare with max steering angle\n if steering_command > 0.5:\n steering_command = 0.5\n elif steering_command < -0.5:\n steering_command = -0.5\n self.commands['steering_angle'] = steering_command", "def on_key_press(self, key, modifiers):\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED", "def draw_snake(self, dis, snake_Part, snake_Body):\n for x in snake_Body:\n if self.left:\n direction = pygame.transform.rotate(x[2], x[3]) \n dis.blit(direction, (x[0], x[1]))\n elif self.right:\n direction = pygame.transform.rotate(x[2], x[3]) \n dis.blit(direction, (x[0], x[1]))\n elif self.up:\n direction = pygame.transform.rotate(x[2], x[3]) \n dis.blit(direction, (x[0], x[1]))\n elif self.down:\n direction = pygame.transform.rotate(x[2], x[3]) \n dis.blit(direction, (x[0], x[1]))\n if self.left:\n x1 = self.x + self.x_change - 10\n y1 = self.y + self.y_change - 10\n hdir = pygame.transform.rotate(Head, -90)\n dis.blit(hdir, (x1,y1))\n elif self.right:\n x1 = self.x + self.x_change - 10\n y1 = self.y + self.y_change - 10\n hdir = pygame.transform.rotate(Head, 90)\n dis.blit(hdir, (x1,y1))\n elif self.up:\n x1 = self.x + self.x_change - 10\n y1 = self.y + self.y_change - 10\n hdir = pygame.transform.rotate(Head, 180)\n dis.blit(hdir, (x1,y1))\n elif self.down:\n x1 = self.x + self.x_change - 10\n y1 = self.y + self.y_change - 10\n hdir = pygame.transform.rotate(Head, 0)\n dis.blit(hdir, (x1,y1)) \n\n pygame.display.update", "def _move_forward(self):\n\t\tself.x,self.y = Mario._get_coordinates(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\tif(self.y<=798):\n\t\t\tself.y = self.y+1\n\t\t\tif Board.board[self.x][self.y]=='0':\n\t\t\t\tMario.score += 1\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\n\t\t\telif Board.board[self.x][self.y]=='P':\n\t\t\t\tMario.lives+=1\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_1-up.wav\"])\n\n\t\t\telif Board.board[self.x][self.y]=='A':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\t\t\t\tMario.attack = 1\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_powerup.wav\"])\n\n\t\t\telif Board.board[self.x][self.y]=='@':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tMario.lives-=1\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_mariodie.wav\"])\n\t\t\t\tif Mario.lives<=0:\n\t\t\t\t\tcall([\"aplay\",\"-q\",\"smb_gameover.wav\"])\n\t\t\t\t\treturn \"exit\"\n\t\t\t\tos.system('clear')\n\t\t\t\tprint(\"\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\t\\t\\t\\t\\t\\tNumber of Mario left\",Mario.lives)\n\t\t\t\tMario.respawn(self.x,self.y)\n\t\t\t\ttime.sleep(2)\n\t\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)\n\n\t\t\telif(Board.board[self.x][self.y]=='/'):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x-1][self.y]='M'\n\n\t\t\telif Board.board[self.x][self.y]=='I':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_stage_clear.wav\"])\n\t\t\t\tBoard.bonus_round()\n\n\t\t\telif 
Board.board[self.x][self.y]=='K':\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tcall([\"aplay\",\"-q\",\"smb_stage_clear.wav\"])\n\t\t\t\tenemy.boss_round()\n\n\t\t\telif(Board.board[self.x][self.y] in obstacles):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y-1]='M'\n\n\t\t\telif((Board.board[self.x+1][self.y-1]=='/' or Board.board[self.x+1][self.y-1]=='T') and Board.board[self.x+1][self.y]==' '):\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y+1]='M'\n\t\t\t\tMario.go_down(self)\n\t\t\telse:\n\t\t\t\tMario._pass(self.x,self.y-1)\n\t\t\t\tBoard.board[self.x][self.y]='M'\n\n\t\tif( self.y-1 >= ((Board.prev_j+Board.prev_k)/2) ):\n\t\t\tos.system('clear')\n\t\t\tBoard.prev_j += 1 \n\t\t\tBoard.prev_k += 1\n\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)\n\t\telse:\n\t\t\tos.system('clear')\n\t\t\tinit_board(Board.prev_i,Board.prev_j,Board.prev_k)", "def on_key_press(self, key):\n if key == LEFT:\n self.player.change_x = -5\n elif key == RIGHT:\n self.player.change_x = 5\n elif key == UP:\n self.player.change_y = -5 \n elif key == DOWN:\n self.player.change_y = 5", "def move(self, t, s):\n raise NotImplementedError", "def on_draw(delta_time):\n # draws all our objects\n arcade.start_render()\n\n generate_grid()\n apple()\n snake(on_draw.snake_part_x, on_draw.snake_part_y, 20, 20, snake_color)\n snake(on_draw.snake_part2_x, on_draw.snake_part2_y, 20, 20, snake_color)\n snake(on_draw.snake_part3_x, on_draw.snake_part3_y, 20, 20, snake_color)\n snake(on_draw.snake_part4_x, on_draw.snake_part4_y, 20, 20, snake_color)\n snake(on_draw.snake_part5_x, on_draw.snake_part5_y, 20, 20, snake_color)\n snake(on_draw.snake_part6_x, on_draw.snake_part6_y, 20, 20, snake_color)\n\n\n if on_draw.snake_part_x <= 230:\n snake(on_draw.snake_part6_x + 20, on_draw.snake_part6_y, 20, 20, snake_color)\n\n\n \"\"\" If statements that will make snake part one move \"\"\"\n if on_draw.snake_part_x >= 550:\n on_draw.snake_part_x -= 20\n\n elif on_draw.snake_part_x <= 550:\n on_draw.snake_part_y += 20\n if on_draw.snake_part_y >= 500:\n on_draw.snake_part_y -= 20\n on_draw.snake_part_x -= 20\n if on_draw.snake_part_x <= 180:\n on_draw.snake_part_x += 20\n\n \"\"\" If statements that will make snake part two move \"\"\"\n if on_draw.snake_part2_x >= 550:\n on_draw.snake_part2_x -= 20\n\n elif on_draw.snake_part2_x <= 550:\n on_draw.snake_part2_y += 20\n if on_draw.snake_part2_y >= 500:\n on_draw.snake_part2_y -= 20\n on_draw.snake_part2_x -= 20\n if on_draw.snake_part2_x <= 200:\n on_draw.snake_part2_x += 20\n\n \"\"\" If statements that will make snake part three move \"\"\"\n if on_draw.snake_part3_x >= 550:\n on_draw.snake_part3_x -= 20\n\n elif on_draw.snake_part3_x <= 550:\n on_draw.snake_part3_y += 20\n if on_draw.snake_part3_y >= 500:\n on_draw.snake_part3_y -= 20\n on_draw.snake_part3_x -= 20\n if on_draw.snake_part3_x <= 220:\n on_draw.snake_part3_x += 20\n\n \"\"\" If statements that will make snake part four move \"\"\"\n if on_draw.snake_part4_x >= 550:\n on_draw.snake_part4_x -= 20\n\n elif on_draw.snake_part4_x <= 550:\n on_draw.snake_part4_y += 20\n if on_draw.snake_part4_y >= 500:\n on_draw.snake_part4_y -= 20\n on_draw.snake_part4_x -= 20\n if on_draw.snake_part4_x <= 240:\n on_draw.snake_part4_x += 20\n\n \"\"\" If statements that will make snake part five move \"\"\"\n if on_draw.snake_part5_x >= 550:\n on_draw.snake_part5_x -= 20\n\n elif on_draw.snake_part5_x <= 550:\n on_draw.snake_part5_y += 20\n if on_draw.snake_part5_y >= 500:\n 
on_draw.snake_part5_y -= 20\n on_draw.snake_part5_x -= 20\n if on_draw.snake_part5_x <= 260:\n on_draw.snake_part5_x += 20\n\n \"\"\" If statements that will make snake part six move \"\"\"\n if on_draw.snake_part6_x >= 550:\n on_draw.snake_part6_x -= 20\n\n elif on_draw.snake_part6_x <= 550:\n on_draw.snake_part6_y += 20\n if on_draw.snake_part6_y >= 500:\n on_draw.snake_part6_y -= 20\n on_draw.snake_part6_x -= 20\n if on_draw.snake_part6_x <= 280:\n on_draw.snake_part6_x += 20", "def game_loop(): \n # prev auto quit\n game_over = False\n game_close = False\n \n # display\n x1 = dis_width / 2\n y1 = dis_height / 2\n x1_change = 0\n y1_change = 0\n\n snake_list = []\n snake_length = 1\n \n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n \n while not game_over:\n # wait game \n while game_close == True:\n dis.fill(white)\n your_score(snake_length - 1)\n rules(\"gather food without hitting the border or your snake body\", black)\n controls(\"WASD or arrow keys\", red)\n message(\"Press Q-Quit or E-Play\", red)\n pygame.display.update()\n \n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_e: # play again\n game_loop()\n if event.key == pygame.K_q: # quit\n game_over = True\n game_close = False\n # in game\n for event in pygame.event.get():\n # close on x\n if event.type == pygame.QUIT:\n game_over = True\n # move snake\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT or event.key == pygame.K_a:\n x1_change = -snake_block\n y1_change = 0\n elif event.key == pygame.K_RIGHT or event.key == pygame.K_d:\n x1_change = snake_block\n y1_change = 0\n elif event.key == pygame.K_UP or event.key == pygame.K_w:\n x1_change = 0\n y1_change = -snake_block\n elif event.key == pygame.K_DOWN or event.key == pygame.K_s:\n x1_change = 0\n y1_change = snake_block\n \n # border block\n if x1 >= dis_width or x1 < 0 or y1 >= dis_height or y1 < 0:\n game_close = True\n \n x1 += x1_change\n y1 += y1_change\n dis.fill(white)\n pygame.draw.rect(dis, blue, [foodx, foody, snake_block, snake_block])\n snake_head = []\n snake_head.append(x1)\n snake_head.append(y1)\n snake_list.append(snake_head)\n\n if len(snake_list) > snake_length:\n del snake_list[0]\n\n for x in snake_list[:-1]:\n if x == snake_head:\n game_close == True\n \n our_snake(snake_block, snake_list)\n your_score(snake_length - 1)\n pygame.display.update()\n \n if x1 == foodx and y1 == foody:\n foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0\n foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0\n snake_length += 1\n clock.tick(snake_speed)\n \n pygame.quit()\n quit()", "def play(self):\n\n self.useKeys = true;\n clock = pygame.time.Clock()\n s = snake(playerColor, unitSize)\n dir = round(3 * random.random())\n s.move(self.dir)\n screen = pygame.display.set_mode((gameSize*unitSize,gameSize*unitSize))\n pygame.display.set_caption('Snake!')\n global boardRect, boardPosX, boardPosY, score\n boardRect = pygame.Rect((0, 0), (gameSize * unitSize,\n gameSize * unitSize))\n tempPosX = boardPosX\n tempPosY = boardPosY\n boardPosX = 0\n boardPosY = 0\n\n while 1:\n if not gameOver:\n #pygame.time.delay(50)\n #clock.tick(10)\n self.draw(screen, s)\n self.score = score\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n boardPosX = tempPosX\n boardPosY = tempPosY\n boardRect = pygame.Rect((boardPosX, boardPosY), (gameSize * 
unitSize,\n gameSize * unitSize))\n self.useKeys = false\n return\n\n self.checkMove(s);\n\n else:\n self.gameOver = True;\n pygame.font.init()\n screen.fill(0)\n pygame.draw.rect(screen, boardColor, boardRect)\n font = pygame.font.Font(pygame.font.get_default_font(),\n 15)\n self.score = score;\n text = font.render('Score: ' + str(score), True, (255,\n 255, 255), (100, 100, 100))\n textRect = text.get_rect()\n screen.blit(text, textRect)\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n boardPosX = tempPosX\n boardPosY = tempPosY\n boardRect = pygame.Rect((boardPosX, boardPosY), (gameSize * unitSize,\n gameSize * unitSize))\n self.useKeys = false\n return", "def start(self):\n self._canvas.focus_set()\n self._canvas.bind('<Left>', lambda event: self._move_snake(LEFT))\n self._canvas.bind('<Right>', lambda event: self._move_snake(RIGHT))\n self._canvas.bind('<Up>', lambda event: self._move_snake(UP))\n self._canvas.bind('<Down>', lambda event: self._move_snake(DOWN))\n self.put_apple()\n self._after_id.append(self._canvas.after(SNAKE_SPEED, self._move_snake, RIGHT))\n self.paint()", "def reset_movement(self):\n self.direction = [0, 0]", "def run(self):\n super(MovementControl,self).run()", "def setMovement(self, movement, isSpecial = False, canControl = True):\n\n vel = self.ode_body.getLinearVel()\n for i in range(len(self.direction)):\n vel[i] = self.direction[i] * movement\n\n self.ode_body.setLinearVel(vel)\n\n self.moveVal = self.direction\n self.moveSpecial = isSpecial\n self.isMove = [False, False]\n self.direction = [self.moveVal[0], self.moveVal[1]]\n\n if not canControl:\n self.knockback()\n self.moveLock(None, 9999)\n self.isKnockback = True\n \n # Play Sound\n if movement > 10:\n self.sfx['lunge'].play()", "def makeMove(self, move, player):", "def start_and_stop(self, action):\r\n if action == \"start\" and self.flag_move != 1:\r\n self.flag_move = 1\r\n if self.movement == \"diagonal\":\r\n self.canvas.after(50, self.move_diagonal)\r\n elif self.movement == \"horizontal\":\r\n self.canvas.after(50, self.move_horizontal)\r\n elif self.movement == \"vertical\":\r\n self.canvas.after(50, self.move_vertical)\r\n elif self.movement == \"inward_outward\":\r\n self.canvas.after(50, self.move_inward_outward)\r\n elif action == \"stop\":\r\n self.flag_move = 0", "def run(self):\n while self.direction != \"\":\n if self.direction == \"decrease\":\n if self.position > 200:\n self.position -= 15\n elif self.direction == \"increase\":\n if self.position < 800:\n self.position += 15\n if self.direction != \"neutral\":\n self.move_joint(self.position, 900)\n time.sleep(0.1)", "def on_key_press(self, key, modifiers):\n\n if key == arcade.key.UP:\n self.player_sprite.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = MOVEMENT_SPEED", "def AeroMove(self, pos):\r\n\r\n pass", "def move(event):\r\n\t\tif event.char == \"a\":\r\n\t\t\tcanvas.move(z[a], -10, 0)\r\n\t\telif event.char == \"d\":\r\n\t\t\tcanvas.move(z[a], 10, 0)\r\n\t\telif event.char == \"w\":\r\n\t\t\tcanvas.move(z[a], 0, -10)\r\n\t\telif event.char == \"s\":\r\n\t\t\tcanvas.move(z[a], 0, 10)", "def on_key_press(self, key, modifiers):\n #if self.player_sprite.amphet_excited is False:\n \n\n if key == arcade.key.UP:\n self.player_sprite.change_y = MOVEMENT_SPEED\n elif key == 
arcade.key.DOWN:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = MOVEMENT_SPEED\n\n\n elif key == arcade.key.ESCAPE:\n raise Exception(\"\\n\\n See You soon, fork it share it !\")", "def on_key_press(self, key, modifiers):\n if self.current_state == GAME_RUNNING:\n if key == arcade.key.UP:\n self.player_sprite.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = MOVEMENT_SPEED", "def move_dart(self):\n global level\n if level == 0:\n self.rect.centerx+=self.delta\n if self.rect.centerx >= 1000: \n self.delta = -1\n elif self.rect.centerx < 500:\n self.delta = 1\n elif level == 1:\n self.rect.centery+=self.delta\n if self.rect.centery <= 150: \n self.delta = 2\n elif self.rect.centery > 650:\n self.delta = -2\n elif level == 2:\n self.rect.centerx+=self.delta #To make changes in both x and y direction\n self.rect.centery+=self.delta\n if self.rect.centerx < 100 or self.rect.centery <= 100: \n self.delta = random.randint(1,10) #adds random speeds to the motion\n elif self.rect.centerx >= 900 or self.rect.centery > 700:\n self.delta = -random.randint(1,10)", "def move(self):\n\n if self.range > 0:\n self.dirty = 1\n self.rect.move_ip([self.x * self.speed, self.y * self.speed])\n self.range -= self.speed\n else:\n self.kill()", "def move(self, direction):\n # replace with your code\n pass", "def move(self, direction):\n # replace with your code\n pass", "def begin_auto_moving(self, direction):\n self.direction = direction\n self.image_list = self.animation_dict[direction]\n self.state = 'automoving'\n self.x_vel = self.vector_dict[direction][0]\n self.y_vel = self.vector_dict[direction][1]\n self.move_timer = self.current_time", "def move(contr):\n # get the object this script is attached to\n camera = contr.owner\n\n # set the movement speed\n speed = camera['Speed']\n\n # Get sensor named Mouse\n keyboard = contr.sensors['All_Keys']\n\n # Default movement speed\n move_speed = [0.0, 0.0, 0.0]\n\n keylist = keyboard.events\n for key in keylist:\n # key[0] == GameKeys.keycode, key[1] = status\n if key[1] == GameLogic.KX_INPUT_ACTIVE:\n # Also add the key corresponding key for an AZERTY keyboard\n if key[0] == GameKeys.WKEY or key[0] == GameKeys.ZKEY:\n move_speed[2] = -speed\n elif key[0] == GameKeys.SKEY:\n move_speed[2] = speed\n # Also add the key corresponding key for an AZERTY keyboard\n elif key[0] == GameKeys.AKEY or key[0] == GameKeys.QKEY:\n move_speed[0] = -speed\n elif key[0] == GameKeys.DKEY:\n move_speed[0] = speed\n elif key[0] == GameKeys.RKEY:\n move_speed[1] = speed\n elif key[0] == GameKeys.FKEY:\n move_speed[1] = -speed\n\n # The second parameter of 'applyMovement' determines\n # a movement with respect to the object's local\n # coordinate system\n camera.applyMovement( move_speed, True )\n\n # Get sensor named Mouse\n #for sensor in contr.sensors:\n #if sensor.isA(GameTypes.SCA_KeyboardSensor):", "def handle_movement(self, k_pressed: list) -> None:\n if self.cpu_controlled:\n return\n if pressed(\"LEFT\", k_pressed) and self.x - self.SHIP_SPEED > 0:\n self.x -= self.SHIP_SPEED\n if pressed(\"RIGHT\", k_pressed) and self.x + self.SHIP_SPEED + self.width < CST.SCREEN_WIDTH:\n self.x += self.SHIP_SPEED\n if pressed(\"UP\", k_pressed) and self.y - 
self.SHIP_SPEED > 0:\n self.y -= self.SHIP_SPEED\n if pressed(\"DOWN\", k_pressed) and self.y + self.SHIP_SPEED + self.height < CST.SCREEN_HEIGHT:\n self.y += self.SHIP_SPEED", "def _onmove(self, event):", "def test_snake_snake_colision(self):\n manager = DummyLevelManager()\n game = Game(manager)\n state = game.move(GameMoves.DOWN)\n self.assertEqual(state, LevelState.RUNNING)\n state = game.move(GameMoves.LEFT)\n self.assertEqual(state, LevelState.RUNNING)\n state= game.move(GameMoves.UP)\n self.assertEqual(state, LevelState.LOSE)", "def make_move(self, direction):\r\n\t\tif direction == 0:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x-1][self.y] = self.board[self.x-1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x -= 1\r\n\r\n\t\telif direction == 1:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y+1] = self.board[self.x][self.y+1], self.board[self.x][self.y]\r\n\t\t\tself.y += 1\r\n\r\n\t\telif direction == 2:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x+1][self.y] = self.board[self.x+1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x += 1\r\n\r\n\t\telif direction == 3:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y-1] = self.board[self.x][self.y-1], self.board[self.x][self.y]\r\n\t\t\tself.y -= 1", "def _move(self, dx, dy):\n # horizontal velocity is dx, vertical velocity is dy\n self._player.set_velocity((dx, dy))", "def joystick_move(self, emphasis=1):\n step = int(20*emphasis)\n self.display.ship.move_vertical(step=step)", "def handle_keyboard_input(self):\n keys = pg.key.get_pressed()\n\n if (keys[K_UP]):\n self.grid.change_direction(Direction.up)\n if (keys[K_DOWN]):\n self.grid.change_direction(Direction.down)\n if (keys[K_LEFT]):\n self.grid.change_direction(Direction.left)\n if (keys[K_RIGHT]):\n self.grid.change_direction(Direction.right)\n if (keys[K_SPACE]):\n self.grid.snake.grow()\n if (keys[K_RIGHTBRACKET]):\n self.actions_per_second += 1\n if (keys[K_LEFTBRACKET]):\n self.actions_per_second -= 1\n if (keys[K_t]):\n self.is_training = True\n print(\"========================================================================\")\n print(\"Training: ON\")\n print(\"========================================================================\")\n if (keys[K_s]):\n self.is_training = False\n print(\"========================================================================\")\n print(\"Training: OFF\")\n print(\"========================================================================\")", "def move(self) -> None:\n\n new_x = self.getX() + self.speed[0]\n new_y = self.getY() + self.speed[1]\n self.setX(new_x)\n self.setY(new_y)", "def move(self):\r\n segments = len(self.all_turtles) - 1\r\n for i in range(len(self.all_turtles)):\r\n if segments == 0:\r\n self.all_turtles[segments].forward(MOVE_DISTANCE)\r\n else:\r\n new_x = self.all_turtles[segments - 1].xcor()\r\n new_y = self.all_turtles[segments - 1].ycor()\r\n self.all_turtles[segments].goto(new_x, new_y)\r\n segments -= 1", "def play_move(self,state):\n raise AIError(\"Must be implemented for child class!\")", "def auto_resting(self):\n self.image_list = self.animation_dict[self.direction]\n self.image = self.image_list[self.index]\n\n if self.rect.y % 32 != 0:\n self.correct_position(self.rect.y)\n if self.rect.x % 32 != 0:\n self.correct_position(self.rect.x)\n\n if (self.current_time - self.move_timer) > 2000:\n direction_list = ['up', 'down', 'left', 'right']\n random.shuffle(direction_list)\n direction = direction_list[0]\n self.begin_auto_moving(direction)\n 
self.move_timer = self.current_time", "def __run(self):\n # init snake show\n self.__init_snake()\n self.__introduction.hide()\n # start ticktock for snake moving\n self.__ticker.start()\n # enable key press\n self.__enable_key = True", "def on_key_press(self, key, modifiers):\n if self.state == 0 and key in (arcade.key.UP, arcade.key.DOWN, arcade.key.LEFT, arcade.key.RIGHT):\n self.state = 1 #state of game turns to one when a game begins \n \n if key == arcade.key.P: #p pauses and unpauses the game\n if self.state in (1,2): #toggle math\n self.state = 3 - self.state\n elif self.state == 0: \n self.state = 2\n elif not self.state in (2,3): #if game is not paused or ended, direction is changed\n if key == arcade.key.DOWN:\n self.snake.set_direction(0)\n elif key == arcade.key.LEFT:\n self.snake.set_direction(1)\n elif key == arcade.key.UP:\n self.snake.set_direction(2)\n elif key == arcade.key.RIGHT:\n self.snake.set_direction(3)\n elif self.state == 3: #if game is ended, pressing space starts a new game\n if key == arcade.key.SPACE:\n self.new_game()", "def get_new_snake():\n global direction, snake, X_start, Y_start\n X = [x for x in range(40, WINDOWWIDTH - 80, 20)] #multiplier list 20\n Y = [y for y in range(40,WINDOWHEIGHT - 80, 20)]#multiplier list 20\n X_start = random.choice(X)#random multiplier of 20\n Y_start = random.choice(Y)#random multiplier of 20\n direction = \"right\"\n snake = [[X_start, Y_start], [X_start - 20, Y_start], [X_start - 40, Y_start]] #first 3 cells of snake", "def move(self):\n\n if self.rect.right >= SCREEN_WIDTH:\n self.rect.right = SCREEN_WIDTH\n elif self.rect.left <= 0:\n self.rect.left = 0\n #elif self.rect.right < SCREEN_WIDTH and self.rect.left: \n \n self.rect.move_ip(self.speed_p,0)", "def update_movement(self):\n if self.way_idx < len(self.waypoints) and not self.moving_object.is_moving:\n self.moving_object.start_moving(self.waypoints[self.way_idx])\n self.way_idx += 1", "def move(self):\n \n self.position = self.wander()", "def down(event):\n if event.action == sense_hat.ACTION_RELEASED:\n snake.changeDirection(DOWN)", "def __move__(self):\n v = self.velocity\n p = self.position\n p += v\n self.rect.x = round(p.x)\n self.rect.y = round(p.y)", "def move(self):\n pass", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... 
\",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def step(self):\r\n\r\n self.velocity = 1\r\n new_pos = self.pos\r\n self.model.space.move_agent(self, new_pos)", "def right(event):\n if event.action == sense_hat.ACTION_RELEASED:\n snake.changeDirection(RIGHT)", "def handle_keys(self, maze, game_display, key):\n Drone.handle_keys(self, maze, game_display, key)\n if self.auto_flag:\n if self.state == DroneState.LAND and self.time_in_air > 0:\n self.state = DroneState.TAKE_OFF\n self.auto_move(maze=maze, game_display=game_display)\n if key[pygame.K_a]:\n self.auto_move(maze=maze, game_display=game_display)\n if key[pygame.K_d] and self.time_in_air > 0:\n self.auto_flag = True\n if key[pygame.K_s]:\n self.state = DroneState.LAND\n if key[pygame.K_w]:\n self.slam.show()\n return False", "def nod():\n while True:\n MOVEMENTS.set_raw_angle(7, 52)\n sleep(2)\n MOVEMENTS.set_raw_angle(7, 0)\n sleep(2)", "def move(self):\n self.position += self.speed", "def run(self):\n\n # avviso l'utente sul livello che sta per partire il livello di\n # gioco corrente\n #self.drawMessage(\" Livello %s \" % (self.levelIndex + 1), 2000)\n self.state = GameState.PLAYING\n\n # variabili per la gestione del tempo\n # del corrente livello di gioco\n self.start = time.time()\n self.remaining_time = self.time\n pause_time = 0\n\n # incremento della velocita del giocatore\n # che aumento se si tiene premuta la barra spaziatrice\n vel_inc = 0\n\n # memorizzo il tipo di movimento da far eseguire al player\n player_move_action = self.player.sprite.stop\n\n\n # ciclo principale del gioco\n while not self.done:\n\n # gestione degli eventi\n for ev in pygame.event.get():\n\n if ev.type == QUIT:\n self.done = True\n elif ev.type == KEYDOWN:\n if ev.key == K_q:\n self.done = True\n\n # verifico per lo sparo\n elif ev.key == K_LCTRL:\n self.player.sprite.shot(self.player_bullets_filename, self.playerBullets, self.sound_weapon)\n elif ev.key == K_SPACE:\n vel_inc = 1\n elif ev.key == K_LEFT:\n player_move_action = self.player.sprite.moveLeft\n elif ev.key == K_RIGHT:\n player_move_action = self.player.sprite.moveRight\n elif ev.key == K_UP:\n player_move_action = self.player.sprite.moveUp\n elif ev.key == K_DOWN:\n player_move_action = self.player.sprite.moveDown\n elif ev.key == K_r:\n self.setupLevel()\n self.run()\n elif ev.key == K_n:\n self.levelIndex += 1\n self.setupLevel()\n self.run()\n elif ev.key == K_b:\n self.levelIndex -= 1\n self.setupLevel()\n self.run()\n elif ev.key == K_p or ev.key==K_h:\n if self.state == GameState.PLAYING:\n # registro l'istante in cui è avvenuta la pausa\n pause_time = time.time()\n if ev.key==K_p:\n self.state = GameState.PAUSED\n else:\n self.state = GameState.HELP\n else:\n # ricavo il tempo passato da quando sono in pausa\n dtime = time.time() - pause_time\n # aumento il numero di secondi del tempo iniziale\n # con quelli trascorsi durante la pausa\n self.start += dtime\n self.state = GameState.PLAYING\n print(\"Stato:%s\" % self.state)\n elif ev.type == KEYUP:\n if ev.key == K_SPACE:\n vel_inc = 0\n elif ev.key != K_LCTRL:\n player_move_action =self.player.sprite.stop\n\n # se ho un joystick collegato lo uso per gli spostamenti\n if self.joystick!=None:\n velx = self.joystick.get_axis(0)\n vely = self.joystick.get_axis(1)\n #print(\"%s %s\" % (velx,vely))\n self.player.sprite.move(velx,vely)\n # controllo se ho premuto un pulsante per lo sparo\n if ev.type == 
pygame.JOYBUTTONDOWN:\n self.player.sprite.shot(self.player_bullets_filename, self.playerBullets, self.sound_weapon)\n # altrimenti uso le informazioni ricevute dalla tastiera\n else:\n # aziono il movimento del giocatore sulla base\n # della combinazione dei tasti premuti\n player_move_action(vel_inc)\n\n # aggiorno la posizione del giocatore e\n # dei nemici, verifico collisioni e poteri\n # solo se sono in fase di gioco\n\n if self.state == GameState.PLAYING:\n self.remaining_time = self.get_remaining_time()\n if self.remaining_time <= 0:\n self.removeLife()\n\n # aggiorno le posizioni di tutti gli sprite\n self.updateAllSpritesPositions()\n\n # gestisco le collisioni di tutti gli sprite\n self.handleAllSpritesCollisions()\n\n # gestisco i vari poteri del player\n self.handlePowers()\n\n\n # verifico se ho, in un modo o nell'altro.\n # rimosso tutte le monete..in tal caso\n # dichiaro il livello completato\n if len(self.coins.sprites()) ==0:\n if self.levelIndex>=len(levels)-1:\n self.state = GameState.PLAYER_WON\n self.done = True\n else:\n self.state = GameState.LEVEL_COMPLETED\n\n #\n # Aggiornamento dello schermo\n #\n\n #cancello lo schermo\n self.screen.fill((0, 0, 0))\n\n\n # il labirinto lo inserisco nella superficie scrollabile\n self.scrollSurface.blit(self.mazeSurf,self.mazeSurf.get_rect())\n\n # disegno tutti gli sprite di gioco\n self.drawAllSprites()\n\n\n # centro la superficie del labirinto rispetto al centro del giocatore\n sc_x = self.screen.get_rect().center[0] - self.player.sprite.rect.center[0]\n sc_y = self.screen.get_rect().center[1] - self.player.sprite.rect.center[1]\n scrollSurfaceRect = Rect((sc_x,sc_y+self.gamebarSurface.get_rect().height),(self.scrollSurface.get_rect().width, self.scrollSurface.get_rect().height))\n\n if (self.background_image!=None):\n self.screen.blit(self.background_image,(0,0))\n self.screen.blit(self.scrollSurface, scrollSurfaceRect)\n\n # disegno la barra di informazioni di gioco\n self.drawNewGamebarSurface()\n\n # gestisco la logica di gioco sulla base dello stato corrente\n self.handleGameState()\n\n # riporto tutto a video\n # n.b: se ometto la seguente istruzione non vedo nulla!\n pygame.display.flip()\n\n # scandisco la velocità del gioco\n self.clock.tick(self.clockTime)\n\n\n # --- Uscita dal gioco\n pygame.mixer.music.stop()\n while pygame.mixer.get_busy():\n self.clock.tick(30)\n\n print(\"Uscita\")\n pygame.quit()\n sys.exit()", "def move(self, row, col, player):", "def move(self):\n self.tick()\n self.pressed = pygame.key.get_pressed()\n\n self.player.update(self)", "def on_key_press(self, key, key_modifiers):\n if key == arcade.key.UP or key == arcade.key.W:\n if self.physics_engine.can_jump():\n self.sprite1.change_y = JUMP_SPEED\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.sprite1.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.sprite1.change_x = MOVEMENT_SPEED", "def make_move(self):\r\n if self.running and self.run:\r\n if self.board is None:\r\n SlTrace.lg(\"sp.board is None\")\r\n return False\r\n \r\n SlTrace.lg(\"running_loop self.running and self.run\", \"running_loop\")\r\n SlTrace.lg(\"running_loop self.start_move\", \"running_loop\")\r\n if self.start_move():\r\n SlTrace.lg(\"running_loop successful start_move\", \"running_loop\")\r\n self.next_move_no()\r\n SlTrace.lg(\"running_loop after start_move\", \"running_loop\")\r\n if self.to_pause:\r\n self.pause_cmd()\r\n self.to_pause = False\r\n return True", "def move_turtle(self):\n 
self.forward(self.move_speed)", "def move(self,dt):\n raise NotImplementedError(\"Robot.move\")", "def update_position(self, canvas):\n if self.x <= 0:\n if self.direction == \"SW\":\n self.direction = \"SE\"\n if self.direction == \"W\":\n self.direction = \"E\"\n if self.direction == \"NW\":\n self.direction = \"NE\"\n if self.x >= canvas.width:\n if self.direction == \"SE\":\n self.direction = \"SW\"\n if self.direction == \"E\":\n self.direction = \"W\"\n if self.direction == \"NE\":\n self.direction = \"NW\"\n if self.y <= 0:\n if self.direction == \"NW\":\n self.direction = \"SW\"\n if self.direction == \"N\":\n self.direction = \"S\"\n if self.direction == \"NE\":\n self.direction = \"SE\"\n if self.y >= canvas.height:\n if self.direction == \"SW\":\n self.direction = \"NW\"\n if self.direction == \"S\":\n self.direction = \"N\"\n if self.direction == \"SE\":\n self.direction = \"NE\"\n if self.direction == \"N\":\n self.y -= 1\n if self.direction == \"NE\":\n self.y -= 1\n self.x += 1\n if self.direction == \"E\":\n self.x += 1\n if self.direction == \"SE\":\n self.x += 1\n self.y += 1\n if self.direction == \"S\":\n self.y += 1\n if self.direction == \"SW\":\n self.x -= 1\n self.y += 1\n if self.direction == \"W\":\n self.x -= 1\n if self.direction == \"NW\":\n self.y -= 1\n self.x -= 1", "def main_game():\n #All the initial perimeters required to Run the Game\n Game_quit = False\n Loss = False\n foodx = round(random.randrange(50, dis_width - 50))\n foody = round(random.randrange(50, dis_height - 50))\n snake_Body = []\n score = 4\n redraw(dis)\n\n while not Game_quit:\n \n keys = pygame.key.get_pressed()\n \n Game_quit = Quit()\n \n if not snake.Start: \n StartScreen()\n if keys[pygame.K_RETURN]:\n snake.Start = True \n\n if Loss:\n \n EndScreen(score)\n \n if keys[pygame.K_RETURN]:\n main_game()\n \n elif keys[pygame.K_ESCAPE]:\n Game_quit = True\n\n snake.x = dis_width/2\n snake.y = dis_height/2\n #Playing Game......\n while snake.Start and not Loss and not Game_quit :\n\n Clock.tick(20)\n Game_quit = Quit() \n \n\n Movement()\n \n snake.x += snake.x_change\n snake.y += snake.y_change\n \n if snake.x < 0 or snake.x > dis_width - snake.width or snake.y < 0 or snake.y > dis_height - snake.height:\n Loss = True\n \n #For Drawing Snake Body having different x and y with each part of snake \n \n snake_Part = []\n snake_Part.append(snake.x)\n snake_Part.append(snake.y)\n snake_Part.append(Mid)\n snake_Part.append(snake.ang)\n snake_Body.append(snake_Part) \n\n if len(snake_Body) > score:\n \n del snake_Body[0]\n \n #For Drawing the Head of the snake\n \n snake_x = snake.x\n snake_y = snake.y\n snake_x += snake.x_change \n snake_y += snake.y_change\n headposx = snake_x\n headposy = snake_y \n \n #Collision Detection between head and body\n \n for track in snake_Body[:-1]:\n \n if track[0] == headposx and track[1] == headposy:\n Loss = True\n \n snake.draw_snake(dis, snake_Part, snake_Body) \n fooddraw(foodx, foody) \n \n d = Distance(foodx,foody)\n #Collision Detection between Snake and the Food\n if d < 40:\n foodx = round(random.randrange(50, dis_width - 50))\n foody = round(random.randrange(50, dis_height - 50))\n score += 1\n \n scoredraw(score)\n redraw(dis)", "def on_key_press(self, key, modifiers):\n if key == arcade.key.UP or key == arcade.key.W:\n self.direction = MoveEnum.UP\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.direction = MoveEnum.DOWN\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.direction = MoveEnum.LEFT\n elif key == 
arcade.key.RIGHT or key == arcade.key.D:\n self.direction = MoveEnum.RIGHT\n elif key == arcade.key.SPACE:\n self.shoot()", "def on_key_press(self, key, key_modifiers):\n if key == arcade.key.UP or key == arcade.key.W:\n self.player_sprite.change_y = PLAYER_MOVEMENT_SPEED\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.player_sprite.change_y = -PLAYER_MOVEMENT_SPEED\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED", "def __play_move(self, x, y):\n\t\tself.board[y][x] = self.current_player\n\t\tself.current_player = self.__negated_player(self.current_player)", "def update(self, seconds):\n # Gravity\n self.calcGravity(seconds)\n \n # Move left/right\n self.rect.x += self.change_x\n \n # See if we hit anything\n block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for block in block_hit_list:\n # If we are moving right,\n # set our right side to the left side of the item we hit\n if self.change_x > 0:\n self.rect.right = block.rect.left\n elif self.change_x < 0:\n # Otherwise if we are moving left, do the opposite.\n self.rect.left = block.rect.right\n \n # Move up/down\n self.rect.y += self.change_y\n \n # update arm position\n self.arm.update(seconds)\n \n # Check and see if we hit anything\n block_hit_list = pygame.sprite.spritecollide(self, self.level.platform_list, False)\n for block in block_hit_list:\n \n # Reset our position based on the top/bottom of the object.\n if self.change_y > 0:\n self.rect.bottom = block.rect.top\n elif self.change_y < 0:\n self.rect.top = block.rect.bottom\n \n # Stop our vertical movement\n self.change_y = 0" ]
[ "0.7399674", "0.6836237", "0.6778097", "0.6765576", "0.67256254", "0.66972667", "0.65777445", "0.6498739", "0.6492528", "0.6492464", "0.64858997", "0.6451853", "0.64459854", "0.640347", "0.64023644", "0.6400139", "0.6397963", "0.6363882", "0.6341559", "0.63338673", "0.63338673", "0.6331024", "0.6329555", "0.6322084", "0.6315082", "0.6307919", "0.6303197", "0.6290147", "0.6255717", "0.6253231", "0.6244571", "0.6244571", "0.62316597", "0.6227857", "0.6224293", "0.62225145", "0.62092805", "0.6205704", "0.61980546", "0.6194001", "0.6188142", "0.61852354", "0.61793095", "0.6169601", "0.6166539", "0.6159389", "0.61569095", "0.6138581", "0.6137954", "0.61300117", "0.61227965", "0.6122507", "0.6118878", "0.61036116", "0.61017346", "0.6099146", "0.60816437", "0.60812825", "0.60812825", "0.60767984", "0.60673946", "0.6056449", "0.6055912", "0.6046878", "0.6033225", "0.60311145", "0.6023871", "0.6016748", "0.6016103", "0.6006486", "0.60032904", "0.6000912", "0.60005766", "0.5999323", "0.5991909", "0.59814113", "0.5971818", "0.5968811", "0.59674793", "0.5967358", "0.5966401", "0.59480506", "0.59176093", "0.591229", "0.58981055", "0.5892499", "0.5892236", "0.5884943", "0.588156", "0.58722", "0.5870785", "0.58699346", "0.5864316", "0.5856827", "0.5843752", "0.58376306", "0.58334273", "0.58192813", "0.58163005", "0.58129597" ]
0.78153515
0
This function shows the current score of the player
def scoredraw(score):
    msg = "SCORE : " + str(score - 4)
    font_style = pygame.font.SysFont("Microsoft Sans Serif", 30)
    mesg = font_style.render(msg, True, (255, 255, 255))
    dis.blit(mesg, (10, 10))
    pygame.display.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_score(self, win, player, computer):\n font = pygame.font.SysFont('comicsans', 70)\n if player < 10 and computer < 10:\n pygame.draw.rect(win, black, (150, 30, 75, 50))\n pygame.draw.rect(win, black, (295, 30, 75, 50))\n text1 = font.render(str(player), 1, white)\n text2 = font.render(str(computer), 1, white)\n win.blit(text1, (185, 35))\n win.blit(text2, (297, 35))", "def score(self):\n score_message = {\n 'Onewins': \"\\nThe Winner is Player 1!\",\n 'Twowins': \"\\nThe Winner is Player 2!\",\n 'Tie': \"\\nTie! Looks like everyone's a winner!\",\n 'Nowinner': \"\\nYikes, neither of you win!\"\n }\n if self.pone_score > self.ptwo_score:\n print(score_message['Onewins'])\n elif self.pone_score < self.ptwo_score:\n print(score_message['Twowins'])\n elif self.pone_score == 0 and self.ptwo_score == 0:\n print(score_message['Nowinner'])\n else:\n print(score_message['Tie'])", "def scorer(self, current_score):\r\n text = self.field.render(\"Score: \" + str(current_score // 2), True, BLACK_COLOUR)\r\n win.blit(text, (0, 0))", "def disp_score():", "def your_score(score):\n value = score_font.render(\"Your Score: \" + str(score), True, green)\n dis.blit(value, [0, 0])", "def show_score(x, y):\n score = font.render(\"Score: \" + str(score_value), True, (255, 255, 255))\n screen.blit(score, (x, y))", "def __show_scoreboard(self):\n self.clear_screen()\n\n print('\\n' * 2, end=\"\")\n for line in self.__fame:\n print((\" \" * 5) + line, end=\"\")\n print('\\n' * 2, end=\"\")\n\n with open(\"mastermind/assets/scores.json\", \"r\") as data:\n board = list(load(data).items())\n\n space = \" \" * 11\n print(f\"{space}RANK {'PLAYER':<30}\" +\n f\"{'TIME':>7} (seconds){'POINTS':>29}\\n\")\n\n lines_printed = 0\n for idx, entry in enumerate(board[:10]):\n lines_printed += 1\n space = \" \" * 10\n n = idx + 1\n year, month, day, time = entry[0].split(\" \")\n points = entry[1][\"points\"]\n playtime = entry[1][\"playtime\"]\n player = entry[1][\"player\"]\n\n print(f\"{space}{n:>4}. 
{player:<30}\" +\n f\"{playtime:>7,.2f}{points:>36}/15\")\n\n lines = \"\\n\" * (12 - lines_printed)\n print(f\"{lines}{space}\", end=\"\")\n sleep(.25)\n self.cool_print(\"Press ENTER to return to player menu.\",\n newline=False, margin=0)\n input()", "def show_score(self):\n self._pause = True # pause the game when you check the score\n score_list = self.get_high_score(self._filename) # get the record\n top = tk.Toplevel() # create a Toplevel\n top.title('Score Board')\n # create a text label for notification\n title = tk.Label(top, text='High Scored Player in This Level', width=70)\n title.pack(side=tk.TOP, ipady=1)\n if score_list is None: # check whether the record is empty\n tk.Label(top, text='No record in this level yet!', width=70).pack(side=tk.TOP, ipady=1)\n else: # if not empty\n for record in score_list: # shows up all the detail\n tk.Label(top, text=record[0] + ' : ' + record[1]).pack(side=tk.TOP, ipady=1)", "def draw_score(self):\n score_text = \"Score: {}\".format(self.score)\n start_x = 10\n start_y = (SCREEN_HEIGHT - 20)\n arcade.draw_text(score_text, start_x=start_x, start_y=start_y, font_size=12, color=arcade.color.BLACK)", "def score(self):\n return self.client.call('GET', self.name + 'score')", "def draw_score(self):\n score_text = \"Score: {}\".format(self.score)\n start_x = 10\n start_y = SCREEN_HEIGHT - 20\n arcade.draw_text(score_text, start_x=start_x, start_y=start_y, font_size=12, color=arcade.color.NAVY_BLUE)", "def draw_score(self):\r\n score_text = \"Score: {}\".format(self.score)\r\n start_x = 10\r\n start_y = SCREEN_HEIGHT - 20\r\n arcade.draw_text(score_text, start_x=start_x, start_y=start_y, font_size=12, color=arcade.color.NAVY_BLUE)", "def update_score(self, engine, *args):\n #pdb.set_trace()\n self.score_label.text = \"Gold: {}/{}\".format(str(engine.score),\n str(engine.win_score))", "def show_score(self, display, score):\n text = self.FONT.render(f'Score: {int(score)}', True, c.WHITE, c.BLACK)\n text_rect = text.get_rect()\n text_rect.centerx = self.SCORE_POS_X\n text_rect.centery = self.SCORE_POS_Y\n display.blit(text, text_rect)\n pygame.display.update()", "def print_scores(self):\n print(\"scores: \", self.get_scores())", "def matchscore(self):\n print(self.team1.name + \" \" + str(self.team1score) + \" - \" + str(self.team2score) + \" \" + self.team2.name)", "def see_score(score_):\r\n pygame.font.init()\r\n myfont = pygame.font.SysFont('Comic Sans MS', 20)\r\n\r\n textsurface_score = myfont.render('Score: {}'.format(score_[len(score_) - 1]), False, WHITE)\r\n window.blit(textsurface_score, (30, 0))", "def print_current_scores(self, round_num, index):\n print(f'\\n{self._players_list[index].name.upper()} '\n f'YOUR TURN. 
ROUND: {round_num + 1}')\n\n print('-'*21)\n print('ROLL SCORES'.rjust(16))\n self._players_list[index].print_stacked_score_dict()\n\n print('-'*21)\n print('TOP SCORE BONUS'.rjust(19))\n print(f\"Top Score:\".ljust(16) +\n f\"{self._players_list[index].get_top_score()}\".rjust(3))\n print(f\"Top Bonus Score:\".ljust(16) +\n f\"{self._players_list[index].get_top_bonus_score()}\".rjust(3))\n\n print('-'*21)\n print('TOTAL SCORES'.rjust(19))\n print(f\"Total Top:\".ljust(16) +\n f\"{self._players_list[index].get_total_top_score()}\".rjust(3))\n print(f\"Total Bottom:\".ljust(16) +\n f\"{self._players_list[index].get_total_bottom_score()}\".rjust(3))\n\n print('-'*21)\n print(f\"GRAND TOTAL:\".ljust(16) +\n f\"{self._players_list[index].get_grand_total_score()}\".rjust(3))", "def win(self):\n self.score += 1\n self.ids['score'].text = 'SCORE: ' + str(self.score)", "def show_score(self):\r\n\t\tself.screen.blit(self.score_image, self.score_rect)\r\n\t\tself.screen.blit(self.high_score_imgae, self.high_score_rect)\r\n\t\tself.screen.blit(self.level_image, self.level_rect)\r\n\t\t#Draw ships.\r\n\t\tself.ships.draw(self.screen)", "def draw_score(score: int):\r\n arcade.draw_text(\r\n \"Number of Wins: \" + str(score), 0, WINDOW_HEIGHT - 30, arcade.color.WHITE, 20\r\n )", "def enter_player_score(player):\n score = 2\n while score > 1 or score < 0:\n score = view.enter_player_view(player.player_first_name)\n try:\n score = float(score)\n except ValueError:\n score = 2\n view.message('erreur')\n continue\n else:\n if score < 0:\n view.message('negatif')\n continue\n if score > 1:\n view.message('superieur')\n continue\n player.total_score += score\n return score", "def say_scores(score0, score1):\n print(\"Player 0 now has\", score0, \"and Player 1 now has\", score1)\n return say_scores", "def display_player_points():\r\n pass", "def draw_score():\n global score, hi_score, score2, win\n if game_on2:\n score2 += 1 / 30\n score2 = round(score2, 3)\n # if score % 30 == 0:\n # score2 += 1\n camera.draw(\"Time: \" + str(score2), 30, \"black\", camera.x + 250, 30)\n if hi_score < 1000000000000000:\n camera.draw('Hi ' + str(hi_score), 30, \"black\", camera.x + 150, 30)\n if win:\n draw_hi_score()", "def Score (i):\n text = font.render(\"Score: \"+str(i), True, black)\n window.blit(text,(210,0))", "def get_current_score(self):\n\n # Return the player's current turn score\n return self._current_score", "def show_score(self):\n\t\tself.screen.blit(self.score_image, self.score_rect)\n\t\tself.screen.blit(self.high_score_image, self.high_score_rect)\n\t\tself.screen.blit(self.level_image, self.level_rect)\n\t\t#Draw the ships to the screen.\n\t\tself.ships.draw(self.screen)", "def to_score(self):\n self._bottom_tab(2)\n self._goto(\"score\")", "def update_score(self, score_point: int):\r\n self._score_point = score_point\r\n self._update_score() # change the visual display of points for the player\r", "def get_score(self, player):\n\n df = pd.read_csv('RPSscores.csv')\n if not str(player) in df['Name'].to_dict().values():\n df.loc[len(df.index)] = [str(player),\n 0, 0, 0]\n player_index = int(df.loc[df['Name'] == str(player)].index[0])\n result = 'wins: ' + str(df.iloc[player_index, 1]) + '\\n' + \\\n 'draws: ' + str(df.iloc[player_index, 2]) + '\\n' + \\\n 'losses: ' + str(df.iloc[player_index, 3])\n return result", "def showScore(self,score):\n\t\tscoreDigits = [int(x) for x in list(str(score))]\n\t\ttotalWidth = 0 # total width of all numbers to be printed\n\t\tfor digit in scoreDigits:\n\t\t\ttotalWidth += 
self.IMAGES['numbers'][digit].get_width()\n\t\tXoffset = (self.SCREENWIDTH - totalWidth) / 2\n\t\tfor digit in scoreDigits:\n\t\t\t#SCREEN.blit(self.IMAGES['numbers'][digit], (Xoffset, self.SCREENHEIGHT * 0.1))\n\t\t\tself.GAME_IMAGE.blit(self.IMAGES['numbers'][digit], (Xoffset, self.SCREENHEIGHT * 0.1))\n\t\t\tXoffset += self.IMAGES['numbers'][digit].get_width()", "def get_score(self, player):\n if player in self.player_scores:\n return self.player_scores[player]\n else:\n raise Exception(\"Player not in score list\")", "def update_score(self):\n score_text = ' ' + str(self.x_score) + ' - ' + str(self.o_score) + ' '\n self.Score_Label.configure(text=score_text, foreground='#FFFFFF')", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def get_score(self):\n return self.score", "def score_game(self):\r\n players = self.player_control.get_players()\r\n ###game_control = self.game_control\r\n ###if game_control is not None:\r\n ### game_control.set_vals() # Update any changed game control settings\r\n if len(players) == 0:\r\n return # No players\r\n n_top_score = 0\r\n top_score = players[0].get_score()\r\n for player in players:\r\n if player.get_score() > top_score:\r\n top_score = player.get_score()\r\n for player in players:\r\n player_score = player.get_score()\r\n if player_score == top_score:\r\n n_top_score += 1\r\n \r\n for player in players:\r\n player_score = player.get_score()\r\n player_played = player.get_played()\r\n player_ties = player.get_ties()\r\n player_wins = player.get_wins()\r\n new_played = player_played+1\r\n player.set_played(new_played)\r\n player.set_prop(\"played\")\r\n if player_score == top_score:\r\n if n_top_score > 1:\r\n new_ties = player_ties + 1\r\n player.set_ties(new_ties)\r\n player.set_prop(\"ties\")\r\n else:\r\n new_wins = player_wins + 1\r\n player.set_wins(new_wins)\r\n player.set_prop(\"wins\")\r\n self.update_score_window()", "def show_score(self):\n self.screen.blit(self.score_image, self.score_rect)\n self.screen.blit(self.high_score_text_image, self.high_score_text_rect)\n self.screen.blit(self.high_score_image, self.high_score_rect)\n self.screen.blit(self.wave_image, self.wave_rect)\n self.spaceships.draw(self.screen)", "def getScore(self):\r\n return self._score", "def display_result(self) -> None:\n winner = self.state.winner\n if winner:\n self._display_message(winner + ' wins!')\n else:\n self._display_message('Draw')\n\n self._display_message(\n f'\\n{self.state.player1} has {self.state.player1_score} wins'\n )\n self._display_message(\n f'{self.state.player2} has {self.state.player2_score} wins\\n'\n )", "def get_score(self):\n return self.score", "def update_scoreboard(self):\n self.clear()\n self.goto(-(WIDTH//6), (HEIGHT//2-30))\n self.write(self.l_score, align = 'center', font = ('Courier', 20, 'normal'))\n self.goto((WIDTH//6), (HEIGHT//2-30))\n self.write(self.r_score, align = 'center', font = ('Courier', 20, 'normal'))", "def update_score():\n pass", "def show_scores(self):\n for text in self.score_text:\n text.draw()", "def draw_score(self, DISP, points:int):\r\n\r\n Text_Surf_Obj = self.text_input.font_object.render('HIGH SCORE', True, Colors.colors['WHITE'])\r\n Score_Surf_Obj = self.text_input.font_object.render(self.name + ' ' + str(points), True, Colors.colors['WHITE']) \r\n index = self.highscore_counter - 1\r\n\r\n # When the highscore_counter reaches zero the current Player has the highest Score\r\n if self.highscore_counter == 0:\r\n index = 0\r\n\r\n highscore_name = 
self.names[index] #< The Name of the Player with the next possible Highscore\r\n highscore = str(self.scores[index]) #< The Score of the Player with the next possible Highscore\r\n\r\n # Checks if the Points from the current Player are greater then the next best Highscore\r\n if points > self.scores[index]:\r\n\r\n # Decreases the highscore_counter by 1 when the highscore_counter > 0\r\n if self.highscore_counter > 0:\r\n self.highscore_counter -= 1\r\n \r\n # If the current Player already has the highest score, his name and score will be printed on the display\r\n elif self.highscore_counter == 0:\r\n highscore = str(points)\r\n highscore_name = self.name\r\n\r\n # The rest of the function is making the output on the screen, for further details what the functions do visit https://www.pygame.org/docs/\r\n High_Score_Surf_Obj = self.text_input.font_object.render(highscore_name+ ' ' + highscore, True, Colors.colors['WHITE'])\r\n Textrec = Text_Surf_Obj.get_rect()\r\n score_rec = Score_Surf_Obj.get_rect()\r\n highscore_rec = High_Score_Surf_Obj.get_rect()\r\n windowsize = DISP.get_size()\r\n Textrec.centerx = windowsize[0] - highscore_rec.width // 2 - 3 * self.grid_size\r\n Textrec.top = 0\r\n score_rec.left = 3 * self.grid_size\r\n score_rec.top = self.grid_size\r\n highscore_rec.right = windowsize[0] - 3 * self.grid_size\r\n highscore_rec.top = self.grid_size\r\n DISP.blit(Text_Surf_Obj, Textrec)\r\n DISP.blit(Score_Surf_Obj, score_rec)\r\n DISP.blit(High_Score_Surf_Obj, highscore_rec)", "def show_score(self, bird):\n font = pygame.font.SysFont(DisplayConsts.FONT_TYPE, DisplayConsts.FONT_SIZE)\n surface = font.render(str(bird.score), True, DisplayConsts.FONT_COLOR) # show score\n rect = surface.get_rect()\n rect.midtop = (DisplayConsts.SCREEN_WIDTH // 2.5, DisplayConsts.SCREEN_HEIGHT // 20)\n self.screen.blit(surface, rect)\n pygame.display.update() # update the screen", "def print_scores(self):\n ### FILL IN ###", "def showtopscores(self):\n top_scores = LeaderBoard.gettopscorerslist(CURRENT_GAME_LEVEL)\n level_string = \"\"\n if CURRENT_GAME_LEVEL == DifficultyLevel.ExpertLevel:\n level_string = \"Expert level\"\n elif CURRENT_GAME_LEVEL == DifficultyLevel.BeginnerLevel:\n level_string = \"Beginner level\"\n else:\n level_string = \"Intermediate level\"\n leaderboard = \"Rank\".ljust(10) + \"Player Name\".ljust(30) + \"Score\".ljust(10) + '\\n'\n print leaderboard,\n rank = 1\n for score in top_scores:\n score = str(rank).ljust(10) + score\n print score,\n leaderboard = leaderboard + score\n rank = rank + 1\n QtGui.QMessageBox.about(self, \"Leaderboard for \" + level_string, leaderboard)", "def _draw_score(self) -> None:\n score_digits = [int(x) for x in list(str(self.game.score))]\n total_width = 0 # total width of all numbers to be printed\n\n for digit in score_digits:\n total_width += self.images['numbers'][digit].get_width()\n\n x_offset = (self._screen_width - total_width) / 2\n\n for digit in score_digits:\n self.surface.blit(self.images['numbers'][digit],\n (x_offset, self._screen_height * 0.1))\n x_offset += self.images['numbers'][digit].get_width()", "def updateScore(currentScore, highScore):\n screen.fill(pygame.Color(\"black\"), (10, 210, 130, 20))\n hsWidth = getTextWidth(str(highScore))\n hsPos = (150 - hsWidth) // 2, 210\n displayText(str(highScore), GOLD, hsPos)\n\n screen.fill(pygame.Color(\"black\"), (10, 130, 130, 20))\n csWidth = getTextWidth(str(currentScore))\n csPos = (150 - csWidth) // 2, 130\n displayText(str(currentScore), GOLD, csPos)", "def get_score(self):\n\n 
sql = \"SELECT score FROM Users WHERE username = '\" + self.username + \"'\"\n self.cursor.execute(sql)\n return self.cursor.fetchall()[0][0]", "def displayScoreAfterGame(self, options):\n if not self.setup:\n self.snakeSetup(true)\n\n screen = self.screen\n\n time = 10000000000;\n if \"time\" in options and time > 0:\n time = options.get(\"time\")\n\n startTime = pygame.time.get_ticks()\n\n while(pygame.time.get_ticks()-startTime < time):\n pygame.font.init()\n screen.fill(0)\n pygame.draw.rect(screen, boardColor, boardRect)\n font = pygame.font.Font(pygame.font.get_default_font(),\n 15)\n global score\n self.score = score;\n text = font.render('Score: ' + str(score), True, (255,\n 255, 255), (0, 0, 0))\n textRect = text.get_rect()\n textRect.move(500, 100)\n screen.blit(text, textRect)\n pygame.display.flip()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n return", "def update(self):\n self.clear()\n self.score += 1\n self.write(f\"Score : {self.score}\",\n align=\"center\", font=(\"Arial Black\", 20))", "def text(self):\n surface_score = pygame.font.SysFont('Helvetic', 100).render(str(self.score), False, BLACK)\n screen.blit(surface_score, (50, 50))", "def prep_score(self):\r\n\t\trounded_score=int(round(self.stats.score, -1))\r\n\t\tscore_str=\"Score: \"+\"{:,}\".format(rounded_score)\r\n\t\tself.score_image=self.font.render(score_str, True, self.text_color,\r\n\t\t\tself.ai_settings.bg_color)\r\n\r\n\t\t#Display the score at the top right of the screen.\r\n\t\tself.score_rect=self.score_image.get_rect()\r\n\t\tself.score_rect.right=self.screen_rect.right-20\r\n\t\tself.score_rect.top=20", "def prep_score(self):\n\t\trounded_score = int(round(self.stats.score,-1))\n\t\tscore_string = \"Score: \" + \"{:}\".format(rounded_score)\n\t\tself.score_image = self.font.render(score_string, True, self.text_color, self.ai_settings.bg_color)\n\n\t\t#Display the score at the top right corner of the screen.\n\t\tself.score_rect = self.score_image.get_rect()\n\t\tself.score_rect.right = self.screen_rect.right - 20\n\t\tself.score_rect.top = self.screen_rect.top = 20", "def score(self):", "def updateScore(self, player: int) -> None:\n\n if player == 1:\n self._score[0] += 1\n elif player == 2:\n self._score[1] += 1\n\n # logging\n logger.info(\"Player {winner} has scored a goal. 
Score: {score}\", winner=player, score=str(self._score))", "def printOutcome(self):\n o = self.getOutcome()\n if o == 0:\n print(\"No winner\")\n else:\n print(\"Player\", o, \" won\")", "def getScore(self):\n return self._score", "def show_results(self):\r\n\r\n if self.player_cards > self.computer_cards: # player wins\r\n print('\\nCongratulations!!')\r\n print('You WIN by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n elif self.player_cards < self.computer_cards: # computer wins\r\n print('\\nToo bad!!')\r\n print('You LOST by {0} / {1}'.format(self.player_cards, self.computer_cards))\r\n else: # tied\r\n print('You TIED by {0} / {1}'.format(self.player_cards, self.computer_cards))", "def showScore(score):\n scoreDigits = [int(x) for x in list(str(score))]\n totalWidth = 0 # total width of all numbers to be printed\n\n for digit in scoreDigits:\n totalWidth += IMAGES['numbers'][digit].get_width()\n\n Xoffset = SCREEN_WIDTH * 0.9\n\n for digit in scoreDigits:\n SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREEN_HEIGHT * 0.1))\n Xoffset += IMAGES['numbers'][digit].get_width()", "def score_display():\n data = score_reader()\n for index, datum in enumerate(data):\n score_format = \"%s ...... %s/%s\" % (datum[0].capitalize(), datum[1], datum[2])\n print(score_format)", "def scoreboard():\n box = font.render(\"Kills: \" + str(score), True, WHITE) # Rendering font for the scoreboard\n screen.fill(BROWN, rect=box.get_rect(topright=(635, 20))) # Filling the background of the scoreboard with brown\n screen.blit(box, (550, 20)) # Putting the scoreboard in the top right corner of the screen\n pygame.display.update() # Updating the screen", "def update_stats(self):\n self.scoreText = pygame.font.Font(FONT, 20)\n\n #update score\n textsurface = self.scoreText.render((\"Score: \"+str(self.current_score)), False, BLUE)\n self.screen.blit(textsurface,(5,5))\n\n #update high score\n if self.highest_score <= self.current_score:\n self.highest_score = self.current_score\n #To write highest score to file\n filename = \"highscore.txt\"\n file = open(filename,\"w\")\n file.write(str(self.highest_score))\n file.close()\n\n #Display High Score\n textsurface = self.scoreText.render((\"Highest Score: \"+str(self.highest_score)), False, BLUE)\n self.screen.blit(textsurface,(230,5))\n\n #Display Life Text\n textsurface = self.scoreText.render(\"Lives: \", False, BLUE)\n self.screen.blit(textsurface,(570,5))\n\n #Shows lifes left\n for i in range(self.lives):\n self.live = pygame.image.load(\"./images/ship.png\").convert_alpha()\n self.live = pygame.transform.scale(self.live , (20, 20))\n self.screen.blit(self.live, (670+(i*25), 7))\n\n #Mute Button\n button=pygame.image.load(\"./images/mutebutton.png\")\n button=pygame.transform.scale(button,(30,30))\n self.screen.blit(button, (750,5))", "def showScore(score):\n scoreDigits = [int(x) for x in list(str(score))]\n totalWidth = 0 # total width of all numbers to be printed\n\n for digit in scoreDigits:\n totalWidth += IMAGES['numbers'][digit].get_width()\n\n Xoffset = (SCREENWIDTH - totalWidth) / 2\n\n for digit in scoreDigits:\n SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.1))\n Xoffset += IMAGES['numbers'][digit].get_width()", "def GetResult(self, playerjm):\n return self.score / len(self.scores)", "def print_scorecard(self):\n self.compute_score()\n frameline = \"|\"\n scoreline = \"|\"\n\n # Assemble frameline (pins knocked over)\n # All but final frame\n for i in xrange(FRAMES - 1):\n if self.frames[i][0] == 10:\n frameline 
+= \"X| |\"\n elif self.frames[i][0] + self.frames[i][1] == 10:\n frameline += str(self.frames[i][0]) + \"|\\\\|\"\n else:\n frameline += (str(self.frames[i][0]) + \"|\" +\n str(self.frames[i][1]) + \"|\")\n\n # Final Frame1\n # If Strike in last frame\n if self.frames[FRAMES - 1][0] == 10:\n frameline += (\"X|\" + str(self.frames[FRAMES][0]) + \"|\" +\n str(self.frames[FRAMES + 1][0]))\n # If Spare in last frame\n elif self.frames[FRAMES - 1][0] + self.frames[FRAMES - 1][1] == 10:\n frameline += (str(self.frames[FRAMES - 1][0]) + \"\\\\|\" +\n str(self.frames[FRAMES][0]))\n else:\n frameline += (str(self.frames[FRAMES - 1][0]) + \"|\" +\n str(self.frames[FRAMES - 1][1]))\n\n # Assemble Scoreline (total points scored for each frame)\n for score in self.scores:\n scoreline += str(score).ljust(3) + \"|\"\n\n # Print Scorecard\n print self.competitor_name.center(43, \"=\")\n print frameline\n print scoreline\n print \"Total Score: \" + str(self.score)\n print \"=\" * 43", "def get_scores(self):\n return self.score", "def player_win(self):\r\n\r\n self.summary = (\" \" * 83) + \"YOU WIN\"\r\n print(\"Player wins against opponent.\\n\")\r\n self.player_wins += 1", "def updateScore(score):\n return score + 1", "def check_score(self) -> None:\n self.player_1, self.player_2 = 0, 0\n for cell in self.cells:\n if cell.player == 1:\n self.player_1 += 1\n elif cell.player == 2:\n self.player_2 += 1", "def add_score(score):\n global SCORE\n SCORE = SCORE + score\n # update the display\n mvaddstr(1, 2, \"Score:\", color_pair(HEADING_COLOUR) | A_BOLD)\n mvaddstr(1, 9, \"%d\" % SCORE, color_pair(TEXT_COLOUR) | A_BOLD)", "def bcp_player_score(self, value, prev_value, change, **kwargs):\n\n if self.player:\n self.player['score'] = int(value)", "def get_current_scores_buffer(self):\n\n player_scores = self.get_scores_in_order_of_players()\n score_string = \"Scores:\\n\"\n\n for color, score in zip(self.player_color_order, player_scores):\n player_score = \"{}: {}\".format(color, score)\n score_string += player_score\n score_string += \"\\t\"\n\n return score_string", "def score(self) -> str:\n return self._score", "def showScore(score):\n scoreDigits = [int(x) for x in list(str(score))]\n totalWidth = 0 # total width of all numbers to be printed\n\n for digit in scoreDigits:\n totalWidth += IMAGES['numbers'][digit].get_width()\n\n Xoffset = (SCREENWIDTH - totalWidth)\n\n for digit in scoreDigits:\n SCREEN.blit(IMAGES['numbers'][digit], (Xoffset,0))\n Xoffset += IMAGES['numbers'][digit].get_width()", "def handle_tournament_over(self, score):\n #############################\n #\n #\n # YOUR CODE HERE\n #\n #\n #############################\n print(\"Tournament over. 
Your score was: \" + str(score))", "def getScore(data):\n return score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_score(self):\n return self._score", "def get_winner(self):\n winner: Player = Player('none')\n points_winner = 0\n for player in self.players:\n for key, value in player.get_stats().items():\n print('{}: {}'.format(key, value))\n if key == 'points':\n if value >= points_winner:\n winner = player\n print()\n\n print('The winner is: ' + winner.get_name())\n return winner", "def printPlayerStats(self):\n\t\tplayerStats = ['Name = ' + self.name, \n\t\t\t\t\t 'Agility = ' + str(self.agility), \n\t\t\t\t\t 'Personality = ' + str(self.personality), \n\t\t\t\t\t 'Sanity = ' + str(self.sanity), \n\t\t\t\t\t 'Strength = ' + str(self.strength), \n\t\t\t\t\t 'Progress = ' + str(self.progress)]\n\t\tprint playerStats", "def view_scores(self, wid, score=None):\n s_win = Scorewin(self, score)", "def get_score(self):\n return self.__score", "def get_current_score(self, game_id: int, player_id: int) -> int:\n with self.eng.session_mgr() as session:\n return session.query(\n func.sum(TablePlayerRound.score)\n ).filter(and_(\n TablePlayerRound.player_key == player_id,\n TablePlayerRound.game_key == game_id\n )).scalar()", "def display_stats(self):\n print(\"Simulation took: {:.2f} seconds to execute\".format(time.time() - self.start_time))\n for i, win in enumerate(self.wins):\n average = 0\n if win:\n average = float(self.tries[i]) / win\n print(\"Player {} wins: {} with (average number of rounds: {:.2f})\".format(i+1, win, average))", "def enter_game_scores():\n pass", "def update_score_board(self):\n score = ''\n for key, value in self.model.game_score.items():\n score += key + \"-\" + str(value) + ':'\n if self.view.score_board_entry.get():\n self.view.score_board_entry.delete(0, tkinter.END)\n self.view.score_board_entry.insert('1', score)", "def score():\r\n\r\n point_1 = 0\r\n point_2 = 0\r\n print(term.move_xy(82,15) + term.white + 'Score joueur 1 : ', end='')\r\n print(point_1)\r\n print(term.move_xy(82,16) + term.white + 'Score joueur 2 : ', end='' )\r\n print(point_2)", "def get_score(self):\r\n return self.lcp.get_score()", "def draw_score(self, x, y, score):\n\n screen.blit(self.font.render('{}'.format(score), 1, BLACK), (x, y))", "def print_scores(board: Connect4Board) -> None:\r\n print('')\r\n # print('\\n******************************')\r\n # print('************SCORES************')\r\n # print('******************************')\r\n # print(' BLACK = {} & WHITE = {} '.format(board.get_score('B'), board.get_score('W')))\r\n # print('******************************\\n')\r", "def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score", "def print_player_rank_and_points(self):\r\n pass", "def score_board():\r\n \r\n return str(score) + \"/\" + str(rounds)", "def display_scores(scores):\n\n\t#print(\"\\t\\t\\tScores: \", list(scores))\n\tprint(\"\\t\\t\\tMean: \", scores.mean())\n\tprint(\"\\t\\t\\tStandard Deviation: \", scores.std())", "def draw_scores(self):\n font = pygame.font.SysFont(self.font_name, self.font_size)\n scores = parse_scores_file(self.scores_file)\n x = 330\n y = 163\n gap = 51\n for i, score in enumerate(scores):\n txt = \"%s: %s lvl: %s\" %(score.name, score.score, score.level)\n image = 
font.render(txt, True, self.dark_blue)\n self.display.blit(image, (x, y + (gap * i)))", "def give_round_scores(list_of_players):\n print(\"\\nThe round has ended !\\nWe shall now unveil the cards and the scores!\")\n\n for player in list_of_players:\n cards = [card.name for card in player.cards]\n cards_string = \" \"\n for card in cards:\n cards_string += card + \", \"\n cards_string = cards_string[:-2]\n print(\"\\n{} has these cards: \".format(player.name), cards_string)\n print(\"{} has a score of {}\".format(player.name, player.score()))\n final_scores = [player.score() for player in list_of_players]\n min_score = min(final_scores)\n winners_index = [i for i, x in enumerate(final_scores) if x == min_score]\n if len(winners_index) == 1:\n index_winner = winners_index[0]\n winner = list_of_players[index_winner]\n print(winner.name, \"won the round with a score of {}\".format(winner.score()))\n if len(winners_index) > 1:\n print(\"It's a tie!\")\n winners_names = \"\"\n winners = [list_of_players[i] for i in winners_index]\n for winner in winners:\n winners_names += winner.name\n print(winners_names, \"won the round with a score of \", str(min_score))" ]
[ "0.7532108", "0.7476087", "0.74749184", "0.74059784", "0.7357234", "0.73177207", "0.7274991", "0.71760094", "0.714652", "0.7103239", "0.7076728", "0.7065711", "0.7053772", "0.7051625", "0.70301944", "0.70110327", "0.6995284", "0.6994245", "0.6992214", "0.6988295", "0.6986813", "0.69865286", "0.69743145", "0.69554174", "0.6949766", "0.6940924", "0.6915002", "0.6906017", "0.68552506", "0.68455034", "0.68445027", "0.6829922", "0.68191165", "0.6776222", "0.6774942", "0.6774942", "0.6774942", "0.67636955", "0.67605674", "0.6733757", "0.6732082", "0.6725605", "0.6715349", "0.670427", "0.6701341", "0.66771847", "0.6667024", "0.6662596", "0.6659764", "0.6658208", "0.6652069", "0.6648812", "0.66464144", "0.66377383", "0.66313636", "0.6630396", "0.6610601", "0.66077226", "0.65744764", "0.6562775", "0.6551749", "0.6543289", "0.6542294", "0.6532451", "0.6528833", "0.65246904", "0.65141255", "0.64907426", "0.6489704", "0.6489475", "0.64772934", "0.64676696", "0.6466004", "0.64589924", "0.64576167", "0.6446806", "0.64421237", "0.64403373", "0.6438902", "0.6436654", "0.64341223", "0.64341223", "0.64341223", "0.64263624", "0.6404073", "0.63868", "0.63863915", "0.63852733", "0.6384973", "0.63819295", "0.63782233", "0.6376504", "0.63685644", "0.63541555", "0.63457483", "0.632992", "0.6320909", "0.6318759", "0.6297397", "0.62971854", "0.62942314" ]
0.0
-1
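The negative snippets listed above all follow the same pygame idiom for putting a score on screen: render the text to its own surface with font.render, position that surface through get_rect, then blit it onto the display. A minimal self-contained sketch of that idiom follows; it is an illustration only, and every name in it (the window size, draw_score, the colors) is an assumption rather than something taken from the dataset.

import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))
font = pygame.font.SysFont(None, 36)

def draw_score(surface, score):
    # Render the score to its own surface, anchor it top-right, then blit.
    text_surf = font.render("Score: " + str(score), True, (255, 255, 255))
    text_rect = text_surf.get_rect()
    text_rect.topright = (surface.get_width() - 20, 20)
    surface.blit(text_surf, text_rect)

draw_score(screen, 42)
pygame.display.flip()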
This function draws the background of the game.
def redraw(dis): dis.blit(Bg,(0,0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_bg(self):\n self.screen.fill(self.bg)", "def mainmenu_background():\n gameDisplay.fill((40, 0, 40))", "def mainmenu_background():\n gameDisplay.fill((40, 0, 40))", "def drawBackground(self,screen):\n pygame.draw.rect(screen,(240,240,240),(self.basepos[0],self.basepos[1],204,504))\n pygame.draw.rect(screen,(0,0,0),(self.basepos[0]+2,self.basepos[1]+2,200,500))", "def main_background():\n surface.fill(COLOR_BACKGROUND)", "def draw_bg(self):\n for y in range(WIN_HEIGHT/32): #TODO: make sure this process is correct and efficient.\n for x in range(WIN_WIDTH/32):\n self.screen_image.blit(self.bg, (x * 32, y * 32))", "def background():\n sky_color = (66, 170, 255) # color of the sky\n grass_color = (0, 128, 0) # color of the grass\n\n rect(screen, sky_color, (0, 0, 500, 250), 0) # sky\n rect(screen, grass_color, (0, 250, 500, 250), 0) # grass", "def main_background():\n surface.fill(COLOR_GRAY)", "def draw_game():\n # Fill window with background color\n RENDER_WINDOW.fill(BACKGROUNDCOLOR)\n\n # Draw Game Title\n draw_title()\n\n # Draw Puzzle\n draw_puzzle()\n \n # Draw buttons to GUI \n draw_buttons()\n\n # Draw Text\n draw_text() \n\n # Draw random toggle\n draw_rand_toggle()", "def mainmenu_background():\n surface.fill((40, 0, 40))", "def blit_background(self):\n self.screen.fill([67, 67, 67])\n self.screen.blit(self.background, (0,0))\n pygame.draw.rect(self.screen, (0, 0, 0), self.seperate_line)", "def background(self, Background):\n SCREEN.blit(Background, (0, 0))\n #SCREEN.fill((0,0,0))", "def draw(self, draw_surface):\n draw_surface.blit(self.background_frame, (0, 120))", "def main_background(self):\n self.screen.blit(self.background, (0, 0))", "def draw_ground(self):\r\n win.blit(self.ground, (0, 400))", "def draw_background(self):\n backgrounds = {\n \"forest\": (38, 106, 46),\n \"desert\": (194, 178, 128)\n }\n self.background_surface.fill(backgrounds[self.geography])", "def draw_background(self, t):\n pass", "def draw(self, background):\n background.blit(self.image, (self.x_pos, self.y_pos))", "def draw(self):\n self.screen.fill(BACKGROUND_COLOR)\n self.cannon.draw(self.screen)\n self.objects.draw(self.screen)", "def paintScreen(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BACKGROUND_LEFT)\n self.imgBackgroundLeft = guiobjects.OcempImageMapTransparent(imgPath)\n self.window.add_child(self.imgBackgroundLeft)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BACKGROUND_RIGHT)\n imgBackgroundRight = guiobjects.OcempImageMapTransparent(imgPath)\n imgBackgroundRight.topleft = 297, 0\n self.window.add_child(imgBackgroundRight)", "def draw_bg (self):\n self.health = max(0.0, min(1.0, (self.healthsteps + self.mud.value) / self.healthsteps))\n healthycolor = (0x11, 0x22, 0x44)\n pollutedcolor = (0x66, 0x66, 0)\n self.watercolor = [int((a - b) * self.health + b)\n for a,b in zip(healthycolor, pollutedcolor)]\n colorname = \"rgb({},{},{})\".format(*self.watercolor)\n w, h = self.width, self.height\n self.draw.rectangle((0,0,w-1,self.level_px-1), \"#000000\")\n self.draw.rectangle((0,self.level_px,w-1,h-1), colorname)", "def draw_background(self):\n back = pygame.Surface(self.size)\n width, height = self.size\n self.shapes['gradient'] = shapes.gen_gradient(\n (width, height / 2),\n self.colors[3],\n self.colors[4]\n )\n back.blit(self.shapes['gradient'], (0, height - self.sh('gradient')))\n\n # TODO: Don't use static path/icon\n image = '/usr/share/icons/Tango/scalable/mimetypes/audio-x-generic.svg'\n self.shapes['musicimg'] = load_svg(image, 
[height/2]*2)\n back.blit(\n self.shapes['musicimg'],\n (width / 10, (height - self.sh('musicimg')) / 2)\n )\n return back", "def draw():\n screen.fill((0, 0, 0))\n alien.draw()", "def draw_gameDisplay(gameDisplay, hero):\n \n draw_background(gameDisplay, hero)", "def build_background():\n layer_1 = GRect(800, 550)\n layer_1.filled = True\n layer_1.color = 'silver'\n layer_1.fill_color = 'silver'\n window.add(layer_1)\n layer_2 = GRect(800, 90)\n layer_2.filled = True\n layer_2.color = 'whitesmoke'\n layer_2.fill_color = 'whitesmoke'\n window.add(layer_2)\n layer_3 = GRect(800, 40, x=0, y=510)\n layer_3.filled = True\n layer_3.color = 'whitesmoke'\n layer_3.fill_color = 'whitesmoke'\n window.add(layer_3)", "def make_background(self):\n for x in range(self.env_list[0].size):\n for y in range(self.env_list[0].size):\n img = load_image(\"dirt.png\")[0]\n self.background.blit(img, (x*50, y*50))", "def draw_game(self) -> None:\n\n self.screen.fill(THECOLORS['royalblue4'])\n self.our_board.draw(self.screen)\n self.game_status.draw(self.screen)\n self.heading_bar.draw(self.screen)\n\n if self.our_game_state == STATE_PREPARING:\n self.start_game_button.draw(self.screen)\n elif not self.our_game_state == STATE_READY_TO_START:\n self.their_board.draw(self.screen)", "def draw():\n background(255)\n for i in range(COLS):\n for j in range(ROWS):\n if (BOARD[i][j] == 1): fill(0)\n else: fill(255)\n noStroke() # stroke(0)\n rect(i * CELL_SIZE, j * CELL_SIZE, CELL_SIZE, CELL_SIZE)\n if (PLAY):\n generate()", "def drawBackground(self):\n if self.newFrameArrived and not self.reshaping:\n imgHeight, imgwidth, _ = self.currentFrame.shape\n if imgHeight == self.height and imgwidth == self.width:\n glDisable(GL_DEPTH_TEST)\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glMatrixMode(GL_PROJECTION)\n glPushMatrix()\n #print \"Happy printings1\"\n #glMatrixMode(GL_MODELVIEW)\n #glLoadIdentity()\n\n #print \"Happy printings\"\n glLoadIdentity()\n #print \"Happy printings\"\n glOrtho(0, self.width, 0, self.height, -1.0, 1.0)\n #print \"Happy printings\"\n glViewport(0, 0, self.width, self.height)\n #print \"Happy printings\"\n glDisable(GL_TEXTURE_2D)\n glPixelZoom(1, -1)\n glRasterPos3f(0, self.height-0.5, -1)\n #print \"Happy printings5\"\n glDrawPixels(self.width, self.height, GL_RGB, GL_UNSIGNED_BYTE, self.currentFrame)\n #print \"Happy printings6\"\n # glBegin(GL_QUADS)\n # glTexCoord2f(0.0,0.0); glVertex3f(-4.0,-3.0,-10.0)\n # glTexCoord2f(1.0,0.0); glVertex3f( 4.0,-3.0,-10.0)\n # glTexCoord2f(1.0,1.0); glVertex3f( 4.0, 3.0,-10.0)\n # glTexCoord2f(0.0,1.0); glVertex3f(-4.0, 3.0,-10.0)\n # glEnd()\n glPopMatrix()\n glMatrixMode(GL_MODELVIEW)\n glPopMatrix()\n glEnable(GL_DEPTH_TEST)\n #self.newFrameArrived = False", "def draw_app(self):\n \n # Start iterations\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n self.display.fill([255, 255, 255])\n self.grid.draw(self.display)\n pygame.display.update()", "def draw_background(self, verbosity=0):\n log.debug(\"Drawing background\")\n\n log.debug(\"Drawing 'rock' background\")\n pygame.draw.rect(self.surface, (127, 127, 127), self.surface.get_rect())\n\n log.debug(\"Drawing Region contents\")\n for region in self.dungeon_map.regions:\n coords = self.map_to_screen(region.coords)\n if verbosity > 0:\n # Color-code regions for convenience\n if region.kind == Region.ROOM:\n color = (255, 255, 240)\n elif region.kind == Region.CHAMBER:\n color = (255, 240, 255)\n elif region.kind == Region.PASSAGE:\n 
color = (240, 255, 255)\n else:\n raise LookupError(\"Unknown Region kind '{0}'\"\n .format(region.kind))\n else:\n color = (255, 255, 255)\n pygame.draw.polygon(self.surface, color, coords)\n\n if verbosity == 0:\n return\n log.debug(\"Drawing Connection contents\")\n for conn in self.dungeon_map.connections:\n coords = self.map_to_screen(conn.get_poly_coords())\n if (conn.kind == Connection.DOOR or\n conn.kind == Connection.SECRET or\n conn.kind == Connection.ONEWAY):\n if conn.is_incomplete():\n color = (127, 140, 127)\n else:\n color = (240, 255, 240)\n elif conn.kind == Connection.ARCH:\n if conn.is_incomplete():\n color = (127, 127, 140)\n else:\n color = (240, 240, 255)\n elif conn.kind == Connection.OPEN:\n if conn.is_incomplete():\n color = (140, 127, 127)\n else:\n color = (255, 240, 240)\n else:\n continue\n pygame.draw.polygon(self.surface, color, coords)", "def draw(self, screen):", "def background(self):\n sun = graphics.Circle(graphics.Point(200, 310), 50)\n sun.setFill('yellow')\n sun.draw(self.win)\n \n earth = graphics.Circle(graphics.Point(40, 250), 30)\n earth.setFill('blue')\n earth.draw(self.win)\n continent = graphics.Circle(graphics.Point(30, 265), 10)\n continent.setFill('green')\n continent.draw(self.win)\n cont_2 = graphics.Circle(graphics.Point(30, 235), 10)\n cont_2.setFill('green')\n cont_2.draw(self.win)\n cont_3 = graphics.Circle(graphics.Point(55, 245), 10)\n cont_3.setFill('green')\n cont_3.draw(self.win)\n \n stars = graphics.Circle(graphics.Point(250, 250), 5)\n stars.setFill('white')\n stars.draw(self.win)\n star1 = graphics.Circle(graphics.Point(100, 250), 5)\n star1.setFill('white')\n star1.draw(self.win)\n star2 = graphics.Circle(graphics.Point(150, 150), 5)\n star2.setFill('white')\n star2.draw(self.win)\n star3 = graphics.Circle(graphics.Point(50, 100), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star3 = graphics.Circle(graphics.Point(100, 50), 5)\n star3.setFill('white')\n star3.draw(self.win)\n star4 = graphics.Circle(graphics.Point(250, 80), 5)\n star4.setFill('white')\n star4.draw(self.win)\n star4 = graphics.Circle(graphics.Point(200, 60), 5)\n star4.setFill('white')\n star4.draw(self.win)", "def draw():", "def on_draw_over_backgroundimage(self):", "def draw(self, win):\n pygame.draw.rect(win, self.color, self.rect)", "def drawBoard(self):\r\n self.outer.draw(self.surface)\r\n self.background.draw(self.surface)\r\n for point in self.points:\r\n point.draw(self.surface)\r\n point.drawCheckers(self.surface)\r\n self.dice.draw(self.surface)\r\n self.message.draw(self.surface)\r\n self.checkerBox.draw(self.surface)\r\n self.checkerBox.drawCheckers(self.surface)\r\n for bar in self.bar:\r\n bar.draw(self.surface)\r\n bar.drawCheckers(self.surface)\r\n pygame.display.flip()", "def draw(self):\n \n # Draw the background\n self.world.fill(BLUE)\n \n # Draw all the sprite lists that we have\n self.wall_list.draw(self.world)\n self.enemy_list.draw(self.world)\n self.sludge.draw(self.world)\n self.consumeable.draw(self.world)\n self.can_climb.draw(self.world)", "def draw_board(self):\n pygame.draw.rect(background, BLACK, self.outline, 3)\n # Outline is inflated here for future use as a collidebox for the mouse\n self.outline.inflate_ip(20, 20)\n for i in range(self.size-1):\n for j in range(self.size-1):\n rect = pygame.Rect(5+GRID_SIZE+(GRID_SIZE*i), 5+GRID_SIZE+(GRID_SIZE*j), GRID_SIZE, GRID_SIZE)\n pygame.draw.rect(background, COLOR[BLACK], rect, 1)\n if self.size >= 13:\n for i in range(3):\n for j in range(3):\n coords = 
(5+4*GRID_SIZE+(GRID_SIZE*6*i), 5+4*GRID_SIZE+(GRID_SIZE*6*j))\n pygame.draw.circle(background, COLOR[BLACK], coords, 5, 0)\n screen.blit(background, (0, 0))\n pygame.display.update()", "def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()", "def draw(self, screen):\n pg.draw.rect(screen, self.bg_color, self.rect)\n\n for y, surf in enumerate(self.images):\n # Don't blit below the rect area.\n if y * self.font_height + self.font_height > self.rect.h:\n break\n screen.blit(surf, (self.rect.x, self.rect.y+y*self.font_height))", "def draw(self):\n self._background.draw(self.view)\n if self._state == STATE_INACTIVE:\n self._message.draw(self.view)\n if self._state == STATE_COUNTDOWN:\n self._game.draw(self.view)\n self._countdownMessage.draw(self.view)\n self._soundImage.draw(self.view)\n if self._state == STATE_ACTIVE:\n self._game.draw(self.view)\n self._soundImage.draw(self.view)\n if self._state == STATE_PAUSED:\n self._game.draw(self.view)\n self._pausedMessage.draw(self.view)\n if self._state == STATE_RESET:\n self._message.draw(self.view)\n if self._state == STATE_COMPLETE:\n self._game.draw(self.view)\n self._pausedMessage.draw(self.view)", "def __draw_game(self) -> None:\n self.__draw_window()\n self.pipes.draw(self.win)\n self.player.draw(self.win)\n pygame.display.update()", "def draw(self):\n self.screen_surf.fill(BKGD_COLOUR)\n self.all_tiles.draw(self.screen_surf) # Tiles before other sprites.\n self.nests.draw(self.screen_surf) # Nests before chipmunks.\n self.chipmunks.draw(self.screen_surf)\n self.acorns.draw(self.screen_surf)\n self.screen_surf.blit(self.acorn_surf, self.acorn_surf.get_rect())\n self.screen_surf.blit(self.timer_surf, self.timer_rect)", "def draw(self):\r\n self.screen.fill(self.color_bg) \r\n for t in self.thumbs: t.draw(self.screen) \r\n pygame.display.flip()\r\n self.clock.tick(60)", "def draw(self):\r\n\r\n self.screen.fill((0,0,0))\r\n self.sprite_group.draw(self.screen)\r\n pygame.display.flip()", "def __draw(self, screen):\n\n pygame.draw.rect(screen, (200, 255, 200), (self.x, self.y, self.width, self.height))", "def draw(self):\n # IMPLEMENT ME\n \"\"\"\n GRectangle(x=GAME_WIDTH/2,y=GAME_HEIGHT/2,\n width=GAME_WIDTH,height=GAME_HEIGHT,\n fillcolor=introcs.RGB(0,0,0)).draw(self.view)\n if self.getState() == STATE_INACTIVE:\n self.getText().draw(self.view)\n if self.getState() == STATE_PAUSED:\n self.getText().draw(self.view)\n if not self.getWave() is None:\n self.getWave().draw(self.view)\n if self.getState() == STATE_COMPLETE:\n self.getText().draw(self.view)\n if self.getState() == STATE_PAUSED or self.getState() == STATE_ACTIVE or self.getState() == STATE_COMPLETE:\n self.getText().draw(self.view)\n\n GRectangle(x=GAME_WIDTH/2,y=GAME_HEIGHT/2,\n width=GAME_WIDTH,height=GAME_HEIGHT,\n fillcolor=introcs.RGB(0,0,0)).draw(self.view)\"\"\"\n if not self.getText() is None:\n self.getText().draw(self.view)\n if not self.getWave() is None:\n self.getWave().draw(self.view)", "def draw(self):\n self.screen.fill((0,51,102))\n # get the new drawables\n self.drawables = (self.game_model.get_background_drawables()\n + self.game_model.get_plane_drawables()\n + self.game_model.get_bullet_drawables()\n + self.game_model.get_enemy_drawables())\n for d in self.drawables:\n rect = d.get_rect()\n surf = d.get_surface()\n surf.set_colorkey((255,255,255))\n self.screen.blit(surf, rect)", "def _bg_update(self):\n self.screen.fill(colour.BLACK)\n for 
star in self._stars:\n if star[2] + star[1] > self.s_height:\n star[1] = 0\n else:\n star[1] += star[2]\n self.screen.set_at((star[0], star[1]), colour.WHITE)", "def background_maker():\n background = GRect(window.width, window.height)\n background.filled = True\n background.fill_color = '0xFFFCEC'\n background.color = '0xFFFCEC'\n return background", "def draw_environment():\n rect(screen, LIGHT_GRAY, (0, 0, 800, 450)) # grey sky\n rect(screen, WHITE, (0, 450, 800, 1000)) # white ground", "def draw(self):\n if self.state == 'alive':\n for i in range(len(self.tail)):\n pygame.draw.rect(display, black, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, black, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))\n\n else:\n for i in range(len(self.tail)):\n pygame.draw.rect(display, red, (squareToXPix(self.tail[-(i + 1)][0], objectSize), squareToYPix(self.tail[-(i + 1)][1], objectSize), objectSize, objectSize))\n\n pygame.draw.rect(display, red, (squareToXPix(self.x, objectSize), squareToYPix(self.y, objectSize), objectSize, objectSize))", "def on_draw(self):\n arcade.start_render()\n background()\n self.player_sprite.draw()", "def draw(self):\n self.screen.fill(LIGHT_GRAY)\n\n r = pygame.Rect(self.model.slider.left,\n self.model.slider.top,\n self.model.slider.width,\n self.model.slider.height)\n pygame.draw.rect(self.screen, BLUE, r)\n pygame.display.update()", "def draw(self, window):\n \n window.fill(Colours.BG)\n\n for draw_obj in self.objects.values():\n draw_obj.draw(window)\n\n #Ensure text is drawn on top\n self.objects['introtext'].draw(window)\n self.objects['introtext2'].draw(window)\n self.objects['introtext3'].draw(window)\n \n if self.banner is not None:\n self.banner.draw(window)\n \n pygame.display.flip()", "def draw(self, screen):\n \n # Background drawing code can be put here\n \n \n \n # Draw all the sprite lists that we have\n self.platform_list.draw(screen)\n self.enemy_list.draw(screen)", "def draw(self, screen):\n \n # Draw the background\n screen.fill(CAVE)\n screen.blit(self.background,(self.world_shift // 3,0))\n \n # Draw all the sprite lists that we have\n self.platform_list.draw(screen)\n #self.enemy_list.draw(screen)\n self.enemy_list.draw(screen)", "def on_show(self): \n arcade.set_background_color(arcade.color.BLACK)", "def render(self):\n if self.main_menu.active:\n self.main_menu.draw()\n elif self.game_over.active:\n self.game_over.draw()\n else:\n self.screen.fill(BACKGROUND_COLOR)\n\n # Score\n score_surf = self.score.render(\n \"Score {} \".format(self.GAME_SCORE), True, (255, 255, 255)\n )\n self.screen.blit(score_surf, (self.screen_rect.width - 100, 5))\n\n self.timer.draw(self.screen)\n self.board.draw(self.screen)\n\n pg.display.update()", "def draw_kame(self):\r\n #pygame.draw.rect(self.screen, self.color, self.rect, self.image)\r\n self.screen.blit(self.image, self.rect)", "def game_draw(self):\n pass", "def draw(self):\r\n self.scr.fill(SCREEN_COLOR)\r\n self.label.draw()\r\n pygame.display.flip()", "def draw(self, screen):\n # Draw the backgrounds.\n self.background1.draw(screen)\n self.background2.draw(screen)\n\n # Update the girder manager.\n self.girderManager.draw(screen)\n\n # Draw the player.\n self.player.draw(screen)\n\n if self.gameEnd:\n # Draw the end game box.\n screen.blit(self.endGame, (110, 84))\n # Draw the score.\n screen.blit(self.displayScore, (265, 180))\n # Draw the buttons.\n 
self.retryButton.draw(screen)\n self.exitButton.draw(screen)\n # Draw the buttons text.\n screen.blit(self.retryText, (210, 240))\n screen.blit(self.exitText, (210, 300))\n else:\n # Draw the score.\n screen.blit(self.displayScore, (10, 10))", "def draw(self):\n self.screen.blit(self.bg_img4, (0,0))\n self.all_sprites.draw(self.screen)\n self.final_score = \"{0}\".format(self.wall.score)\n msg = self.font.render(\"Lives: {0}\".format(self.paddle.lives),1,pygame.Color(\"white\"))\n self.screen.blit(msg,(15,15))\n score = self.font.render(\"Score:\" + self.final_score, 1,pygame.Color(\"white\"))\n self.screen.blit(score,(Constant.screen_width - 150 ,15))", "def draw_background(imname, width, height):\n \n # load background image (should be .bmp) to OpenGL texture\n bg_image = pygame.image.load(imname).convert()\n bg_data = pygame.image.tostring(bg_image,\"RGBX\",1)\n \n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n \n # bind the texture\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D,glGenTextures(1))\n glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,width,height,0,GL_RGBA,GL_UNSIGNED_BYTE,bg_data)\n glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST)\n \n # create quad to fill the whole window\n glBegin(GL_QUADS)\n glTexCoord2f(0.0,0.0); glVertex3f(-1.0,-1.0,-1.0)\n glTexCoord2f(1.0,0.0); glVertex3f( 1.0,-1.0,-1.0)\n glTexCoord2f(1.0,1.0); glVertex3f( 1.0, 1.0,-1.0)\n glTexCoord2f(0.0,1.0); glVertex3f(-1.0, 1.0,-1.0)\n glEnd()\n \n # clear the texture\n glDeleteTextures(1)", "def draw_board(self):\n self.window.fill(Colors.WHITE.value)\n self.draw_lines()\n self.draw_obstacles()", "def display_screen(self):\n self.screen.blit(self.bg, (0, 0))\n pygame.display.update()", "def draw(self):\n self.screen.fill(Color.BLACK)\n self.screen.blit(self.red_block, self.apple)\n [self.screen.blit(self.green_block, xy) for xy in self.snake]\n self.screen.blit(self.white_bar, (0, 0))\n self.draw_text(str(self.score), self.score_pos, size=32)\n pygame.display.flip()", "def draw_ground():\n for i in range(3):\n groundturtle.forward(1450)\n groundturtle.left(90)\n groundturtle.forward(25)\n groundturtle.left(90)\n groundturtle.forward(1450)\n groundturtle.right(90)\n groundturtle.forward(25)\n groundturtle.right(90)", "def draw(self):\n \n # First the background image\n self.screen.blit(self.bg_img, self.bg_img.get_rect())\n\n \"\"\"rect = pygame.Rect(0, 0, self.width, \n 2*self.pad+self.mode_imgs[0].get_height())\n surf = pygame.Surface((rect.width, rect.height), pygame.SRCALPHA)\n pygame.draw.rect(surf, (75, 75, 75, 75), rect)\n self.screen.blit(surf, surf.get_rect())\"\"\"\n\n # Then all the bubbles\n for b in self.bubbles:\n # draw this bubble\n rect = b.image.get_rect()\n rect = rect.move(int(b.position[0]), int(b.position[1]))\n self.screen.blit(b.image, rect)\n c = [int(x) for x in b.center()]\n\n # Then the mode\n self.screen.blit(self.mode_imgs[self.mode], self.get_mode_rect())\n\n # Then the bubble count\n self.screen.blit(self.bubble_imgs[len(self.bubbles)-1],\n self.get_bubbles_rect())\n\n # Finally, draw the announcement\n if self.state == Game.ANNOUNCE_STATE:\n letter = self.bubbles[self.target].letter.upper()\n text = self.announce_font.render(letter, True, Game.WHITE)\n rect = text.get_rect()\n rect = rect.move((self.width-rect.width)//2,\n (self.height-rect.height)//2)\n self.screen.blit(text, rect)\n elif self.state == Game.PLAY_STATE:\n letter = 
self.bubbles[self.target].letter.upper()\n text = self.reminder_font.render(letter, True, Game.WHITE)\n rect = text.get_rect()\n rect = rect.move(self.pad, self.pad)\n self.screen.blit(text, rect)", "def draw(self):\n self.win.fill(BLACK)\n title1 = self.messenger.text_format(\"La casa\", self.font, 58, WHITE)\n title2 = self.messenger.text_format(\"de\", self.font, 48, WHITE)\n title3 = self.messenger.text_format(\"Marcelo\", self.font, 58, WHITE)\n\n title_rect1 = title1.get_rect()\n title_rect2 = title2.get_rect()\n title_rect3 = title3.get_rect()\n\n self.win.blit(title1, (WIDTH / 3.4 - (title_rect1[2] / 2), 90))\n pygame.draw.rect(self.win, RED, (238, 92, 45, 45))\n self.win.blit(title2, (WIDTH / 2 - (title_rect2[2] / 2), 95))\n self.win.blit(title3, (WIDTH / 1.4 - (title_rect3[2] / 2), 90))\n self.draw_menu(WHITE, WHITE)", "def draw(self, screen):\n\n # Draw the background\n screen.fill(constants.WHITE)\n if self.theme == 'dirt':\n screen.blit(graphics.TILEDICT['dirt block wall'], graphics.TILEDICT['dirt block wall'].get_rect())\n elif self.theme == 'snow':\n screen.blit(graphics.TILEDICT['ice block wall'], graphics.TILEDICT['ice block wall'].get_rect())\n elif self.theme == 'castle':\n screen.blit(graphics.TILEDICT['castle wall'], graphics.TILEDICT['castle wall'].get_rect())\n\n # Draw all the sprite lists that we have\n self.platform_list.draw(screen)\n self.exit_sprite.draw(screen)\n self.bagGroup.draw(screen)", "def main():\n\n # Init pygame\n pygame.init()\n screen = pygame.display.set_mode((500, 310))\n pygame.display.set_caption(\"Black Jack by Hackiflette\")\n\n # Load background image\n bgd_tile = load_image(\"background_menu.png\")\n background = pygame.Surface((500, 310))\n background.blit(bgd_tile, (0, 0))\n\n # Prepare text\n title_font = pygame.font.Font(None, 36)\n text = title_font.render(\"Black Jack Project\", 2, (255, 255, 255))\n\n # Display on windows\n screen.blit(background, (0, 0))\n screen.blit(text, (80, 30))\n pygame.display.flip()\n\n # Init sprites\n all_sprites = pygame.sprite.RenderUpdates()\n clock = pygame.time.Clock()\n\n play = True\n while play:\n\n # Clear all the sprites\n all_sprites.clear(screen, bgd_tile)\n all_sprites.update()\n\n # Check for events\n for event in pygame.event.get():\n if event.type == QUIT:\n play = False\n\n # Update the scene\n dirty = all_sprites.draw(screen)\n pygame.display.update(dirty)\n\n clock.tick(40)\n\n pygame.quit()", "def draw(self):\n if pygame.time.get_ticks() - self.start_time < const.LEVEL_WAITING: \n \n self.screen.fill(const.BGCOLOR)\n self.draw_text(self.name, 48, const.WHITE, const.SCREEN_WIDTH / 2, const.SCREEN_HEIGHT / 4)\n \n \n else:\n # Draw the background\n self.screen.fill(const.BGCOLOR)\n\n # Draw all the sprite lists that we have\n self.active_sprite.draw(self.screen)\n if self.player.lost_life == False:\n self.screen.blit(self.player.image, self.player.rect)", "def render(self, game):\n pygame.draw.rect(game.screen,\n self.colour,\n (int(self.x), int(self.y), self.a, self.b))", "def draw(self):\n\t\tself.screen.fill(pygame.Color('black'))\n\t\tfor column in self.model.blocks:\n\t\t\tfor block in column:\n\t\t\t\tr = pygame.Rect(block.left,\n\t\t\t\t\t\t\t\tblock.top,\n\t\t\t\t\t\t\t\tblock.size,\n\t\t\t\t\t\t\t\tblock.size)\n\t\t\t\tpygame.draw.rect(self.screen, block.color,r)\n\t\tpygame.display.update()", "def renderall(self):\n\n if not self.isinitialized:\n return\n # clear display\n self.screen.fill(BGCOLOR)\n # draw the board\n self.drawBoard()\n # flip the display to show whatever we 
drew\n pygame.display.flip()", "def on_draw(self):\n arcade.start_render()\n arcade.draw_lrwh_rectangle_textured(0, 0,\n constants.SCREEN_WIDTH * 1, constants.SCREEN_HEIGHT * 1,\n self.background, alpha=50)", "def draw_me(self):\r\n\t\tself.image.fill((100, 200, 100))\r\n\t\tif self.active: pg.draw.rect(self.image, (100, 100, 200), self.frame, 3) #if active => draw frame around selected entity width 3\r\n\t\tself.display_surface.blit(self.image, self.rect)", "def draw(self):\n\n self.squares.draw(self.screen)\n if not self.hide_grid:\n self.draw_grid()\n self.fleas.draw(self.screen)\n pygame.display.flip()", "def draw(self):\n if self.master != None :\n fill = Cell.FILLED_COLOR_BG\n outline = Cell.FILLED_COLOR_BORDER\n\n if not self.fill:\n fill = Cell.EMPTY_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n walls[self.ord][self.abs] = 0\n else:\n walls[self.ord][self.abs] = 1\n\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)", "def Render(self, background):\r\n ui_back = rpg_image.Load('data/ui/board_500.png')\r\n rpg_gfx.Draw(ui_back, background, (70,55))\r\n \r\n ui_back_slot = rpg_image.Load('data/ui/board_500_slot_61.png')\r\n ui_back_slot_small = rpg_image.Load('data/ui/board_500_slot_35.png')\r\n \r\n # Print option background borders\r\n if len(self.options) >= 1:\r\n rpg_gfx.Draw(ui_back_slot, background, (70+30,55+75))\r\n if len(self.options) >= 2:\r\n rpg_gfx.Draw(ui_back_slot, background, (70+30,55+145))\r\n if len(self.options) >= 3:\r\n rpg_gfx.Draw(ui_back_slot, background, (70+30,55+215))\r\n if len(self.options) >= 4:\r\n rpg_gfx.Draw(ui_back_slot, background, (70+30,55+285))\r\n \r\n # Draw the Prompt\r\n rpg_image.DrawText(self.dialogue.prompt, 30, (255, 255, 255), (100,80),\r\n background, outline=1)\r\n \r\n # Print all the option texts\r\n for count in range(0, len(self.options)):\r\n #TODO(g): Make the show_always data option check and force this option\r\n # to be the last option, if we have it set to be on\r\n \r\n # Only drawing 4 now\r\n if count >= 4:\r\n break\r\n \r\n option = self.options[count]\r\n \r\n x = 70+30 + 15\r\n y = 55+75 + (70 * count) + 15\r\n \r\n # If this option is selected, highlight it\r\n if self.selected_option == count:\r\n color = (255, 0, 0)\r\n else:\r\n color = (255, 255, 255)\r\n \r\n rpg_image.DrawText(option['text'], 20, color, (x,y), background,\r\n outline=1)", "def draw(self):\n\n State.screen.draw()", "def draw(self):\n self.screen.blit(self.image, self.rect)", "def draw_board(self):\n for i in range(0, 800, 80):\n if i == 80:\n pygame.draw.line(self.screen, 'black', (i, 80), (i, 800), width=3)\n pygame.draw.line(self.screen, (0, 0, 128), (0, i), (720, i), width=5)\n continue\n pygame.draw.line(self.screen, 'black', (i, 80), (i, 800), width=3)\n pygame.draw.line(self.screen, 'black', (0, i), (720, i), width=3)\n for j in range(240, 800, 240):\n pygame.draw.line(self.screen, (0, 0, 128), (j, 80), (j, 800), width=5)\n pygame.draw.line(self.screen, (0, 0, 128), (0, j + 80), (720, j + 80), width=5)\n pygame.draw.line(self.screen, (0, 0, 128), (0, 80), (0, 800), width=5)", "def draw(canvas):\n\n # draw the background graphic\n canvas.draw_image(background, (150, 150), (300, 300), (150, 150), (300, 300))\n\n # draw the stopwatch text\n canvas.draw_text(time, [centre_x(time, time_font_size, font_face_all), 143], time_font_size, colour_time, font_face_all)\n\n # draw the score as 'correct 
guesses / total guesses'\n canvas.draw_text(score_total, [200, 40], score_message_font_size, colour_score, font_face_all)\n\n # draw the score message\n if message_displayed:\n # modulus loops all the colours in rainbow\n # colour_count incremented in message_colour_tick\n rainbow_colour = colour_count % len(rainbow)\n canvas.draw_text(event_message, [message_x, 70], message_event_font_size, rainbow[rainbow_colour], font_face_all)\n\n # draw the animated stars (two white dots)\n canvas.draw_text(\".\", [star_x, 86], 12, colour_stars, font_face_all)\n canvas.draw_text(\".\", [star_x, 165], 12, colour_stars, font_face_all)\n\n # draw the achievement badges\n if my_score >= 5:\n canvas.draw_image(cheevo5, (10, 10), (21, 20), (12, 31), (21, 20))\n if my_score >= 10:\n canvas.draw_image(cheevo10, (10, 10), (21, 20), (35, 31), (21, 20))\n if my_score >= 25:\n canvas.draw_image(cheevo25, (10, 10), (21, 20), (58, 31), (21, 20))\n if my_score >= 50:\n canvas.draw_image(cheevo50, (10, 10), (21, 20), (81, 31), (21, 20))\n if my_score >= 100:\n canvas.draw_image(cheevo100, (10, 10), (21, 20), (104, 31), (21, 20))\n\n # draw the score streak counters\n canvas.draw_text(\"Current Streak:\", [30, 180], score_streak_font_size, colour_streak, font_face_all)\n canvas.draw_text(str(current_streak), [125, 180], score_streak_font_size, colour_streak, font_face_all)\n canvas.draw_text(\"Best Streak:\", [170, 180], score_streak_font_size, colour_streak, font_face_all)\n canvas.draw_text(str(best_streak), [247, 180], score_streak_font_size, colour_streak, font_face_all)", "def draw(self):\n self.bufferX = (self.appWidth/2) - self.viewX\n self.bufferY = (self.appHeight/2) - self.viewY\n anwp.sl.engine.clear()\n anwp.sl.engine.drawImage(0, 0, self.appWidth, self.appHeight, self.backgroundImage)\n self.drawWarpLines()\n \n # render engine\n anwp.sl.engine.render()\n self.drawSystemInfo()\n self.drawWarpGateInfo()\n self.drawWarpTradeInfo()", "def draw(self):\n arcade.draw_rectangle_filled(self.center.x,\n self.center.y,\n self.width,\n self.height,\n arcade.color.WHITE)", "def draw_grid():\r\n screen.fill((0,0,0))\r\n pygame.draw.line(screen, (255,255,255),(WIDTH/3,0),(WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(2*WIDTH/3,0),(2*WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(0,HEIGHT/3),(WIDTH,HEIGHT/3))\r\n pygame.draw.line(screen, (255,255,255),(0,2*HEIGHT/3),(WIDTH,2*HEIGHT/3))", "def draw_pavement():\n\n roberto.penup()\n roberto.goto(-345, -100)\n roberto.pendown()\n roberto.begin_fill()\n for i in range(4): # this loop draws a big black rectangle that is positioned at the bottom part of the screen\n roberto.forward(684)\n roberto.right(90)\n roberto.end_fill()", "def drawScreen(screen):\n screen.fill(BLACK) # Fill the screen with black.\n \n\n # Flip the display so that the things we drew actually show up.\n pygame.display.flip()", "def draw_cover():\n \n game_cover = games[selected_game].cover\n \n # Draw game cover art\n for i in range(len(game_cover)):\n for j in range(len(game_cover[0])):\n led_x = i + 1\n led_y = j + 2\n lp.led_ctrl_xy(led_x, led_y, *game_cover[i][j])", "def draw(self):\n self.screen.fill(App.current_scene.fill_color)\n\n # To turn on debug mode on pure simulation underneath\n # uncomment line below and comment one after\n # if isinstance(App.current_scene, GameScene):\n # App.current_scene.object_mgr.space.debug_draw(self.draw_options)\n\n App.current_scene.draw(self.screen)\n\n pygame.display.flip()", "def __draw_window(self) -> None:\n # draw background\n 
self.__draw_background()\n \n # draw highscore\n highscore_text = HIGHSCORE_FONT.render(\"Highscore: \" + str(round(self.highscore)), 1, RED)\n self.win.blit(highscore_text, ((10,10)))", "def TileBackground(self, dc):\r\n\r\n sz = self.GetClientSize()\r\n w = self._backgroundImage.GetWidth()\r\n h = self._backgroundImage.GetHeight()\r\n\r\n x = 0\r\n\r\n while x < sz.width:\r\n y = 0\r\n\r\n while y < sz.height:\r\n dc.DrawBitmap(self._backgroundImage, x, y, True)\r\n y = y + h\r\n\r\n x = x + w", "def draw(self, background, image_path):\n self.image = pygame.image.load(image_path).convert_alpha()\n background.blit(self.image, (self.x_pos, self.y_pos))", "def draw_sky(self):\r\n win.blit(self.sky, (0, 0))", "def draw_a50(self):\r\n\t\tpg.draw.rect(self.image, (100, 200, 100), self.rect)\r\n\t\r\n\t\t#self.display_surface.blit(self.image, self.rect)\r", "def render(self, screen):\n # print(\"Drawing scene {}\".format(self.imgname))\n screen.fill(self.color)" ]
[ "0.8316609", "0.82724404", "0.82724404", "0.8187887", "0.808807", "0.80437565", "0.8034377", "0.7890868", "0.7850961", "0.7809966", "0.7805984", "0.77891445", "0.7769102", "0.7744283", "0.76951754", "0.76865375", "0.75081706", "0.7474712", "0.74424314", "0.7403836", "0.7385327", "0.73766756", "0.7351175", "0.732576", "0.73091716", "0.72965795", "0.7296339", "0.7256934", "0.7240025", "0.7202386", "0.7147581", "0.7120395", "0.71097374", "0.70998234", "0.7081022", "0.70543325", "0.7049706", "0.7043737", "0.7005861", "0.70036244", "0.6990628", "0.6974642", "0.6957515", "0.6947874", "0.69459933", "0.69344217", "0.693047", "0.69284946", "0.6912459", "0.68977845", "0.6882665", "0.68700284", "0.686251", "0.6861989", "0.68554544", "0.6845556", "0.68455046", "0.68421835", "0.6841729", "0.68385124", "0.6830758", "0.6814934", "0.6800835", "0.6798483", "0.6796561", "0.6784821", "0.6775115", "0.676208", "0.67454225", "0.67450356", "0.6731429", "0.67286354", "0.672635", "0.6706605", "0.6706482", "0.6703202", "0.67014843", "0.66936773", "0.66748685", "0.66626686", "0.6657261", "0.6643423", "0.6643306", "0.6638028", "0.6617892", "0.66006744", "0.65975034", "0.658345", "0.65682065", "0.65664524", "0.65649664", "0.6563801", "0.6562864", "0.65605974", "0.6552513", "0.654643", "0.6545318", "0.6543653", "0.65426826", "0.6539001" ]
0.7090386
34
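The positive document for this row is a one-line function that blits a pre-loaded global surface Bg at the top-left corner of the window. A self-contained sketch of the same pattern follows; the solid-color stand-in for Bg and the window size are assumptions, since the row does not show how the real background image is loaded.

import pygame

pygame.init()
dis = pygame.display.set_mode((800, 600))

# Stand-in for the game's background image; the original presumably loads
# Bg with pygame.image.load somewhere outside the snippet.
Bg = pygame.Surface((800, 600))
Bg.fill((40, 0, 40))

def redraw(dis):
    # Paint the background over the whole window, then push it to the screen.
    dis.blit(Bg, (0, 0))
    pygame.display.update()

redraw(dis)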
This function calculates the distance between the food and the snake.
def Distance(foodx,foody):
    di = ((snake.x - foodx)**2) + ((snake.y - foody)**2)
    d = int(math.sqrt(di))
    return d
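The function above reads the snake's head position from a global snake object, so it cannot run on its own. A self-contained equivalent follows; passing the head coordinates in explicitly is an assumption made for illustration, and the int truncation mirrors the original snippet.

import math

def distance(snake_x, snake_y, food_x, food_y):
    # Euclidean distance between the snake's head and the food, truncated
    # to an int exactly as the dataset snippet does.
    return int(math.sqrt((snake_x - food_x) ** 2 + (snake_y - food_y) ** 2))

assert distance(0, 0, 3, 4) == 5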
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def measure_distance(self):\n # set Trigger to HIGH\n GPIO.output(self.GPIO_TRIGGER, True)\n\n # set Trigger after 0.01ms to LOW\n time.sleep(0.00001)\n GPIO.output(self.GPIO_TRIGGER, False)\n\n start_time = time.time()\n stop_time = time.time()\n\n # save StartTime\n while GPIO.input(self.GPIO_ECHO) == 0:\n start_time = time.time()\n\n # save time of arrival\n while GPIO.input(self.GPIO_ECHO) == 1:\n stop_time = time.time()\n\n # time difference between start and arrival\n time_elapsed = stop_time - start_time\n # multiply with the sonic speed (343.00 m/s)\n # and divide by 2, because there and back\n distance = (time_elapsed * 343.00) / 2\n\n return distance", "def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)", "def distance(self) -> int:\n return 0", "def distance_between_wheels():", "def distance(self, first_tape, second_tape):\n pairs = zip(first_tape, second_tape)\n return math.sqrt(abs(sum(map((lambda n: self.subsq(*n)), pairs))))", "def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre", "def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)", "def get_distance(self):\n print(\"voici la distance à l'obstacle\")", "def distances(self):", "def manhatam_distance(self) -> int:\n raise NotImplementedError", "def get_distance(self) -> int:\n return self.get_measurement_data().distance", "def distance(self):\n _, _, costs = self.calculate_costs()\n return np.sum(costs)", "def _get_distance_betweenitems(self, item_no1, item_no2):\n\n try:\n if item_no1 >= 0 and item_no2 >= 0:\n loc_current = self.page_current.item_onscreenlocs[item_no1]\n loc_potential = self.page_current.item_onscreenlocs[item_no2]\n distance = abs(loc_potential - loc_current)\n else:\n distance = 0\n\n except IndexError:\n distance = 0\n\n return distance", "def calc_distance(self, observation):\n actual_obs = observation[0]\n scrn_player = actual_obs.observation.feature_screen.player_relative\n scrn_select = actual_obs.observation.feature_screen.selected\n scrn_density = actual_obs.observation.feature_screen.unit_density\n\n state_added = scrn_select + scrn_density\n\n marine_center = np.mean(self.xy_locs(scrn_player == 1), axis=0).round()\n\n # first step\n if np.sum(scrn_select) == 0:\n marine_center = np.mean(self.xy_locs(scrn_player == 1), axis=0).round()\n # marine behind beacon\n if isinstance(marine_center, float):\n marine_center = np.mean(self.xy_locs(state_added == 2), axis=0).round()\n else:\n # normal navigation\n marine_center = np.mean(self.xy_locs(state_added == 2), axis=0).round()\n if isinstance(marine_center, float):\n marine_center = np.mean(self.xy_locs(state_added == 3), axis=0).round()\n\n beacon_center = np.mean(self.xy_locs(scrn_player == 3), axis=0).round()\n #\n # print(state_added)\n # print(\"---- Marine {} | {} Beacon ----\".format(marine_center, beacon_center))\n # time.sleep(0.2)\n distance = math.hypot(beacon_center[0] - marine_center[0],\n beacon_center[1] - marine_center[1])\n\n return beacon_center, marine_center, distance", "def _calculate_distance(self, passenger, driver):\n londriver, latdriver = driver['lon'], driver['lat']\n lonpassenger, latpassenger = passenger['lon'], passenger['lat']\n lon_p, lat_p, lon_d, lat_d = map(radians,\n [float(lonpassenger), float(latpassenger), float(londriver), float(latdriver)])\n 
lon_distance = lon_d - lon_p\n lat_distance = lat_d - lat_p\n a = sin(lat_distance / 2) ** 2 + cos(lat_p) * cos(lat_d) * sin(lon_distance / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def _calculate_distance(self, passenger, driver):\n londriver, latdriver = driver['lon'], driver['lat']\n lonpassenger, latpassenger = passenger['lon'], passenger['lat']\n lon_p, lat_p, lon_d, lat_d = map(radians,\n [float(lonpassenger), float(latpassenger), float(londriver), float(latdriver)])\n lon_distance = lon_d - lon_p\n lat_distance = lat_d - lat_p\n a = sin(lat_distance / 2) ** 2 + cos(lat_p) * cos(lat_d) * sin(lon_distance / 2) ** 2\n c = 2 * asin(sqrt(a))\n km = 6367 * c\n return km", "def calculate_distance(atom1,atom2): #dot string to show when you go into the help doc of this function\n x_distance = atom1[0]-atom2[0]\n y_distance = atom1[1]-atom2[1]\n z_distance = atom1[2]-atom2[2]\n distance = numpy.sqrt(x_distance**2+ y_distance**2+z_distance**2)\n return distance", "def distance():\n return str(us.get_distance())", "def manhatam_distance(self) -> int:\n return abs(self.north) + abs(self.east)", "def distance(self, keyOne, keyTwo):", "def distance(self, other_pt, is_lla=True):\n return 0.0", "def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)", "def get_distance(self, star):\n if self.centroid == star.centroid:\n print(\"distance for same star\")\n return 0\n\n unitary_a = self.get_unitary_vector()\n unitary_b = star.get_unitary_vector()\n dab = math.degrees(math.acos(unitary_a[0] * unitary_b[0] +\n unitary_a[1] * unitary_b[1] +\n unitary_a[2] * unitary_b[2]))\n return dab", "def manhatam_distance(self) -> int:\n return abs(self.position[0]) + abs(self.position[1])", "def get_distance(self, star):\n if self == star:\n return 0\n\n a_car = self.get_cartesian_coords()\n b_car = star.get_cartesian_coords()\n dab = math.degrees(math.acos(a_car[0] * b_car[0] +\n a_car[1] * b_car[1] +\n a_car[2] * b_car[2]))\n return dab", "async def distance(self):\n return round(await self._rpc.distance(), 2)", "def getDistance(pos1, pos2):\r\n return ((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2) ** 0.5", "def calcDistance(self, left, right):\n\n return math.fabs(right-left)", "def getDistance(self):\n return sqrt(self.state[0] * self.state[0] + self.state[2] * self.state[2])", "def get_distance(route, dists):\n cost = 0\n if route[0] != route[-1]:\n route.append(route[0])\n\n for i in range(len(route)-1):\n cost += dists[route[i], route[i+1]]\n # cost += dists[route[-1], route[0]]\n return cost", "def calc_cost(self, dx, dy):\n self.distance+=np.sqrt(dx**2+dy**2)", "def location_of_stops(self, choice, distance):\r\n avg_dist = 0\r\n min_dist = 1000\r\n max_dist = 0\r\n\r\n if choice == 1:\r\n #for dist_ in distance:\r\n # if int(dist_) < min_dist:\r\n # min_dist = dist_\r\n return min(distance)\r\n elif choice == 2:\r\n for dist_ in distance:\r\n if int(dist_) > max_dist:\r\n max_dist = dist_\r\n return max_dist\r\n elif choice == 3:\r\n for dist_ in distance:\r\n avg_dist += int(dist_)\r\n return avg_dist/9\r\n else:\r\n print \"Wrong Choice\"\r\n return None", "def test_distance_aed(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AED'), 'NO SUCH ROUTE')", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def __get_distance(self, game_object):\n obj_x, obj_y = 
game_object.get_coordinates()\n self_x, self_y = self._coordinates\n\n inner = (obj_x-self_x)**2 + (obj_y-self_y)**2\n return math.sqrt(inner)", "def measure_distance(cell1, cell2):\n\n\tx1, y1 = cell1.location\n\tx2, y2 = cell2.location\n\tx_dist = abs(x1-x2)\n\ty_dist = abs(y1-y2)\n\n\tif x_dist > 5:\n\t\tx_dist = 10-x_dist\n\tif y_dist > 5:\n\t\ty_dist = 10-y_dist\n\n\treturn (x_dist**2 + y_dist**2)**.5", "def evaluate_distance(self):\n\n fitness = 0\n routes = split_to_routes(self)\n\n for route in routes:\n route = [home] + route + [home]\n for i in range(1,len(route)):\n # Calculates full distance, including from last city\n # to first, to terminate the trip\n pos_from = route[i - 1]\n pos_to = route[i]\n distance = dm[pos_from][pos_to]\n fitness += distance\n\n return int(fitness)", "def route_distance(self, route):\n\n dist = 0\n src = route[0]\n\n if src not in self.G:\n # don't return two diff types/meanings, throw exception instead. same below\n # TODO best impl?\n raise SGraph.NoSuchRoute('NO SUCH ROUTE')\n\n for city in route[1:]:\n if city not in self.G[src]:\n raise SGraph.NoSuchRoute('NO SUCH ROUTE')\n dist += self.G[src][city]\n src = city\n\n return dist", "def distance(self, coord1, coord2):\n return (abs(coord1.x - coord2.x) + abs(coord1.y - coord2.y) + abs(coord1.z - coord2.z))//2", "def getDistance(self):\n taBox = (self.thor * self.tvert)/(720*960) #box area as percentage of whole\n if(taBox==None or taBox<=0): return -1\n const = 4 * math.tan(0.471)*math.tan(0.3576)\n return math.sqrt((self.abox)/(const*taBox))", "def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance", "def test_distance_aebcd(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AEBCD'), '22')", "def get_distance(self, anchor_in, pos_in):\n feed_dict = {self.anchor: anchor_in, self.pos: pos_in}\n distances = - self.sess.run(self.score_anchor_pos, feed_dict=feed_dict)\n return distances", "def test_distance(self):\n\n def f(a, b):\n if a == b:\n return 0\n if (a in \"UC\" and b in \"UC\") or (a in \"AG\" and b in \"AG\"):\n return 1\n else:\n return 10\n\n # uses identity function by default\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"\"), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"U\"), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"UCCCCCUC\"), 3)\n # case-sensitive!\n self.assertEqual(self.RNA(\"AAAAA\").distance(\"CCCCC\"), 5)\n # should use function if supplied\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"\", f), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"U\", f), 0)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"C\", f), 1)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"G\", f), 10)\n self.assertEqual(self.RNA(\"UGCUGCUC\").distance(\"UCCCCCUC\", f), 21)\n # case-sensitive!\n self.assertEqual(self.RNA(\"AAAAA\").distance(\"CCCCC\", f), 50)", "def _get_distance(reindeer, race_time):\n interval = reindeer.flight_time + reindeer.rest_time\n cycles = race_time // interval\n flight_time = min(reindeer.flight_time, race_time - interval * cycles)\n total_flying_time = reindeer.flight_time * cycles + flight_time\n return total_flying_time * reindeer.flight_speed", "def closestFood(pos, food, walls):\n fringe = [(pos[0], pos[1], 0)]\n expanded = set()\n while fringe:\n pos_x, pos_y, dist = fringe.pop(0)\n if (pos_x, pos_y) in expanded:\n continue\n 
expanded.add((pos_x, pos_y))\n # if we find a food at this location then exit\n if food[pos_x][pos_y]:\n return dist\n # otherwise spread out from the location to its neighbours\n nbrs = Actions.getLegalNeighbors((pos_x, pos_y), walls)\n for nbr_x, nbr_y in nbrs:\n fringe.append((nbr_x, nbr_y, dist+1))\n # no food found\n return None", "def closestFood(pos, food, walls):\n fringe = [(pos[0], pos[1], 0)]\n expanded = set()\n while fringe:\n pos_x, pos_y, dist = fringe.pop(0)\n if (pos_x, pos_y) in expanded:\n continue\n expanded.add((pos_x, pos_y))\n # if we find a food at this location then exit\n if food[pos_x][pos_y]:\n return dist\n # otherwise spread out from the location to its neighbours\n nbrs = Actions.getLegalNeighbors((pos_x, pos_y), walls)\n for nbr_x, nbr_y in nbrs:\n fringe.append((nbr_x, nbr_y, dist+1))\n # no food found\n return None", "def distance_traveled():\n user_distance = raw_input(\"How far is the ship going? Include unit label (m, km, au, or mi). \")\n distance = \"\"\n unit = \"\"\n for ch in user_distance:\n if ch in [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"]:\n distance = distance + str(ch)\n elif ch == \".\":\n distance = distance + \".\"\n elif ch == \",\":\n distance = distance + \".\"\n else:\n unit = unit + str(ch)\n unit = unit.strip(\" \")\n if distance[-1] == \".\":\n distance = distance.strip(\".\")\n while True:\n try:\n float(distance)\n except ValueError:\n distance = raw_input(\"Invalid input. Please enter a positive number with no other non-decimal characters. \")\n continue\n else:\n break\n while float(distance) <= 0:\n distance = raw_input(\"Invalid input. Please enter a positive number. \")\n try:\n float(distance)\n except ValueError:\n distance = raw_input(\"Invalid input. Please enter a positive number with no other non-decimal characters. \")\n continue\n else:\n break\n distance = float(distance)\n unit = unit.lower()\n expected_units_m = [\"m\", \"meters\"]\n expected_units_km = [\"km\", \"kilometers\"]\n expected_units_mi = [\"mi\", \"miles\"]\n expected_units_au = [\"au\", \"ua\", \"astronomical units\", \"astronomical unit\", \"astronomic units\", \"astronomic unit\"]\n while unit not in expected_units_m and unit not in expected_units_km and unit not in expected_units_au and unit \\\n not in expected_units_mi:\n unit = raw_input(\"Unexpected unit type or label. Valid units are meters (m), kilometers (km), \"\n \"astronomical units (au), or miles (mi). 
\")\n if unit in expected_units_m:\n distance = distance\n elif unit in expected_units_km:\n distance = distance * 1000\n elif unit in expected_units_au:\n distance = distance * 149597870700\n elif unit in expected_units_mi:\n distance = round((distance * 1609.344), 0)\n return distance", "def distance(self,pt1,pt2):\n #productive #frequent\n if frequent: profprint()\n d = ( ( float(pt1[0]) - float(pt2[0]) )**2 + ( float(pt1[1]) - float(pt2[1]) )**2 + ( float(pt1[2]) - float(pt2[2]) )**2 )**0.5\n return d", "def __calculate_average_distance(self):\n game = self.__game # type: Game\n all_icebergs = game.get_all_icebergs()\n all_icebergs_length = len(all_icebergs)\n sum_distances = 0\n for i in range(all_icebergs_length):\n for j in range(i + 1, all_icebergs_length):\n iceberg1 = all_icebergs[i]\n iceberg2 = all_icebergs[j]\n sum_distances += iceberg1.get_turns_till_arrival(iceberg2)\n\n return sum_distances / (all_icebergs_length * (all_icebergs_length - 1) / 2)", "def _get_distance_diff(self, input):\n nbatch = input.shape[0]\n in1 = input.unsqueeze(1).expand(\n nbatch, self.nelec, self.nelec, self.ndim)\n in2 = input.unsqueeze(2).expand(\n nbatch, self.nelec, self.nelec, self.ndim)\n dist = torch.pow(in1 - in2, 2).sum(3)\n return dist", "def calculateDistance(self, source, destination):\n dx = source.getX() - destination.getX();\n dy = source.getY() - destination.getY();\n return int(math.ceil(math.sqrt(dx * dx + dy * dy)))", "def distance(self, pt1, pt2):\r\n # productive #frequent\r\n if frequent: profprint()\r\n d = ((float(pt1[0]) - float(pt2[0])) ** 2 + (float(pt1[1]) - float(pt2[1])) ** 2 + (float(pt1[2]) - float(pt2[2])) ** 2) ** 0.5\r\n return d", "def get_maze_distance(self, pos1, pos2):\n d = self.distancer.get_distance(pos1, pos2)\n return d", "def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d", "def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2", "def test_distance_ad(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('AD'), '5')", "def get_distance_between_tiles(self, grid, tile1, tile2):\n cols = self.size[grid][1]\n # Calculate coordinates in grid:\n tile1_x = tile1 % cols\n tile2_x = tile2 % cols\n tile1_y = tile1 // cols\n tile2_y = tile2 // cols\n # Distances along x and y:\n delta_x = abs(tile1_x - tile2_x) * self.get_tile_width_d(grid)\n delta_y = abs(tile1_y - tile2_y) * self.get_tile_height_d(grid)\n return sqrt(delta_x**2 + delta_y**2)", "def get_distance(self):\n\n # Activate trigger\n self.trigger()\n\n # Detect rising edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.RISING, timeout=2)\n if channel is None:\n # Timeout on wait of rising interrupt\n return None\n else:\n # Rising edge detected, save pulse start\n pulse_start = time.time()\n\n\n # Detect falling edge of echo pin\n channel = GPIO.wait_for_edge(self.echo_pin, GPIO.FALLING, timeout=2)\n if channel is None:\n # Timeout on wait of falling interrupt\")\n return None\n else:\n # Falling edge detected, save pulse end\n pulse_end = time.time()\n\n # Calculated pulse width in microseconds (x1mln)\n pulse_width = (pulse_end - pulse_start)*1000000\n\n # Return distance in cm\n return pulse_width / 58", "def CalculateDistanceFromDropPoint(self,endPosX,endPosY):\n\n\n R = 6371.0\n\n startLat = math.radians(float(self.dropX))\n startLong = math.radians(float(self.dropY))\n\n endLat = math.radians(float(endPosX))\n endLong = math.radians(float(endPosY))\n\n 
longDist = endLong - startLong\n latDist = endLat - startLat\n\n a = math.sin(latDist / 2) ** 2 + math.cos(startLat) * math.cos(endLat) * math.sin(longDist / 2) ** 2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n\n self.distanceFromDropPoint = R * c", "def dist(self,ipa_seg1,ipa_seg2):\n v1 = self.embed(ipa_seg1)\n v2 = self.embed(ipa_seg2)\n return np.sqrt(((v1-v2)**2).sum())", "def distance(gps1, gps2):\n return haversine(gps1.lng, gps1.lat, gps2.lng, gps2.lat)", "def getSafeFoodGoal(self, gameState):\n food = self.safeFood\n # print(food)\n myPos = self.getCurrentObservation().getAgentState(self.index).getPosition()\n if len(food) > 0:\n dis = 9999\n nearestFood = food[0]\n for a in food:\n temp = self.getMazeDistance(myPos, a)\n if temp < dis:\n dis = temp\n nearestFood = a\n return nearestFood, dis\n else:\n return None, None", "def distance(pos1, pos2):\n return math.sqrt((pos1[0] - pos2[0])**2. + (pos1[1] - pos2[1])**2.)", "def calculate_distance(srcLong, srcLat, dstLong, dstLat):\n return math.sqrt( (srcLong-dstLong) ** 2 + (srcLat - dstLat) ** 2)", "def calculateFoodPositions(self, state):\n foodPos = [9999999] * state.getNumFood()\n if(state.getNumFood() > 0):\n # minDistance = 900000\n # pacmanPosition = state.getPacmanPosition()\n counter = 0\n for i in range(state.data.layout.width):\n for j in range(state.data.layout.height):\n if state.hasFood(i, j):\n foodPos[counter] = (i,j)\n counter += 1\n # distance = util.manhattanDistance(pacmanPosition, foodPosition)\n # if distance < minDistance:\n # minDistance = distance\n return foodPos\n\n else:\n return None", "async def get_distance() -> int:\n\n _initialize_sensor()\n pulse_start, pulse_end = await _get_echo_time(False), await _get_echo_time(True)\n signal_delay = pulse_end - pulse_start\n distance = _compute_distance(signal_delay)\n return int(distance)", "def distance(self):\n return self.value * len(self.alignment.query)", "def get_distance(p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5", "def feed(self, food, grid, snake_status, finder):\n head = snake_status[\"head\"]\n head_loc = grid.node(head[0], head[1])\n target = None\n food_path = []\n\n # sort the food by proximity to the snake\n closer_food = sorted(food, key=lambda apple:\n abs(head[0] - apple['x']) + abs(head[1] - apple['y']))\n # chose the closest reachable apple\n for chosen_apple in closer_food:\n apple = (chosen_apple[\"x\"], chosen_apple[\"y\"])\n apple_loc = grid.node(apple[0], apple[1])\n\n # verify that a path to the chosen apple exists\n path = finder.find_path(head_loc, apple_loc, grid)\n grid.cleanup()\n\n if len(path[0]) >= 2:\n danger = self.assess_danger(grid, apple_loc, snake_status, finder)\n if danger <= 2:\n target = apple\n food_path = path[0]\n break\n\n return target, food_path", "def distance(self):\n return self._distance", "def calcDist(indexLabel1,indexLabel2):\n x1=xCord[indexLabel1]\n x2=xCord[indexLabel2]\n y1=yCord[indexLabel1]\n y2=yCord[indexLabel2]\n z1=zCord[indexLabel1]\n z2=zCord[indexLabel2]\n distance=(((x1-x2)**2+(y2-y1)**2+(z2-z1)**2)**0.5)\n return distance", "def get_distance_metres(aLocation1, aLocation2):\n [dNorth, dEast, dDown] = get_position_error(aLocation1, aLocation2)\n \n return math.sqrt((dNorth*dNorth) + (dEast*dEast))", "def get_distance(pt1,pt2):\r\n x1 = pt1[1]\r\n y1 = pt1[0]\r\n x2 = pt2[1]\r\n y2 = pt2[0]\r\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return d", "def get_distance(self, heuristic=\"\"):\n # If no heuristic is specified, used the default\n if(heuristic == 
\"\"):\n heuristic = BoardPath._heuristic\n\n if(heuristic == \"manhattan\"):\n return self.calculate_manhattan_dist()\n elif(heuristic == \"euclidean\"):\n return self.calculate_euclidean_dist()\n elif(heuristic == \"made_up\"):\n return self.calculate_made_up_dist()\n else:\n sys.exit()", "def computeDistance(self):\n distance = 0.0\n height = self.heightField.getNumber()\n ratio = self.indexField.getNumber()\n numBounces = self.bouncesField.getNumber()\n\n for bounce in range(numBounces):\n bounceHeight = height * ratio\n distance += height + bounceHeight\n height = bounceHeight\n\n self.distanceField.setNumber(distance)", "def get(cls, approach):\n return approach.distance", "def getDangerousFoodGoal(self, gameState):\n food = self.dangerousFood\n # print(food)\n myPos = self.getCurrentObservation().getAgentState(self.index).getPosition()\n if len(food) > 0:\n dis = 9999\n nearestFood = food[0]\n for a in food:\n temp = self.getMazeDistance(myPos, a)\n if temp < dis:\n dis = temp\n nearestFood = a\n return nearestFood, dis\n else:\n return None, None", "def distance_(origin, destination):\n lat1, lon1 = origin\n lat2, lon2 = destination\n radius = 6371 # km\n\n dlat = math.radians(lat2 - lat1)\n dlon = math.radians(lon2 - lon1)\n a = (\n math.sin(dlat / 2) **2 +\n math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\n math.sin(dlon / 2) **2 \n )\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = radius * c\n\n return d", "def test_get_distance() -> None:\n meters = location_util.distance(\n COORDINATES_PARIS[0],\n COORDINATES_PARIS[1],\n COORDINATES_NEW_YORK[0],\n COORDINATES_NEW_YORK[1],\n )\n\n assert meters / 1000 - DISTANCE_KM < 0.01", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5", "def distance(self,coord_1, coord_2):\n return np.sqrt(np.sum((np.array(coord_1)-np.array(coord_2))**2))", "def _calc_distance(self, checkpoint_loc):\n return N.sqrt((self.current_location[1] - checkpoint_loc[1])**2 \\\n + (self.current_location[0] - checkpoint_loc[0])**2)", "def get_distance(self, anchor_in, pos_in):\n feed_dict = {self.anchor: anchor_in, self.pos: pos_in}\n distances = self.sess.run(self.score_anchor_pos, feed_dict=feed_dict)\n return distances", "def estimated_distance(self, log=False):\n\t\t\n\t\tx0 = GRAVITY - self.thrust*.95 / (self.total_mass - self.fuel_consumption * 0.0)\n\t\tx1 = GRAVITY - self.thrust*.95 / (self.total_mass - self.fuel_consumption * 1.0)\n\n\t\t# Derivative at x=0 and x=1\n\t\tu = x0\n\t\tv = x1\n\t\t# Initial height at x=0\n\t\ty = abs(self.velocity)\n\n\t\tif log:\n\t\t\tprint(f'u: {u}, v: {v}, y: {y}\\nEstimated distance: {get_positive_area(u, v, y)}\\n')\n\t\t\n\t\treturn get_positive_area(u, v, y)", "def test_distance_adc(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('ADC'), '13')", "def test_distance(self):\n self.assertTrue(np.allclose(self.vectors.distance('dog.n.01', 'mammal.n.01'), 4.5278745))\n self.assertEqual(self.vectors.distance('dog.n.01', 'dog.n.01'), 0)", "def virtual_distance(self):\n conflict_zone_radio = 384.0\n path_width = 172.0\n right_turn_radio = path_width / 4.0\n left_turn_radio = 3 * path_width / 4.0\n initial_straight_section = conflict_zone_radio - path_width / 2.0\n if self.get_intention() == \"s\":\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_intention() == \"r\":\n # Calculate real virtual distance\n if self.get_virtual_x_position() <= initial_straight_section:\n virtual_distance_value = self.get_virtual_x_position()\n elif 
self.get_virtual_y_position() > -right_turn_radio:\n virtual_distance_value = (\n initial_straight_section + atan(\n (\n self.get_virtual_x_position() -\n initial_straight_section\n ) / (right_turn_radio + self.get_virtual_y_position())\n ) * right_turn_radio\n )\n else:\n virtual_distance_value = (\n initial_straight_section + pi * right_turn_radio / 2.0 -\n self.get_virtual_y_position() - right_turn_radio\n )\n\n a = path_width / 2.0\n b = right_turn_radio + path_width / 4.0\n c = pi * right_turn_radio / 2.0\n # Scale virtual distance\n if virtual_distance_value <= initial_straight_section + c:\n virtual_distance_value *= (\n (initial_straight_section + a + b) /\n (initial_straight_section + c)\n )\n else:\n virtual_distance_value += a + b - c\n\n else:\n # Calculate real virtual distance\n if self.get_virtual_x_position() <= initial_straight_section:\n virtual_distance_value = self.get_virtual_x_position()\n elif self.get_virtual_y_position() < left_turn_radio:\n virtual_distance_value = (\n initial_straight_section + atan(\n (\n self.get_virtual_x_position() -\n initial_straight_section\n ) / (\n left_turn_radio -\n self.get_virtual_y_position()\n )\n ) * left_turn_radio\n )\n else:\n virtual_distance_value = (\n initial_straight_section + pi * left_turn_radio / 2 +\n self.get_virtual_y_position() - left_turn_radio\n )\n\n a = path_width / 2\n b = right_turn_radio + path_width / 4\n c = pi * left_turn_radio / 2\n # Scale virtual distance\n if virtual_distance_value <= initial_straight_section + c:\n virtual_distance_value *= (\n (initial_straight_section + a + b) /\n (initial_straight_section + c)\n )\n else:\n virtual_distance_value += a + b - c\n\n return virtual_distance_value", "def dist(pos1, pos2):\n a, b = pos1\n c, d = pos2\n \n return sqrt((a-c)**2 + (b-d)**2)", "def distance(self):\n return Distance(length_of(self.position.au))", "def compute_distance(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n return norm", "def compute_distance(location_1, location_2):\n x = location_2.x - location_1.x\n y = location_2.y - location_1.y\n z = location_2.z - location_1.z\n norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps\n return norm", "def distance_between_points(item1, item2):\n return math.sqrt((item2[1] - item1[1]) ** 2 + (item2[0] - item1[0]) ** 2)", "def makeDecision(snake, food):\n head = snake.container[0]\n neighbors = [\n (\n head.x + snake.velocity * Agent.DIRECTIONS[i][0],\n head.y + snake.velocity * Agent.DIRECTIONS[i][1],\n )\n for i in range(4)\n ]\n\n try:\n if not len(Agent.current_path):\n Agent.current_path = Agent.aStar(snake, food)\n try:\n for i, neighbor in enumerate(neighbors):\n if neighbor == Agent.current_path[-1]:\n snake.direction = Agent.DIRECTIONS[i]\n Agent.current_path.pop()\n return\n except Exception as e:\n pass # empty list\n except Exception as e:\n print(e)\n return\n if not len(Agent.current_path):\n direction = snake.direction\n distance = math.inf\n for i, path in enumerate(neighbors):\n if Agent.manhattenDistance(path, (food.x, food.y)) < distance:\n \"\"\" Check for turn around attempts \"\"\"\n\n \"\"\" Check for the snake colliding with itself \"\"\"\n\n if Agent.collision(path, snake):\n continue\n direction = Agent.DIRECTIONS[i]\n distance = Agent.manhattenDistance(path, (food.x, food.y))\n\n snake.direction = direction", "def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = 
y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))", "def player_goal_distance(self) -> float:\n route = self.best_route\n return sum(route.values())", "def test_sense_distance(self):\n\n\t\tmeasurements = [29, 29, 28]\n\t\tself.driver.us_dist.side_effect = lambda x: measurements.pop()\n\t\texpected_measurement = int(ultrasonic_sensor_error(29))\n\n\t\tself.assertEqual(self.s.sense_distance(60), expected_measurement)\n\t\tself.mount.move.assert_called_once_with(x=60)", "def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)" ]
[ "0.6700603", "0.6521232", "0.6514296", "0.63147444", "0.62802625", "0.6160367", "0.61424226", "0.6136788", "0.61203325", "0.6113802", "0.6087659", "0.6070107", "0.6063656", "0.6058297", "0.60548335", "0.60548335", "0.60385036", "0.6023612", "0.60140026", "0.5991973", "0.59842086", "0.59711033", "0.5960644", "0.5938914", "0.5929656", "0.5924037", "0.59235173", "0.59184366", "0.590708", "0.5900615", "0.58991873", "0.58936745", "0.5874591", "0.5853819", "0.58509123", "0.5847685", "0.5829569", "0.5824646", "0.58149254", "0.58146715", "0.58099365", "0.58021206", "0.57953674", "0.5788851", "0.5772373", "0.5772132", "0.5772132", "0.57600135", "0.5752084", "0.5741089", "0.57359385", "0.5729583", "0.5722286", "0.57150024", "0.5713738", "0.5713195", "0.57101494", "0.5706441", "0.5703186", "0.56998295", "0.56913114", "0.56863624", "0.56811", "0.5676258", "0.5675534", "0.56695175", "0.5661823", "0.56548184", "0.5652498", "0.56521815", "0.5642941", "0.5638979", "0.5633598", "0.56297684", "0.5621661", "0.56182677", "0.56161755", "0.5612969", "0.5610632", "0.5606202", "0.5605228", "0.5604776", "0.55962986", "0.55958444", "0.55928826", "0.5591332", "0.5591108", "0.5585481", "0.558259", "0.5580917", "0.55793905", "0.55793905", "0.5577092", "0.55750495", "0.5572628", "0.5567344", "0.5566235", "0.5560493", "0.5560404", "0.5559449" ]
0.8156581
0
This function shows the start screen at the start of the game
def StartScreen():
    dis.blit(Sl, (dis_width/2 - 200, 0))
    message("Snake", 90, (23, 252, 3), (dis_width/2 - 100, 400))
    message("Press Enter To Start", 50, (143, 250, 3), (dis_width/2 - 190, 500))
    pygame.display.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gamemode_startscreen(self) -> None:\n self.__draw_startscreen()", "def displayStartScreen(self):\n self.model.buttons.draw(self.screen)\n Title=myfont.render(\"THE WORLD ENDS WITH COINS\", 1, random.choice(all_color))\n self.screen.blit(Title, (550, 300))\n pygame.display.update()", "def start(self):\n # asserts preconditions are met\n #assert self.validGameSettings()\n\n #draws initial welcome screen\n #self._text = GLabel(text=\"Press 'S' to Play\")\n #self._text.draw(self.view)\n\n # initializing instance variables\n self.setState(STATE_INACTIVE)\n self.setWave(None)\n self.setText(None)\n self.lastkeys = 0 #ADD MORE ATTRIBUTES\n\n # draws iniital welcome screen\n self.welcomeScreen()", "def show_start_screen():\n title_font = pygame.font.Font('freesansbold.ttf', 80)\n title_surface = title_font.render('Snake!', True, WHITE)\n title_rect = title_surface.get_rect()\n title_rect.center = (WINDOWWIDTH / 2, WINDOWHEIGHT / 2)\n DISPLAYSURF.fill(BGCOLOR)\n DISPLAYSURF.blit(title_surface, title_rect)", "def start_game(self):\n print(\"hi there, game started!\")\n self.draw()", "def start(self):\n self.save_checkpoint(\"setup\")\n\n logging.info(\"Starting game...\")\n body = render_message(\n \"welcome.html\",\n game_name=self.name,\n night_end=self.night_end.strftime(\"%I:%M %p\"),\n day_end=self.day_end.strftime(\"%I:%M %p\"),\n players=self.game.players,\n )\n self.send_message(mafia.events.PUBLIC, \"%s: Start\" % self.name, body)\n self.game.begin()\n self.started = True\n\n self.save_checkpoint(\"start\")", "def start_game(self):\n\n\t\tpass", "def startGame():\n #roundnumber\n eel.updateRoundNumber()\n # start page\n eel.updateStartPage([startPage.getTitle(), startPage.getUrl()])\n eel.updateStartPageDescription(startPage.getFirstSentence())\n # goal page\n eel.updateGoalPage([goalPage.getTitle(), goalPage.getUrl()])\n eel.updateGoalPageDescription(goalPage.getFirstSentence())\n # ui updates\n eel.updateCurrentPage(\n [wikiPageStackTrace[-1].getTitle(), wikiPageStackTrace[-1].getUrl()])\n eel.updateCurrentPageDescription(wikiPageStackTrace[-1].getFirstSentence())\n eel.printInPageList(wikiPageStackTrace[-1].getOnlyLinksListJS())\n # loader\n time.sleep(0.5)\n eel.hideLoader()", "def show_start_screen(self) -> tuple[SimulationParameters, bool, bool, bool]:\n pass", "def welcomeScreen(self):\n # creates welcome screen if state is STATE_INACTIVE\n if self.getState() == STATE_INACTIVE:\n label = GLabel(text=\"press 'F' to start\", x = GAME_WIDTH/2,\n y = GAME_HEIGHT/2, font_size = 50, font_name = 'arcade',\n linecolor = introcs.RGB(0,0,0))\n label.halign = 'center'\n label.valign = 'middle'\n self.setText(label)\n # welcome screen is None if state is not STATE_INACTIVE\n else:\n self.setText(None)\n # draws the welcome screen\n #self.getText().x = consts.GAME_WIDTH / 2\n #self.getText().y = consts.GAME_HEIGHT / 2\n self.draw()", "def display_start_screen(self, demo):\r\n\r\n # Set a timer for displaying demo gameplay\r\n DEMOEVENT = pg.USEREVENT + 2\r\n pg.time.set_timer(DEMOEVENT, DEMO_GAMEPLAY_TIMER)\r\n\r\n # Set the while loop for the start screen\r\n while True:\r\n self.surface.fill(self.settings.bg_color)\r\n self.surface.blit(self.title_text, self.title_text_rect)\r\n\r\n for event in pg.event.get():\r\n # Reset the game state in response to user input in case of demo gameplay\r\n if event.type == pg.KEYDOWN or event.type == pg.MOUSEBUTTONDOWN:\r\n self.ai_game.reset_level()\r\n self.stats.reset_stats()\r\n self.sb.prep_images()\r\n return\r\n # Blink the start game text at 
every BLINKEVENT\r\n if event.type == self.BLINKEVENT:\r\n self.blink_surface = next(self.blink_surfaces)\r\n # Run demo gameplay in response to a DEMOEVENT\r\n if event.type == DEMOEVENT:\r\n demo.run_demo()\r\n\r\n self.surface.blit(self.blink_surface, self.blink_rect)\r\n self.screen.blit(self.surface, (0, 0))\r\n pg.display.flip()", "def start_of_game(self):\n pass", "def start(self):\n self.menu()", "def startMenu():\n \n screenInf=screenInfo()\n pygame.init()\n screen = pygame.display.set_mode(screenInf.SCREEN_SIZE, FULLSCREEN, 32)\n pygame.mouse.set_visible(False)\n splash = SplashMenu(screen)\n menu = IntroMenu(screen) #####Create IntroMenu objectu\n \n running=True\n \n splash.drawSplashMenu() ####Splash only drawn once at beginning of game\n \n while running:\n menu.drawIntroMenu(screen) ####Draws menu to screen, controls game during this time\n currentGame = menu.getGameInfo()\n if(currentGame == None):\n exit()", "def start_screen(self):\n overlay = rectangle.Rectangle((0, 0, 0), (S['screen-width'], S['screen-height']))\n self.out_of_game_objects.append(overlay)\n # #\n yield 1\n #\n the = pgzero.actor.Actor('logo_the', pos=(S['screen-width'] / 2, -100 + S['logo-y-offset']))\n self.out_of_game_objects.append(the)\n pgzero.animation.Animation(the,\n pos=(S['screen-width'] / 2, S['screen-height'] / 2 - 100 + S['logo-y-offset']),\n tween='bounce_end')\n #\n yield 2\n #\n godel = pgzero.actor.Actor('logo_godel', pos=(-100, S['screen-height'] / 2 + S['logo-y-offset']))\n self.out_of_game_objects.append(godel)\n pgzero.animation.Animation(godel,\n pos=(S['screen-width'] / 2, S['screen-height'] / 2 + S['logo-y-offset']),\n tween='bounce_end')\n #\n yield 2\n #\n sentence = pgzero.actor.Actor('logo_sentence', pos=(S['screen-width'] / 2, S['screen-height'] + 100 + S['logo-y-offset']))\n self.out_of_game_objects.append(sentence)\n pgzero.animation.Animation(sentence,\n pos=(S['screen-width'] / 2, S['screen-height'] / 2 + 100 + S['logo-y-offset']),\n tween='bounce_end')\n #\n yield 2\n #\n t1 = text.Text('A pyweek 20 entry by Paul Paterson',\n center=(S['screen-width'] / 2, S['screen-height'] / 2 + 100),\n fontname='computerfont', color='yellow',\n )\n #\n t2 = text.Text('This game can only be played once and takes about 12 minutes '\n 'to complete.\\n\\nChoose wisely!',\n center=(S['screen-width'] / 2, S['screen-height'] / 2 + 200),\n fontname='computerfont', color='green',\n )\n t3 = text.Text('v{0}'.format(S['version']),\n center=(S['screen-width'] - 50, S['screen-height'] - 40),\n fontname='computerfont', color='grey', fontsize=12,\n )\n\n self.out_of_game_objects.extend([t1, t2, t3])\n #\n start = button.Button('start', 'start_button', center=(S['screen-width'] / 2, S['screen-height'] / 2 + 300))\n self.out_of_game_objects.append(start)\n credits_button = button.Button('credits', 'credits_button', center=(80, S['screen-height'] - 50))\n self.out_of_game_objects.append(credits_button)\n #\n credits_overlay = pgzero.actor.Actor('credits', center=(S['screen-width'] / 2, S['screen-height'] / 2))\n credits_overlay.visible = False\n self.out_of_game_objects.append(credits_overlay)\n #\n while True:\n if self.buttons_clicked:\n if self.buttons_clicked[0][0] == start:\n break\n if self.buttons_clicked[0][0] == credits_button:\n credits_overlay.visible = True\n #\n for i in range(5):\n yield 1\n if self.buttons_clicked:\n break\n #\n credits_overlay.visible = False\n yield 0\n#\n #\n self.log.info('Starting up')\n self.started = True\n self.out_of_game_objects.remove(t1)\n 
self.out_of_game_objects.remove(t2)\n self.out_of_game_objects.remove(t3)\n self.out_of_game_objects.remove(start)\n self.out_of_game_objects.remove(credits_button)\n self.out_of_game_objects.remove(credits_overlay)\n #\n pgzero.animation.Animation(overlay, pos=(S['screen-width'] / 2, -S['screen-height']), tween='accelerate')\n #\n yield 2\n #\n pgzero.animation.Animation(the, pos=(S['screen-width'] / 2, S['screen-height'] + 100), tween='accelerate')\n pgzero.animation.Animation(godel, pos=(S['screen-width'] / 2, S['screen-height'] + 100), tween='accelerate')\n pgzero.animation.Animation(sentence, pos=(S['screen-width'] / 2, S['screen-height'] + 100), tween='accelerate')\n #\n self.nextState(self.preamble())", "def create_start_screen(self):\r\n titleFont = pg.font.Font(\r\n TITLE_FONT, int(self.scale_factor * LARGE_FONT))\r\n font = pg.font.SysFont(None, int(self.scale_factor * SMALL_FONT))\r\n\r\n # Create and position the title text\r\n self.title_text = titleFont.render(\r\n GAME_TITLE, True, DARK_GREEN, self.settings.bg_color)\r\n self.title_text_rect = self.title_text.get_rect()\r\n self.title_text_rect.center = self.screen.get_rect().center\r\n self.title_text_rect.centery -= self.title_text_rect.height\r\n\r\n # Create and position blinking \"press any key\" text\r\n # Render the text to display\r\n self.on_text = font.render(\r\n START_GAME_TEXT, True, BLACK, self.settings.bg_color)\r\n # Position the rect object for the blinking text\r\n self.blink_rect = self.on_text.get_rect()\r\n self.blink_rect.center = self.screen.get_rect().center\r\n self.blink_rect.centery += ELEMENT_SPACING * self.title_text_rect.height\r\n # Set the off_text surface and fill it in with the background color\r\n self.off_text = pg.Surface(self.blink_rect.size)\r\n self.off_text.fill(self.settings.bg_color)\r\n # Create an iterator that repreatedly iterates over the two elements \"on_text\" and \"off_text\"\r\n self.blink_surfaces = cycle([self.on_text, self.off_text])\r\n # Create a variable to store the next value from the iterator\r\n self.blink_surface = next(self.blink_surfaces)\r\n # Define a new event (events are represented by integers, with pygame.USEREVENT have the highest value pygame uses)\r\n self.BLINKEVENT = pg.USEREVENT + 1\r\n # Set a timer to trigger a BLINKEVENT\r\n pg.time.set_timer(self.BLINKEVENT, BLINK_DURATION)\r\n\r\n # Hide the mouse cursor\r\n pg.mouse.set_visible(False)", "def start(self):\n running = True\n while running:\n k=self.Game.playgame()\n if k=='Exit':\n running = False\n continue\n elif k=='resume':\n continue\n elif k=='GameOver':\n o=self.gameover()\n if o=='newgame':\n self.Game=Game(self.Display)\n else:\n running = False\n while k=='Won':\n o=self.won()\n if o=='newgame':\n self.Game=Game(self.Display)\n break\n elif o==\"Exit\":\n output = self.Game.popup()\n if output == 'resume':\n self.Game.GameBoard.display()\n continue\n else:\n running = True\n break", "def display_welcome_view(screen):\n pygame.time.Clock().tick(30)\n welcome_background = pygame.image.load('resources//pictures//startb.jpg').convert()\n screen.blit(welcome_background, (0, 0))\n pygame.display.flip()", "def livesScreen(self):\n # creates welcome screen if state is STATE_INACTIVE\n if self.getState() == STATE_ACTIVE or self.getState() == STATE_PAUSED \\\n or self.getState() == STATE_COMPLETE:\n label = GLabel(text=\"Lives:\", x = GAME_WIDTH-150,\n y = GAME_HEIGHT-55, font_size = 30, font_name = 'arcade',\n linecolor = introcs.RGB(0,0,0))\n label.halign = 'center'\n label.valign = 'middle'\n 
self.setText(label)\n # welcome screen is None if state is not STATE_INACTIVE\n else:\n self.setText(None)\n # draws the welcome screen\n #self.getText().x = consts.GAME_WIDTH / 2\n #self.getText().y = consts.GAME_HEIGHT / 2\n self.draw()", "def start(self):\n self._state = STATE_INACTIVE\n self._game = None\n self._last_key_press = False\n self._last_n_press = False\n self._last_lose_life = False\n self._mssg = (GLabel(text=START_MSSG, x=GAME_WIDTH/2, y=GAME_HEIGHT/2, font_size=24))\n self.time = None\n self._points_mssg = None\n self._falling_points = []\n self._FP_mssg = None", "def start(self):\n\n # Call the protected _turn method to start the game\n self._turn()", "def start_game(self) -> None:\n self.init_game()\n self.play()", "def setup_screen():\n screen = Screen()\n screen.setup(width=600, height=600)\n screen.bgcolor(\"black\")\n screen.title(\"My Snake Game\")\n screen.tracer(0)\n return screen", "def start(self):\n self.timeStart = pygame.time.get_ticks()", "def printStartMsg(self):\n\n print(\"\\nSTARING THE GAME\")\n print(\"HAVE FUN!\\n\")", "def start(self):\n pygame.init()\n self.screen = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"PyStroke\")\n self.engines = [GameEngine(self.screen, self.e_e)] # add others here\n self.engine = self.engines[0]\n self.run()", "def start_screen(screen,wof_settings):\n text = ['Collect bitcoins and kill monsters.',\n '',\n 'Arrow or WASD keys to move, Space to throw a bomb.',\n 'Backspace to reset level, Esc to quit.',\n 'N for next level, B to go back a level.',\n '',\n 'Press any key to start the game or Esc to quit.']\n \n title_image = 'images/bitcoin.bmp'\n \n text_font = 'fonts/Future TimeSplitters.otf'\n text_font_size = 26\n displayTextToScreen(wof_settings,screen,title_image,text,text_font,text_font_size)", "def prepare_screen():\n pygame.init()\n screen = pygame.display.set_mode((800, 800))\n screen.fill((0,0,0))\n pygame.display.set_caption(\"Shearing\")\n return screen", "def start_new_game(self):\r\n\r\n self.initialize_game_params()\r\n self.timer = Timer(self.screen)\r\n self.mine_counter = MineCounter(self.num_of_mines, self.screen)\r\n self.reset_button = ResetButton(self.screen)\r\n self.high_score = HighScore(self.rows, self.cols, self.num_of_mines, self.screen)\r\n self.board = Board(self.rows, self.cols, self.num_of_mines, self.screen)\r\n self.play_game()", "def startGame():\n session[\"game\"] = clueLogic.game()\n return render_template(\n \"StartGame.html\",\n suspects=clueLogic.cards[\"suspects\"],\n weapons=clueLogic.cards[\"weapons\"],\n rooms=clueLogic.cards[\"rooms\"]\n )", "def winScreen(self):\n # creates welcome screen if state is STATE_INACTIVE\n if self.getState() == STATE_COMPLETE:\n label = GLabel(text=\"Congratulations! 
You win!\", x = GAME_WIDTH/2,\n y = 50, font_size = 50, font_name = 'arcade',\n linecolor = introcs.RGB(0,0,0))\n label.halign = 'center'\n label.valign = 'middle'\n self.setText(label)\n # welcome screen is None if state is not STATE_INACTIVE\n else:\n self.setText(None)\n # draws the welcome screen\n #self.getText().x = consts.GAME_WIDTH / 2\n #self.getText().y = consts.GAME_HEIGHT / 2\n self.draw()", "def start_game():\n logger.info(\"Clicking play button\")\n mouseclick(coords_play_final_button[0], coords_play_final_button[1])", "def display_screen(self):\n self.screen.blit(self.bg, (0, 0))\n pygame.display.update()", "def _initScreen(self):\n\n print \"DEBUG: Initializing Screen\"\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n Game.Screen = pygame.display.set_mode((Game.ScreenWidth, Game.ScreenHeight))", "def do_start_joined(self):\n\t\td = {\"state\": be.S_GAME,\n\t\t\t\t\"hosting\": False,\n\t\t\t\t\"uuid\": None,\n\t\t\t\t\"name\": self.game_name,\n\t\t\t\t\"nickname\": self.nickname,\n\t\t\t\t\"num_players\": self.num_players,\n\t\t\t\t\"boardsize\": self.boardsize}\n\t\tevent = pygame.event.Event(be.E_STATE, d)\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.renderer.color = (0, 0, 0, 0)", "def main():\n g = Game(800, 600)\n g.start()", "def show(self):\n screen_copy = screen.copy()\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == pygame.BUTTON_LEFT:\n pos = event.pos\n pos = (pos[0] - self.rect.x, pos[1] - self.rect.y)\n if self.menu_btn.rect.collidepoint(pos):\n return MAIN_MENU\n elif self.restart_level_btn.rect.collidepoint(pos):\n return RESTART_LEVEL\n game_panel_group.draw(self.surface)\n screen_copy.blit(self.surface, self.rect.topleft)\n screen.blit(screen_copy, (0, 0))\n if pygame.mouse.get_focused():\n cursor.show(screen)\n pygame.display.flip()", "def start_game(self):\n self.word_view.next_word()\n self.greeterboard.reset(msg=i18n.OUT_MSG_LUCK.format(self.player_name))\n self.keyboard.reset()\n self.init_game_metrics()", "def pausedScreen(self):\n # creates welcome screen if state is STATE_INACTIVE\n if self.getState() == STATE_PAUSED:\n label = GLabel(text=\"press 'F' to resume\", x = GAME_WIDTH/2, y = 50,\n font_size = 50, font_name = 'arcade',linecolor = introcs.RGB(0,0,0))\n\n label.halign = 'center'\n label.valign = 'middle'\n self.setText(label)\n # welcome screen is None if state is not STATE_INACTIVE\n else:\n self.setText(None)\n # draws the welcome screen\n #self.getText().x = consts.GAME_WIDTH / 2\n #self.getText().y = consts.GAME_HEIGHT / 2\n self.draw()", "def game_start(self):\r\n\t\tself._comm_server.broadcast_message(\"game-start\")\r\n\t\tself._is_game_started = True\r\n\t\tself._handlers[\"game-start\"].invoke()\r\n\t\t_logger.info(\"Game is started.\")", "def start_scene():\n from Menus.select_team import SelectTeam # placing the import in a func makes python import that module only when needed\n from Menus.button import Button\n from Display.display import Display\n select_team = SelectTeam()\n display = Display()\n # start scene audio\n pygame.mixer.music.load('assets/audio/menu/main_menu.wav')\n pygame.mixer.music.set_volume(0.3)\n pygame.mixer.music.play(-1)\n\n # loading main menu background\n main_menu_img = pygame.image.load(\"assets/sprites/Backgrounds/main-menu.png\").convert()\n\n # creating start button\n start_btn = Button(pygame.image.load(\"assets/sprites/Buttons/start-game.png\").convert(), (410, 380), (200, 
80))\n\n while True:\n display.display_background(main_menu_img)\n display.display_text(\"Soccer Match Simulator 2020\", display.font_title, (250, 300))\n start_btn.draw()\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n # if start game is clicked move to SelectTeam class\n start_btn.event_handler(event, select_team.select_home)\n \n pygame.display.update()", "def start(self) -> None:\n self.execute_startup_menu()\n self.execute_main_menu()", "def deathScreen(self):\n # creates welcome screen if state is STATE_INACTIVE\n if self.getState() == STATE_COMPLETE:\n label = GLabel(text=\"You Lose! Get dunked on!\", x = GAME_WIDTH/2,\n y = 50, font_size = 50, font_name = 'arcade',\n linecolor = introcs.RGB(0,0,0))\n label.halign = 'center'\n label.valign = 'middle'\n self.setText(label)\n # welcome screen is None if state is not STATE_INACTIVE\n else:\n self.setText(None)\n # draws the welcome screen\n #self.getText().x = consts.GAME_WIDTH / 2\n #self.getText().y = consts.GAME_HEIGHT / 2\n self.draw()", "def start(self):\n # Call the protected _turn method to start the game\n self._end_time = time.time() + 60\n self._turn()", "def initialize_screen(self):\r\n\r\n pygame.init()\r\n pygame.display.set_caption('Minesweeper')\r\n\r\n screen_width = max(display_params.RECT_SIZE * self.cols + 2 * display_params.MARGIN_SIDE,\r\n display_params.MIN_SCREEN_WIDTH)\r\n screen_height = display_params.RECT_SIZE * self.rows + display_params.MARGIN_TOP + \\\r\n display_params.MARGIN_BOTTOM\r\n self.screen = pygame.display.set_mode((screen_width, screen_height))\r\n self.screen.fill(colors.NAVYBLUE)\r\n\r\n pygame.display.update()", "def start(self):\n self.show_greeting()\n self.read_frame()", "def main():\n g = DemoGame(800, 600)\n g.start()", "def load_startup_screen(menu, play_button, high_scores_button):\r\n menu.start_menu(play_button, high_scores_button)", "def start(self):\n print(\"*\"*20)\n print(\"*\" + \" \"*18 + \"*\")\n print(\"*\" + \" \"*4 + \"Connect 4X\" + \" \"*4 + \"*\")\n print(\"*\" + \" \" * 18 + \"*\")\n print(\"*\" * 20)\n print(\"\\nConsole Version 1.0.0\\n\")\n self.print_menu()\n self.get_input()", "def on_start(self):\r\n # This adjust the recipe tiles to the correct starting width:\r\n self.update_tile_width()\r\n # This searches the database in order to find all recipes and generate Tiles:\r\n self.update_tile_menu()\r\n toast('Welcome!', 3)", "def setup(self):\n # Create your sprites and sprite lists here\n self.game: Game = Game(SCREEN_WIDTH, SCREEN_HEIGHT, TILE_SIZE, 1, grid_layers = 4)\n self.game.game_message = \"Lead the Rabbit home\"\n\n # show the menu so that we see the instructions\n self.game.menu.button_list[0].text = \"Start\"\n self.game.menu.is_visible = True", "def start():\n display_board()\n print(\"\\n\")\n y_n_prompt()", "def move_to_start(self):\n self.pos = (SCREEN_WIDTH / 2, SCREEN_HEIGHT - 64)", "def _start(self):\n\n print \"DEBUG: Starting Game\"\n nextFrameTime = 0\n deltaFrameTime = 1000 / Game.FPSLimit\n\n # Main Loop\n try:\n while True:\n self._handleEvents()\n\n currentTime = pygame.time.get_ticks()\n if ((nextFrameTime - currentTime) <= 0):\n pygame.display.flip()\n self.frameCount += 1\n self._nextFrame()\n self._drawFrame()\n nextFrameTime = currentTime + deltaFrameTime\n\n pygame.time.delay(1)\n finally:\n pygame.quit()", "def tellIfStarted(self):\n if self.game_number == 1:\n self.welcome()\n else:\n self.tellGameNumber()", "def home_screen(screen):\n start = pygame.image.load('start.png')\n font = 
pygame.font.Font(None, 32)\n logo = pygame.image.load(\"CVPAINTLOGO.png\")\n logo = pygame.transform.scale(logo, (250, 200))\n instructions = pygame.image.load('instructions.png')\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN: # exit home screen if user clicks start button\n if pygame.mouse.get_pos() >= (225, 350):\n if pygame.mouse.get_pos() <= (350,400):\n running = False\n return True\n if event.type == KEYDOWN: # can quit from the home screen with q\n if event.key == K_q:\n pygame.quit()\n cv2.destroyAllWindows()\n return False\n screen.fill(pygame.Color(255, 255, 255))\n screen.blit(logo, (190,50)) # draw images/buttons on home screen\n screen.blit(start, (225,350))\n screen.blit(instructions, (175, 275))\n pygame.display.update()", "def play(self):\n self.window.run(LevelMenuScreen())", "def start_game(self):\n return self.do_actions('before_game')", "def initialize(self):\n result = pygame.init()\n pygame.font.init()\n pygame.display.set_caption('gomoku TDD')\n self.screen = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))\n self.clock = pygame.time.Clock()\n self.smallfont = pygame.font.Font(None, 40)\n self.isinitialized = True", "def welcome_screen(self):\n print()\n print('*M*A*S*T*E*R*M*I*N*D*')\n print('Welcome to Mastermind!')\n print('The goal of this game is to guess the secret code.\\n' +\n 'You have as many guesses as you need.\\n' +\n 'After every guess you will see a result of that guess.\\n' +\n 'A result may look like this:\\n' +\n 'Your guess: 1,2,3,4\\n' +\n \"The result: ['1', '-', 'C', '-']\")\n print('This means the following:\\n' +\n 'The first number, 1, is in the correct position\\n' +\n 'The second number, 2, is not included in the secret code\\n' +\n 'The third number, 3,' + \n ' is in the code but is in the wrong position\\n' +\n 'The fourth number, 4, is not included in the code')\n print('When you have the correct numbers ' +\n 'in the right place, you win!\\n' +\n 'Try to beat the game in as few guesses as possible.\\n' +\n 'The first thing you will do is decide if' +\n 'you want standard or custom game.\\n' +\n 'Only the standard game can save you highscore')", "def startGame(anzahlSpieler):\n print(\"OK]\")\n print(\"INFO: verteile Karten an \",str(anzahlSpieler),\" Spieler\",end=\"..[\")\n verteileKarten(anzahlSpieler)\n print(\"OK]\")", "def open(self):\n windowFlags = self.getWindowFlags(self.settings)\n self.surface = pygame.display.set_mode(self._resolution, windowFlags)\n self._printVideoInfo(pygame.display.Info())\n logger.info(\"Initialized display with driver: \" + pygame.display.get_driver())\n\n self.surface.fill(self._skin.guiColor(\"Background\"))\n self._initializePanels(self._resolution, self._skin)\n pygame.display.flip()\n\n self._statusLoop.statusProvider = self.getStatusProvider(self.settings)", "def start_game(self):\n self._puzzle.get_puzzle()\n self._do_outputs()\n\n while self._keep_playing:\n print(\"\")\n print(\"+-----+-----+-----\")\n print(\"\")\n self._get_inputs()\n self._do_updates()\n self._do_outputs()\n print(\"+-----+-----+-----\")", "def preGameScreen(self) -> None:\n\n # load all images\n # created with: https://de.flamingtext.com/Free-Logo-Designs/\n logo = Image(Configuration.windowWidth / 2 - 750 / 2, Configuration.windowHeight / 4 - 120 / 2, (750, 120),\n \"PongLogo.png\", pathToImage=\"images/Pong/\")\n keys_player_one = Image(Configuration.windowWidth / 4 - 150, Configuration.windowHeight * 3 / 4 - 50,\n (300, 100),\n \"AandD.png\", 
pathToImage=\"images/Pong/\")\n keys_player_two = Image(Configuration.windowWidth * 3 / 4 - 150, Configuration.windowHeight * 3 / 4 - 50,\n (300, 100),\n \"ArrowLeftRight.png\", pathToImage=\"images/Pong/\")\n\n # draw text and images\n self.surface.fill(Colors.Black)\n self.drawImageOnSurface(logo)\n self.drawImageOnSurface(keys_player_one)\n if not self.hasComputerPlayer: # only draw the control of the second player, if he isn´t a computer player\n self.drawImageOnSurface(keys_player_two)\n self.drawTextOnSurface(\"First player that reaches 1000 points wins!\",\n (Configuration.windowWidth / 2, Configuration.windowHeight / 2), Colors.ByteGreen,\n font=self.font)\n\n self.drawTextOnSurface(\"Controls\",\n (Configuration.windowWidth / 2, Configuration.windowHeight * 3 / 4), Colors.ByteGreen,\n font=self.font)\n\n super().updateScreen() # display images on screen\n\n logger.info(\"Displaying prescreen animation\")\n\n sleep(4) # wait for seconds", "def welcomeScreen():\r\n\r\n playerx = int(SCREENWIDTH/5)\r\n playery = int((SCREENHEIGHT - GAME_SPRITES['player'].get_height())/2)\r\n messagex = int((SCREENWIDTH - GAME_SPRITES['message'].get_width())/2)\r\n messagey = int(SCREENHEIGHT*0.13)\r\n basex = 0\r\n while True:\r\n for event in pygame.event.get():\r\n # if user clicks on cross button, close the game\r\n if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\r\n pygame.quit()\r\n sys.exit()\r\n\r\n # If the user presses space or up key, start the game for them\r\n elif event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):\r\n return\r\n else:\r\n SCREEN.blit(GAME_SPRITES['background'], (0, 0)) \r\n SCREEN.blit(GAME_SPRITES['player'], (playerx, playery)) \r\n SCREEN.blit(GAME_SPRITES['message'], (messagex,messagey )) \r\n SCREEN.blit(GAME_SPRITES['base'], (basex, GROUNDY)) \r\n pygame.display.update()\r\n FPSCLOCK.tick(FPS)", "def start():\n # Have the car begin at a stop\n rc.drive.stop()\n # Print start message\n print(\">> Lab 4B - LIDAR Wall Following\")", "def start(self):\n pygame.init()\n self.screen = pygame.display.set_mode((self.width, self.height))\n pygame.display.set_caption(\"PyStroke Demo\")\n self.engines = [DemoGameEngine(self.screen, self.e_e)] # add others here\n self.engine = self.engines[0]\n self.run()", "def start_game(self) -> None:\n self.check_edgework_view_attached()\n self.timer.start_timing()\n self._edgework_view.start_timing()", "def startScreen():\n\n # Position the title image.\n titleRect = globals.IMAGESDICT['title'].get_rect()\n topCoord = 50 # topCoord tracks where to position the top of the text\n titleRect.top = topCoord\n titleRect.centerx = globals.HALFWINWIDTH\n topCoord += titleRect.height\n\n # Unfortunately, Pygame's font & text system only shows one line at\n # a time, so we can't use strings with \\n newline characters in them.\n # So we will use a list with each line in it.\n instructionText = ['Push the stars over the marks.',\n 'Arrow keys to move, WASD for camera control, P to change character.',\n 'Backspace to reset level, Esc to quit.',\n 'N for next level, B to go back a level.']\n\n # Start with drawing a blank color to the entire window:\n DISPLAYSURF.fill(BGCOLOR)\n\n # Draw the title image to the window:\n DISPLAYSURF.blit(globals.IMAGESDICT['title'], titleRect)\n\n # Position and draw the text.\n for i in range(len(instructionText)):\n instSurf = BASICFONT.render(instructionText[i], 1, TEXTCOLOR)\n instRect = instSurf.get_rect()\n topCoord += 10 # 10 pixels will go in between each line of 
text.\n instRect.top = topCoord\n instRect.centerx = globals.HALFWINWIDTH\n topCoord += instRect.height # Adjust for the height of the line.\n DISPLAYSURF.blit(instSurf, instRect)\n\n while True: # Main loop for the start screen.\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n terminate()\n return # user has pressed a key, so return.\n\n # Display the DISPLAYSURF contents to the actual screen.\n pygame.display.update()\n FPSCLOCK.tick()", "def enable_start(self, *args):\n but_start.configure(state=GL.NORMAL)", "def play_game():\n\n _initial_deal()\n\n main_window.mainloop()", "def on_show(self):\n arcade.set_background_color(arcade.csscolor.DARK_SLATE_BLUE)\n\n # Reset the viewport, necessary if we have a scrolling game and we need\n # to reset the viewport back to the start so we can see what we draw.\n arcade.set_viewport(0, SCREEN_WIDTH - 1, 0, SCREEN_HEIGHT - 1)\n\n \n \n\n #self.player_music_intro.EOS_LOOP = 'loop'\n self.player_music_intro = arcade.play_sound(self.music_intro)\n \n\n print(\"type(self.player_music_intro) : \", type(self.player_music_intro))", "def start():\r\n introduction()\r\n score = duck_shooting1()\r\n dogs()\r\n play_again(score)", "def welcome_screen():\n playerx = int(SCREENWIDTH/5)\n playery = int((SCREENHEIGHT - GAME_SPRITES['player'].get_height()) / 2) # To display player on the center\n messagex = int((SCREENWIDTH - GAME_SPRITES['message'].get_width()) / 2)\n messagey = int(SCREENHEIGHT*0.13)\n basex = 0\n while True:\n for event in pygame.event.get(): # when user performs an event on game\n # if user clicks on cross (x) button, then close the game\n if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n\n # if user presser space or up arrow key, start the game\n elif event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):\n return\n else:\n SCREEN.blit(GAME_SPRITES['background'], (0, 0))\n SCREEN.blit(GAME_SPRITES['player'], (playerx, playery))\n SCREEN.blit(GAME_SPRITES['message'], (messagex, messagey))\n SCREEN.blit(GAME_SPRITES['base'], (basex, GROUNDY))\n pygame.display.update()\n FPSCLOCK.tick(FPS)", "def do_start_hosted(self):\n\t\tself.game_name = self.e_gamename.text\n\t\tself.num_players = (1, int(self.e_players.text))\n\t\tself.boardsize = (int(self.e_boardw.text), int(self.e_boardh.text))\n\n\t\td = {\"state\": be.S_GAME,\n\t\t\t\t\"hosting\": True,\n\t\t\t\t\"uuid\": None,\n\t\t\t\t\"name\": self.game_name,\n\t\t\t\t\"nickname\": self.nickname,\n\t\t\t\t\"num_players\": self.num_players,\n\t\t\t\t\"boardsize\": self.boardsize}\n\t\tevent = pygame.event.Event(be.E_STATE, d)\n\t\tpygame.event.post(event)\n\n\t\tself.hide_all()\n\t\tself.renderer.color = (0, 0, 0, 0)", "def loadingScreen(self):\n self.continueButton = pygame.image.load(Directory().get_directory() + '/images/intro/play.png')\n self.continueButton2 = pygame.image.load(Directory().get_directory() + '/images/intro/play2.png')\n\n # pygame.display.set_caption(\"Master of Thieves\")\n self.background_image = pygame.transform.scale(pygame.image.load(Directory().get_directory() + \"/images/backgrounds/background0.png\"), (self.WIN_WIDTH, self.WIN_HEIGHT)) # Tutorial background\n self.screen.blit(self.background_image, (0,0))\n self.showTimeTaken()\n pygame.mouse.set_visible(True)\n self.m1 = self.screen.blit(self.continueButton, (0, 75))\n self.loadingStatus = True\n while self.loadingStatus == True:\n for e in pygame.event.get():\n 
self.pos = pygame.mouse.get_pos()\n if e.type == QUIT:\n exit()\n if e.type == MOUSEMOTION:\n if self.m1.collidepoint(self.pos): # Scrolling over the Main Menu button, so change the image so the user knows they are on it\n self.screen.blit(self.continueButton2, (0, 75))\n else:\n self.screen.blit(self.continueButton, (0, 75)) # Change back to the normal image since the user is no longer on it\n if e.type == MOUSEBUTTONDOWN:\n if self.m1.collidepoint(self.pos):\n self.loadingStatus = False\n pygame.display.update()", "def start(self):\n if self.debug:\n print(\"%s start\" % self.name)", "def _blank_screen(self):\n self._screen.fill(self._bgcolor)\n pygame.display.update()", "def displayEndingScreen(self):\n self.screen.fill(blackColor)\n\n Title = myfont.render(\"Player %s Wins\"%(self.model.winner), 1, random.choice(all_color))\n self.screen.blit(Title, (670, 300))\n pygame.display.update()", "def play(self):\n print('Playing game...')", "def setupNewGame(self):\r\n self.level = 1\r\n self.num_cows = 2\r\n self.num_farmers = 1\r\n self.levelHeading = Text(self.gameDisplay, 120, 425, 175, self.light_orange, \"Farm 1\")\r\n self.shield_indicator.image = self.greenShield\r\n updatedHeading = self.levelHeading\r\n self.startUX[0] = updatedHeading", "def startGame(self):\n\n\t\tfor name in self.players.keys():\n\t\t\tself.startPlayerGame((name, 0))\n\t\tself.setupGuiSignals()", "def startGame():\n\n\tprint(\"\\nOK! Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()", "def game_initiate(self, win):\n font = pygame.font.SysFont('comicsans', 70)\n\n pygame.draw.rect(win, white, (0, screen_height // 4, screen_width, screen_height // 2))\n text = font.render('ROCK!', 1, black)\n win.blit(text, (180, 300))\n self.display_player('rock', win)\n self.display_computer('rock', win)\n pygame.display.update()\n pygame.time.delay(500)\n\n pygame.draw.rect(win, white, (0, screen_height // 4, screen_width, screen_height // 2))\n text = font.render('PAPER!', 1, black)\n win.blit(text, (170, 300))\n self.display_player('paper', win)\n self.display_computer('paper', win)\n pygame.display.update()\n pygame.time.delay(500)\n\n pygame.draw.rect(win, white, (0, screen_height // 4, screen_width, screen_height // 2))\n text = font.render('SCISSOR!', 1, black)\n win.blit(text, (140, 300))\n self.display_player('scissor', win)\n self.display_computer('scissor', win)\n pygame.display.update()\n pygame.time.delay(500)\n\n pygame.draw.rect(win, white, (0, screen_height // 4, screen_width, screen_height // 2))\n text = font.render('SHOOT!', 1, black)\n win.blit(text, (165, 300))", "def start_game(self, **kwargs):\n\n success, info = self.gms.start_game(\n player=kwargs.get('player', 'x'),\n first_turn=raw_input('Would you like to go first? 
y/n\\n') == 'y'\n )\n if success:\n if info['status_code'] == core_constants.GAME_STATUS_HUMAN_MOVE_REQUIRED:\n print(self.gms.game.get_board_state_pretty())\n self.play_human_move()\n else:\n print(info['messages'][0])", "def begin(self):\n # Add the close listener.\n engine.Engine.input_handler.add_listener(engine.CloseOnEscapeOrQuit())\n # Play the game music.\n engine.Engine.stop_sound(\"MenuMusic\")\n engine.Engine.play_sound(\"GameMusic\", -1, 50)", "def welcome_screen(self):\n print()\n print('P*O*K*E*R')\n print('Welcome to a 5-card poker game,\\n' +\n 'The goal is the get a better hand than the AI.')\n print('To do this you get one chance to swap cards' +\n 'that are in your hand')\n print('You swap like this:\\n' +\n '1. Choose how many cards you want to swap\\n' +\n '2. Write the number of the card(s) you want to swap, like this:\\n' +\n 'If you want to swap card 2, type in 2.\\n' +\n 'If you want to swap card 1 and 4, type 1,4')\n print('Next both your and AI hand is shown,\\n' +\n 'and the winner is declared.')\n print('For information on what hand beats what, \\n' +\n 'and what happens when both players have an equally good hand,\\n' +\n 'please follow the link below:\\n' +\n 'https://github.com/oljung/portfolio-project-three\\n' +\n 'NOTE! Ctrl + c will terminate the app, use right click to copy')\n message = 'Would you like to play a round? Y(es) or N(o): '\n answer = InputHandler.input_bool(message)\n if answer:\n self.run_game()", "def draw_screen(self):\n\t\tself.current_screen.draw_screen(self.master_screen)", "def start_game() -> None:\n rows = get_int()\n cols = get_int()\n state = game.GameState(rows, cols)\n\n line = next_line()\n if line == 'CONTENTS':\n rowList = []\n for i in range(rows):\n row = []\n line = raw_next_line()\n for index in range(cols):\n row.append(line[index])\n rowList.append(row)\n state.set_board_contents(rowList)\n\n while True:\n _display_board(state)\n line = next_line()\n if line == 'Q':\n return\n if line == '':\n if state.tick():\n _display_board(state)\n break\n else:\n _process_command(line, state)\n print('GAME OVER')", "def welcome_screen():\r\n # player_position_at_x = int(SCREENWIDTH/5)\r\n # player_position_at_y = int(SCREENHEIGHT - GAME_SPRITES['player'].get_height())/2 # (H-h)/2\r\n message_screen_at_x = int(SCREENWIDTH - GAME_SPRITES['message'].get_height())/2+40\r\n # 40 is offset value which I have set after running game\r\n message_screen_at_y = int(SCREENHEIGHT * 0.25)\r\n base_at_x = 0\r\n\r\n while True:\r\n for event in pygame.event.get():\r\n # if user clicks on cross button, close the game\r\n if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\r\n pygame.quit()\r\n sys.exit()\r\n # If the user presses space or up key, start the game for them\r\n elif event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):\r\n return\r\n else:\r\n SCREEN.blit(GAME_SPRITES['background'], (0, 0))\r\n # SCREEN.blit(GAME_SPRITES['player'], (player_position_at_x, player_position_at_y))\r\n SCREEN.blit(GAME_SPRITES['message'], (message_screen_at_x, message_screen_at_y))\r\n SCREEN.blit(GAME_SPRITES['base'], (base_at_x, GROUND_Y))\r\n pygame.display.update()\r\n FPS_CLOCK.tick(FPS)", "def mainmenu_background():\n gameDisplay.fill((40, 0, 40))", "def mainmenu_background():\n gameDisplay.fill((40, 0, 40))", "def start(self):\n for game_object in self.game_objects:\n game_object.start()\n # end for\n self.time = time.time()\n self.paused = False\n self.running = True\n print 'GAME STARTED'", "def 
start_new_game(self, mode): \n self.display.clear() \n #self.ui.hide()\n if self.selected_speed == \"speed Slow\":\n self.game_manager.set_players_speed(1.9)\n elif self.selected_speed == \"speed Medium\":\n self.game_manager.set_players_speed(3)\n elif self.selected_speed == \"speed Fast\":\n self.game_manager.set_players_speed(5)\n self.game_manager = GameManager(self.display, self.ui, mode, GameState.Running, self.game_manager.player1, self.game_manager.player2)", "def menu(self):\n from mainmenu import Menu\n gm = Menu(self.screen)\n gm.run()", "def start():", "def start():", "def start():", "def start():", "async def on_start(self):\n m = \"**{}** has started a game of {}! To participate, say `I`! **{} players needed.**\".format(\n self.message.author.display_name, self.name, self.num)\n await client.say(self.message, m)" ]
[ "0.8430267", "0.8425534", "0.8040839", "0.80370176", "0.7912492", "0.7737424", "0.76517355", "0.739627", "0.7327332", "0.7277575", "0.72326416", "0.7161845", "0.71366405", "0.7117598", "0.7070147", "0.70684344", "0.70596254", "0.70382", "0.70099735", "0.69944596", "0.69934815", "0.6976644", "0.6948135", "0.69395345", "0.69348013", "0.6933037", "0.6890955", "0.68597776", "0.6840683", "0.6833574", "0.682478", "0.6801066", "0.67930126", "0.6770538", "0.67591363", "0.6757105", "0.6756786", "0.67050904", "0.6677936", "0.6672787", "0.6665311", "0.66615206", "0.66572654", "0.66398543", "0.66317624", "0.66299385", "0.6607976", "0.6603957", "0.66009885", "0.6598867", "0.6589794", "0.6586749", "0.6576357", "0.65746874", "0.65740836", "0.65618837", "0.65553474", "0.65517324", "0.65368783", "0.653155", "0.6527041", "0.6526772", "0.6509947", "0.65082556", "0.6492175", "0.647244", "0.64672226", "0.6465931", "0.6463138", "0.64463675", "0.64447516", "0.64435524", "0.64358705", "0.64348143", "0.6428776", "0.64280254", "0.642678", "0.6426657", "0.6423616", "0.6421728", "0.6415885", "0.64096206", "0.6405029", "0.6404742", "0.6399829", "0.6395506", "0.63904834", "0.6388624", "0.6378749", "0.63659656", "0.63639605", "0.63639605", "0.63543", "0.6344479", "0.633912", "0.63286406", "0.63286406", "0.63286406", "0.63286406", "0.6324091" ]
0.7316806
9
This function shows an end screen if the player loses the game
def EndScreen(score):
    message("Game Over", 100, (255, 16, 16), (dis_width/2 - 210, dis_height/2 - 100))
    message("Press Enter to Try Again and Esc to Exit", 30, (50, 255, 100), (dis_width/2 - 250, dis_height/2 + 50))
    last_score = "Your Score is : " + str(score - 4)
    message(last_score, 50, (255, 255, 255), (dis_width/2 - 170, dis_height/2))
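Note: the message() helper that EndScreen() calls is not defined anywhere in this record. Below is a minimal sketch of what it plausibly looks like, inferred only from the call sites above; the font name "bahnschrift", the global display surface dis, and the exact signature are assumptions, not confirmed by the source.

import pygame

def message(text, size, color, position):
    # Render the text at the requested size and blit it onto the global
    # display surface at the given (x, y) position. The font name is a
    # placeholder; any installed system font would work here.
    font = pygame.font.SysFont("bahnschrift", size)
    surface = font.render(text, True, color)
    dis.blit(surface, position)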
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_end_game(self):\n game_view = self.get_view.get_game_view\n character = self.model.get_character\n\n if character.alive:\n game_view.game_win()\n else:\n game_view.game_over()\n\n game_view.update_display()", "def end_game(self):\n pygame.event.clear()\n self.screen.fill(BLACK)\n self.show_on_screen(\"GAME OVER\", (500, 600), font_size=50)\n self.show_on_screen(\"Press \\\"N\\\" to start a new game\", (500, 650), font_size=30)\n self.show_on_screen(\"Press \\\"ESC\\\" to exit\", (500, 710), font_size=30)\n self.show_on_screen(\"SCORE: \" + str(self.score), (500, 560), font_size=50)\n pygame.display.flip()\n\n # clears previously pressed key\n pygame.event.wait()\n while True:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == pygame.K_n:\n self.reset_lander()\n self.play()", "def end_game(self,player,color):\n black = (0, 0, 0)\n font = pygame.font.Font(os.path.join(os.path.dirname(os.path.realpath(__file__)),'TRON.TTF'), 25)\n label1= font.render(player + \" WINS!\", 1, color)\n label2 = font.render(\"Press Space to Restart\", 1, (255,255,255))\n self.screen.fill(black)\n self.screen.blit(label1,(185,100))\n self.screen.blit(label2,(43,200))\n pygame.display.flip()\n self.game_over = True\n for player in self.players:\n player.dir = \"None\"", "def endGame(self):\n pass", "def EndGame(self):\n check_endgame = not self.player.getPlayer().isGeneralExist()\n\n return check_endgame", "def endScreen(self, screen):\n\t\tmyfont = pygame.font.SysFont(\"Calibri\", 69)\n\t\tendLine1 = myfont.render(\"GAME OVER\", 1, (0, 0, 0))\n\t\tmyfont = pygame.font.SysFont(\"Arial\", 39)\n\t\tendLine2 = myfont.render(\"Final Score: \" + str(self.points), 1, (0, 0, 0))\n\t\tmyfont = pygame.font.SysFont(\"Arial\", 30)\n\t\tprintTime = myfont.render(\"TIME: \" +str(self.minutes) + \":\" + str(self.seconds), 1, (0,0,0))\n\t\tif self.seconds < 10:\n\t\t\tprintTime = myfont.render(\"TIME: \" +str(self.minutes) + \":0\" + str(self.seconds), 1, (0,0,0))\n\t\tscreen.fill((195,0,0))\n\t\tscreen.blit(endLine1, (150, 200))\n\t\tscreen.blit(endLine2, (175, 260))\n\t\tscreen.blit(printTime, (210, 320))\n\t\tpygame.display.flip()", "def displayEndingScreen(self):\n self.screen.fill(blackColor)\n\n Title = myfont.render(\"Player %s Wins\"%(self.model.winner), 1, random.choice(all_color))\n self.screen.blit(Title, (670, 300))\n pygame.display.update()", "def leave(self):\n p = GameOverPopup(self)\n p.open()", "def end(self):\n winners = mafia.str_player_list(self.game.winners())\n logging.info(\"Game over! 
Winners: %s\" % winners)\n\n subject = \"%s: The End\" % self.name\n body = \"Game over!\\n\\nCongratulations to %s for a well \" \\\n \"(or poorly; I can't tell) played game!\" % winners\n self.send_message(mafia.events.PUBLIC, subject, body)", "def end_of_game(self):\n end_game = pyip.inputYesNo(f'\\nDo you want to play again?: ')\n\n if end_game == 'no':\n print('\\n-- GAME OVER --')\n sys.exit()\n elif end_game == 'yes':\n self.game_counter += 1", "def end_game(self):\n\n # End the game\n if game.game_over is False:\n\n correct = game.endscore()\n self.update()\n self.time = self.timer()\n\n # Loop through every space and reveal atoms and guess results.\n won = 0\n missed = 0\n board = self.ids.board\n for item in board.children:\n # Disable spaces to prevent them from being pressed.\n item.disabled = True\n item.disabled_color = item.color\n if isinstance(item, Space) is True:\n number = int(item.number)\n space = game.spacelist[number]\n # Guessed right\n if space.correct is True:\n item.text = 'O'\n item.color = item.disabled_color = scheme.green\n # Guessed wrong\n elif space.guess is True and space.correct is False:\n item.text = 'X'\n item.color = item.disabled_color = scheme.black\n missed += 1\n # Missed atom\n elif space.atom is True and space.guess is False:\n item.text = 'O'\n item.color = item.disabled_color = scheme.red\n\n for i in range(correct):\n self.ids['tracker' + str(i + 1)].color = scheme.green\n\n # Update end button\n if missed == 0:\n won = 1\n text = 'you found them all!'\n elif missed == 1:\n text = 'you missed an atom!'\n elif missed == 5:\n text = 'you missed \\'em all!'\n else:\n text = 'you missed ' + str(missed) + ' atoms!'\n\n self.ids.end_button.text = text\n\n # Prep and send to end screen\n elif game.game_over is True:\n end_screen = sm.get_screen('end_screen')\n end_screen.ids.end_score.text = str(game.score)\n end_screen.ids.time.text = str(self.time)\n\n sm.current = 'end_screen'", "def endGame(self, msg):\n title = \"Game Over\"\n QMessageBox.information(self, title, msg)\n self.reset()", "def end_game(self):\n self.game.stop_running()", "def end(self):\n # Update all the things.\n end_font = pygame.font.SysFont(*END_FONT)\n final_score = self.player.nest.acorn_count\n message = \"Game over! Final score: {0}\".format(final_score)\n text_surf = end_font.render(message, True, FONT_COLOUR)\n text_rect = text_surf.get_rect()\n text_rect.center = (SCREEN.width // 2, SCREEN.height // 2)\n\n # Draw all the things.\n self.screen_surf.fill(BKGD_COLOUR)\n self.screen_surf.blit(text_surf, text_rect)\n\n # Render the screen.\n pygame.display.update()\n\n # The main game loop.\n while self.mode is WorldMode.end:\n self.handle_events()", "def end_screen(win):\n\tpass", "def endgame(winner):", "def endGame(self, msg, win):\n elapsedTime = time.time() - self.startTime\n readableTime = str(int((elapsedTime / 60) / 60))\n readableTime += \":\" + str(int(elapsedTime / 60))\n readableTime += \":\" + str(elapsedTime % 60)[0:6]\n msg +=\"Time: \" + readableTime\n self.revealBombs(win)\n self.flagLabel.configure(text=\"Flags: \"+str(self.numFlags))\n messagebox.showinfo('Game Over', msg)", "def endGame(self, message):\n print(self.board)\n print(\"Game over! \" + message)\n self.gameOver = True", "def check_end(self, player):\n if all(tile.player==player for tile in board):\n board.draw()\n print(nl, self.winmsg % player)\n sys.exit()", "def end_game(self):\n self.end_writer.write(f\"You have won with {self.score} points! \" \\\n \"Do you want to play again? 
(y/n)\", font=(\"Arial\", 16, \"normal\"))\n turtle.Screen().onkey(None, \"Left\")\n turtle.Screen().onkey(None, \"Right\")\n turtle.Screen().onkey(None, \"Up\")\n turtle.Screen().onkey(None, \"Down\")", "def endGame():\n return render_template(\"endGame.html\")", "def end_game_dialog(self):\n retry_button = pygame.Rect((self.settings[\"screen_width\"] / 2 - 75, 170), (150, 25))\n play_again = self.font.render('Play Again!', True, (255, 250, 106))\n\n quit_button = pygame.Rect((self.settings[\"screen_width\"] / 2 - 75, 250), (150, 25))\n quit_text = self.font.render('Quit!', True, (255, 250, 106))\n while True:\n self.clock.tick(10)\n self.draw_grid(0.4) # darken shade for every block\n pygame.draw.rect(self.surface, (0, 0, 0), retry_button)\n pygame.draw.rect(self.surface, (0, 0, 0), quit_button)\n self.screen.blit(self.surface, (0, 0))\n\n current_score = self.font.render(\"Score {0}\".format(self.score), True, (6, 255, 43))\n best_score = self.font.render(\"Max Score {0}\".format(self.maxScore), True, (6, 255, 43))\n\n self.screen.blit(current_score, (self.settings[\"screen_width\"] / 4 - 50, 100))\n self.screen.blit(best_score, (self.settings[\"screen_width\"] / 2 + 50, 100))\n\n self.screen.blit(play_again, (retry_button.left +\n (retry_button.width / 2 - play_again.get_width() / 2),\n retry_button.top +\n (retry_button.height / 2 - play_again.get_height() / 2))\n )\n\n self.screen.blit(quit_text, (quit_button.left +\n (quit_button.width / 2 - quit_text.get_width() / 2),\n quit_button.top +\n (quit_button.height / 2 - quit_text.get_height() / 2))\n )\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return 0\n if event.type == pygame.MOUSEBUTTONDOWN:\n mouse_pos = event.pos\n if retry_button.collidepoint(mouse_pos):\n return 1\n if quit_button.collidepoint(mouse_pos):\n return 0", "def death_screen():\n return False", "def leave_win_game(self):\n self.end = True\n self.canevas.config(bg='black')\n self.canevas.itemconfig(self.ball.ball, fill='black')\n self.canevas.itemconfig(self.paddle.paddle, fill='black')\n self.canevas.update()\n time.sleep(2)\n self.canevas.config(bg='light blue')\n self.canevas.itemconfig(self.ball.ball, fill='red')\n self.canevas.itemconfig(self.paddle.paddle, fill='grey')\n self.brick.next_level()", "def check_game_end(self):\r\n\r\n if np.all(self.remaining == -1): # end of game\r\n self.show_results() # show the final results\r\n sys.exit() # exit the program\r", "def stand(self):\n self.endgame()", "def event_game_over(self):\n print('Game over!')\n self._cmd_exit()", "def end_of_game(self):\n self.log.info('The game has ended')\n #\n end_callout = callout.FinishCallout(\n 'callout',\n 'finish_callout',\n ['exit_button'],\n S['end-game-callout'],\n self.deaths,\n )\n end_callout.show()\n #\n self.objects.append(end_callout)\n #\n while True:\n if end_callout.dismiss_button:\n music.fadeout(2)\n yield 2\n break\n yield 0\n #\n sys.exit(0)", "def end_game(self, game_state: str):\n if game_state == \"win\":\n end_message = \"{0} wins! Would you like to play again?\".format(self.players[self.game.whose_turn])\n else:\n end_message = \"Cat's game! 
Would you like to play again?\"\n play_again = messagebox.askyesno(title='Game over', message=end_message)\n if play_again:\n self.game.reset_game_data()\n self.reset_game_ui()\n else:\n self.window.destroy()", "def end_game(self, end_msg):\n self.word_view.reveal_word()\n self.greeterboard.update_gallows()\n self.greeterboard.greets(end_msg)", "def end_game(self):\n print(str(self.__players[0]._Player__name) + \" score is: \"\n + str(self.__fields[0].score))\n print(str(self.__players[1]._Player__name) + \" score is: \"\n + str(self.__fields[1].score))\n Game.play = False", "def end(self, is_won):\n if is_won:\n self.blit_background()\n self.screen.blit(self.won, (0, 0))\n self.render_screen()\n self.render_font()\n else:\n self.blit_background()\n self.screen.blit(self.lost, (0, 0))\n self.render_screen()\n self.render_font()", "def leave_loose_game(self):\n self.update_json_file()\n self.end = True\n self.root.destroy()\n GameOver()", "def keyboard_end_game_control(self, app):\n mx, my = pg.mouse.get_pos()\n click = False\n\n game_view = self.get_view.game_view\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n sys.exit()\n\n if event.type == pg.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n if game_view.back_menu_button.collidepoint((mx, my)):\n if click:\n app.end_game_running = False", "def endGame(self):\n #self.active = False\n self.inGame = False\n self.hand = []\n self.position = None", "def end_game(self):\n controller = self.controller\n self.end_game_running = True\n\n while self.end_game_running:\n controller.keyboard_end_game_control(self)\n controller.display_end_game()\n\n self.reset_game()\n self.run()", "def gamemode_lostscreen(self) -> None:\n self.__draw_lostscreen()", "def deathScreen(self):\n # creates welcome screen if state is STATE_INACTIVE\n if self.getState() == STATE_COMPLETE:\n label = GLabel(text=\"You Lose! 
Get dunked on!\", x = GAME_WIDTH/2,\n y = 50, font_size = 50, font_name = 'arcade',\n linecolor = introcs.RGB(0,0,0))\n label.halign = 'center'\n label.valign = 'middle'\n self.setText(label)\n # welcome screen is None if state is not STATE_INACTIVE\n else:\n self.setText(None)\n # draws the welcome screen\n #self.getText().x = consts.GAME_WIDTH / 2\n #self.getText().y = consts.GAME_HEIGHT / 2\n self.draw()", "def end_game():\n pygame.quit()\n exit()", "def end_of_game(self, winner):\n pass", "def isGameOver(self):\n pass", "def end_pygame(self):\n pygame.quit()", "def end_game(self):\n self.game_over = True\n self.put()\n # Add the game to the score 'board'\n score = Score(user=self.user, date=date.today(), won=self.win,\n tiles_remaining=self.tiles_remaining,\n difficulty=self.difficulty)\n score.put()", "def display_end_screen(win, text, game_status):\n if game_status == 'win':\n text = 'You win!'\n ROUTIER = pygame.image.load(\"../images/routier.png\")\n GLASSES_width = GLASSES.get_rect().size[0]\n elif game_status == 'lose':\n text = 'You lose!'\n ROUTIER = pygame.image.load(\"../images/routier-cry.png\")\n\n pygame.display.set_caption(text)\n width, height = pygame.display.get_surface().get_size()\n font = pygame.font.Font('freesansbold.ttf', 32)\n text = font.render(text, True, BLACK)\n textRect = text.get_rect()\n textRect.center = (width // 2, height // 2)\n\n ROUTIER_width = ROUTIER.get_rect().size[0]\n\n if game_status == 'win':\n for y in range(220):\n pygame.time.delay(5)\n win.fill(WHITE)\n win.blit(text, textRect)\n win.blit(ROUTIER, (width // 2 - ROUTIER_width // 2, 100))\n win.blit(GLASSES, (width // 2 - GLASSES_width // 2 + 20, y))\n pygame.display.update()\n else:\n win.fill(WHITE)\n win.blit(text, textRect)\n win.blit(ROUTIER, (width // 2 - ROUTIER_width // 2, 100))\n pygame.display.update()\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()", "def show_game_over_screen(self) -> None:\n\n if self.score > self.high_score:\n with open(HIGHSCORE_FILE, 'w') as f:\n f.write(str(self.score))\n\n message = f\"Well done - new high score: {self.score}\"\n\n elif self.score == self.high_score:\n\n if self.score == 0:\n message = \"What is this nonsense - you scored nothing!\"\n else:\n message = f\"Well done - you equalled high score: {self.score}\"\n\n else:\n\n message = f\"Sad times - no new high score ({self.high_score}), you got {self.score}\"\n\n self.screen.fill(RED)\n self.draw_text(TITLE, 48, WHITE, WIDTH / 2, HEIGHT / 4)\n\n self.draw_text(\"Game Over\", 22, WHITE, WIDTH / 2, HEIGHT / 2)\n self.draw_text(message, 22, WHITE, WIDTH / 2, HEIGHT / 2 + 50)\n\n\n self.draw_text(\"Press a key to return to menu\", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)\n self.draw_text(\"Esc to Quit\", 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4 + 30)\n pg.display.flip()\n\n # Wait for a key press.\n waiting = True\n while waiting:\n self.clock.tick(FPS)\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n self.quit()\n elif event.type == pg.KEYDOWN:\n if event.key == pg.K_ESCAPE:\n self.quit()\n else:\n waiting = False\n self.state = State.MENU", "def game_over(self):\n if self.alive:\n return\n\n self.screen.fill(Color.BLACK)\n self.draw_text(\n \"GAME OVER\", WIN_CENTER, font=FONT_M, size=48, color=Color.WHITE\n )\n again = \"Press any key to play again\"\n again_pos = CENTER_W, WIN_H - BLOCK_H\n self.draw_text(again, again_pos, color=Color.WHITE)\n\n pygame.display.flip()\n self.wait_keydown()\n\n if self.running:\n self.reset()", "def 
gameover(self):\n font = pygame.font.Font(None, CASE_SIZE)\n text = font.render('Game over!', True,(255, 255, 255), (0, 0, 0))\n self.screen.blit(text,(CASE_SIZE * 6, CASE_SIZE * 7))\n self.try_again()\n pygame.display.flip()", "def game_exit(self):\n self.set_state(GameState.EXITING)\n self.game_stop()\n self.game_log_statistics()", "def _end_game(self, winner_id=0):\r\n if winner_id == 0:\r\n # print(\"The game was a tie!\")\r\n pass\r\n else:\r\n # print(\"{0} has won the game!\".format(winner_id))\r\n pass\r\n self.winner = winner_id", "def set_end_game(self):\n # For now, we just need to set a flag indicating we should end\n # the game. When we check whether we should load another story\n # or repeat a repeating script, this flag will be used to skip\n # back to the main session script, to the end of the game.\n self._end_game = True", "def endGame(self):\r\n Game.__instance = None", "def end_game(self, show_flag=True):\n winner, p1_score, p2_score = self.find_winner_scores()\n\n if show_flag:\n self.show_board()\n\n go_str = \"Game Over!\\n\"\n if winner == 1:\n go_str += f\"{'#' * 24}\\n## Player 1 wins! ##\\n\"\n elif winner == 2:\n go_str += f\"{'#' * 24}\\n## Player 2 wins! ##\\n\"\n else:\n go_str += f\"{'#' * 24}\\n## Draw. ##\\n\"\n go_str += f\"## Player 1 score: {self.p1_store():2d} ##\\n\"\n go_str += f\"## Player 2 score: {self.p2_store():2d} ##\\n{'#' * 24}\\n\"\n print(go_str)\n return winner", "async def end(ctx):\n if ctx.message.channel.name.lower() not in tod_channels:\n return\n\n room = ctx.message.channel.name.lower()\n if room not in tod_games:\n await amor_manager.say(\"Truth Or Dare not in progress in {}\".format(room))\n else:\n host = tod_games[room]['host']\n await amor_manager.say(\"Game Over in {}! Thank you to {} for hosting this game!\".format(room, host))\n del tod_games[room]", "def handleEnd(winner):\n if winner != 0:\n if winner == 1: print(\"human win\")\n if winner == 2: print(\"cpu win\")\n if winner == 3: print(\"draw game\")\n return True\n return False", "def end_game(self):\n if self._waiting_for_players:\n [p.client.cancel_interactions() for p in self.all_players]\n self._waiting_for_players = False\n\n super().end_game()\n\n self.states[self.state][\"next\"] = \"STOP\"\n self._run_next_state()", "def game_over(self):\n self.end_of_level()\n self.message_holder.add_widget(self.you_lose_label)\n Clock.schedule_once(self.goto_menu, 5)", "def is_endgame_state(self) :\n raise NotImplementedError", "async def endGame(self, ctx):\n print(\"Ending game ...\")\n await self.movePlayer(ctx=ctx, voiceChannel=self.lastVoiceChannel, reason=\"Fin de partie.\")\n await self.deleteCategory(ctx=ctx, reason=\"Fin de partie.\")\n await self.deleteRole(ctx=ctx, reason=\"Fin de partie.\")\n print(\"Game ended\")\n await self.delete()", "def draw_game_over(self):\n arcade.draw_rectangle_filled(SCREEN_WIDTH // 2, SCREEN_HEIGHT // 2,\n SCREEN_WIDTH // 2,\n SCREEN_HEIGHT // 1.5, arcade.color.BRONZE)\n arcade.draw_rectangle_filled(SCREEN_WIDTH // 2, 410, 600, 140, arcade.color.COOL_GREY)\n arcade.draw_rectangle_filled(SCREEN_WIDTH // 2, 275, 550, 100, arcade.color.COOL_GREY)\n\n output = \"Oops, You Lost :(\"\n arcade.draw_text(output, 360, 381, arcade.color.BLACK, 66)\n\n output = \"Click Anywhere To Restart\"\n arcade.draw_text(output, 375, 258, arcade.color.BLACK, 40)", "def bcp_game_end(self, **kwargs):\n self.player = None\n self.events.post('game_ended', **kwargs)", "def endgame_winner(self) :\n raise NotImplementedError", "def die(self):\r\n 
GrimReaper=games.Message(value=\"GAME OVER\", size=100, color=color.red, x=games.screen.width/2, y=games.screen.height/2, lifetime=1*games.screen.fps, after_death=games.screen.quit, is_collideable=False)\r\n games.screen.add(GrimReaper)", "def determineEndGame(self):\n\n print(\"noWinners: \" + str(self.noWinners) + \", noTotKids: \" + str(self.noTotKids))\n\n # TODO scegliere come determinare la fine del gioco\n # if self.noWinners == self.noTotKids - 1: # end-game test\n if self.noWinners == self.noTotKids:\n print(\"ho determinato la fine del gioco\")\n return True\n else:\n print(\"colore toccato ma la partita non e' finita\")\n return False", "def checkGameState(self, fpsclock, screen):\n if self.isWin() or self.isLost():\n if self.exitMenu(fpsclock, screen):\n return True\n return False", "def handle_game_end(self, winner_color, win_reason): # possible GameHistory object...\n\n # TODO: implement this method\n print('\\--------------Game End--------------/')\n print(winner_color)\n print(win_reason)\n pass", "def __quit(self):\n self.clear_screen()\n self.__print_logo()\n print('\\n'*3)\n self.cool_print(\"THANKS FOR PLAYING!\")\n sleep(2)\n self.stop_game = True", "def EndTurn(self):\n\n if self.defender.health <= 0:\n print(\"{0} ran out of health and passed out, {1} is the victor!\".format(self.defender.name, self.attacker.name))\n return True\n self.currentTurn += 1\n if self.currentTurn >= self.maxTurns:\n print(\"Both parties got bored of the fight and parted ways, draw!\")\n return True\n self.attacker, self.defender = self.defender, self.attacker\n return False", "def displayGameOver():\n screen.fill(pygame.Color('black'), (0, 0, SCREEN_SIZE[0], SCREEN_SIZE[1]))\n goText, rect1 = GAME_FONT.render('Game Over', WHITE)\n paText, rect2 = GAME_FONT.render('Press [Enter] to play again', WHITE)\n width1 = goText.get_width()\n height1 = goText.get_height()\n width2 = paText.get_width()\n height2 = paText.get_height()\n posX1 = (SCREEN_SIZE[0] - width1) / 2\n posY1 = (SCREEN_SIZE[1] - height1) / 2\n posX2 = (SCREEN_SIZE[0] - width2) / 2\n posY2 = (SCREEN_SIZE[1] - height2) / 2 + 30\n screen.blit(goText, (posX1, posY1))\n screen.blit(paText, (posX2, posY2))", "def quit_game(self):\n self.done = True", "def game_lose(self):\n self.lose = True\n self.player.reset_animations()\n self.player.reset_actions()\n self.msg.set_text(u'YOU LOSE <Press Space>')\n self.msg.show(True)", "def round_end_screen(self, board, p1, ai):\n if self.number == 0:\n if p1.score > ai.score:\n print(f'Round {self.last_game} {p1.score}:{ai.score}\\n{board}\\n\\nYou Won! ', end='')\n elif ai.score > p1.score:\n print(f'Round {self.last_game} {p1.score}:{ai.score}\\n{board}\\n\\nYou lost. ', end='')\n else:\n print(f\"Round {self.last_game} {p1.score}:{ai.score}\\n{board}\\n\\nIt's a draw. \", end='')\n elif self.number == 1:\n print(f'Round {self.last_game} {p1.score}:{ai.score}\\n{board}\\n\\n1 game left! ', end='')\n else:\n print(f'Round {self.last_game} {p1.score}:{ai.score}\\n{board}\\n\\n{self.number} games left! ', end='')", "def __end_turn(self):\n self._turn_counter += 1\n # päivittää pelaajan\n self._player %=2\n self._player += 1\n if (self._gui):\n self.__status[\"text\"] = self._player_names[self._player-1]\n self.__turn_counter_label[\"text\"]= \"{}. 
Turns taken\".format\\\n (self._turn_counter)\n self.__turn_counter_label.update()\n\n # This will cause stackoverflow in training.\n if (self._state == PLAY and self.__check_ai_turn()):\n self.__ai_turn()", "def end(self) -> None:\n unicurses.endwin()", "def breakout_loop(self):\n while self.playing:\n self.handle_events()\n self.update()\n if self.game_over:\n self.current_menu = self.fail_menu\n self.playing = False\n self.reset()\n self.draw()", "def game_over_screen(self):\n self.font = pygame.font.SysFont(\"comicsansms\", 72)\n i = 0\n pygame.draw.rect(self.window, (60,50,20), (200, 200, 450, 300))\n text = self.font.render(\"Game Over\", True, (0, 0, 0))\n self.window.blit(text, (240, 220))\n text = self.font.render(self.winner+\" wins\", True, (0, 0, 0))\n self.window.blit(text, (270, 330))\n pygame.display.flip()\n while not self.quit:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.quit = True\n return\n if event.type == pygame.QUIT:\n self.quit = True\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_r:\n self.play()\n elif event.key == pygame.K_t:\n self.tip()\n elif event.key == pygame.K_m:\n self.__init__()", "def end_of_game(self):\n try:\n play_again = input(\"Would you like to play again?[y]es/[n]o: \").lower()\n except ValueError:\n print(\"That is is not a valid value please use either y or n.\")\n self.end_of_game()\n if play_again == \"y\":\n # Phrase(self.player_guess, new_game=True, run_extend=True)\n # Character(self.player_guess, self.selected_phrase, life_check=True, new_game=True)\n Game()\n elif play_again == \"n\":\n print(\"\\n\"\"Thank you for playing, see y'all next time.\"\"\\n\")\n sys.exit()\n else:\n print(\"That is is not a valid value please use either y or n.\")\n self.end_of_game()", "def lost():\n\tforeground_module.foreground_speed = 0\n\tbackground_module.background_speed = 0\n\tdisplay_fail_msg(win)\n\n\tif player_module.player.y > foreground_module.ground_y:\n\t\ttry:\n\t\t\tprocess_object.terminate()\n\t\texcept: pass\n\n\t\treturn True\n\treturn False", "def endGame(self):\n\t\thue = 0\n\t\thueInc = 360 / len(self.scores.items())\n\n\t\tfor e in self.scores.items():\n\t\t\tcolor = QColor()\n\t\t\tcolor.setHsv(hue, 255, 240)\n\t\t\thue += hueInc - 1\n\t\t\tself.scores[e[0]] = (e[1][0], color.getRgbF())\n\n\t\t# actually ending the game here...\n\t\tfor player in self.players.values():\n\t\t\t\tplayer[0].endGame()\n\t\tself.gameEnded.emit()", "def is_end_game(self):\n win = self.is_game_won()\n tie = self.game_is_tied()\n return win or tie", "def check_for_end_game(self):\n if self.grid.snake_died():\n self.scores.append(self.score)\n if self.score >= 1:\n self.averages.append(sum(self.scores) / (len(self.averages) + 1))\n # self.plot_scores()\n self.reset()", "def draw_game_over(self):\n output = \"Game Over!\"\n arcade.draw_text(output, 250, 400, arcade.color.BLACK, 54)\n\n output = \"Click to restart\"\n arcade.draw_text(output, 330, 200, arcade.color.BLACK, 24)", "def __draw_lostscreen(self) -> None:\n self.__draw_background()\n line1 = LOOSE_FONT.render(\"You loose!! 
Highscore: \" + str(round(self.highscore)), 1, RED)\n line2 = LOOSE_FONT.render(\"Press enter to play again\", 1, RED)\n self.win.blit(line1, (round((WIDTH/2) - line1.get_width()/2), round(HEIGHT/2 - line1.get_height()/2)))\n self.win.blit(line2, (round((WIDTH/2) - line2.get_width()/2), round(HEIGHT/2 - line1.get_height()/2) + line1.get_height() + 5)) \n pygame.display.update()", "def handle_game_over(self, winner, end_state):\n #############################\n #\n #\n # YOUR CODE HERE\n #\n #\n ##############################\n print(\"Game over, these are the stats:\")\n print('Winner: ' + str(winner))\n print('End state: ' + str(end_state))", "def end_game(self, won=False):\n self.game_over = True\n self.put()\n # Add the game to the score 'board'\n score = Score(user=self.user, \n date=date.today(), \n won=won,\n attempts_remaining=self.attempts_remaining, \n answer=self.answer\n )\n score.put()", "def exit_game(self):\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n 'door_closed.jpg'\n self.ids['button' + str(i)].disabled = False\n setattr(self, 'door'+str(i)+'_counter', 0)\n self.manager.current = 'MainMenu'\n self.ids['score'].text = 'SCORE: 0'\n self.score = 0", "def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables): #\n return True, -1\n\n return False, -1", "def exitMenu(self, fpsclock, screen):\n screen.fill((0, 0, 0))\n self.rect = pygame.Rect(0, 0, self.gs[0] * (self.ts + self.ms) + self.ms,\n self.gs[1] * (self.ts + self.ms) + self.ms)\n self.pic = pygame.transform.smoothscale(pygame.image.load('blurredImage.png'), self.rect.size)\n screen.blit(self.pic, self.rect)\n if self.isWin():\n self.draw_text(screen, \"You won !\", 50, 250, 80, 0, 0, 0, True)\n self.draw_text(screen, \"Congratulations !\", 50, 250, 160, 0, 0, 0, True)\n else:\n self.draw_text(screen, \"You lost !\", 50, 250, 80, 0, 0, 0, True)\n self.draw_text(screen, \"Better luck next time !\", 50, 250, 160, 0, 0, 0, True)\n self.draw_text(screen, \"Moves : \" + str(self.nb_move), 40, 500, 10, 255, 255, 255, False)\n self.draw_text(screen, \"Shortcuts\", 40, 500, 40, 255, 255, 255, False)\n self.draw_text(screen, \"Restart : y\", 40, 500, 70, 255, 255, 255, False)\n self.draw_text(screen, \"Quit : n\", 40, 500, 100, 255, 255, 255, False)\n\n pygame.display.flip()\n while True:\n fpsclock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_y:\n self.reset()\n return False\n if event.key == pygame.K_n:\n self.exit()", "def end_stimulus(win,end_stim):\n #start core clock\n clock = core.Clock()\n\n #while space bar is not pressed continue to show end stimulus\n #if 50 seconds pass, then stop showing end stimulus\n end_stim.setAutoDraw(True)\n while not event.getKeys(['space']):\n win.flip()\n if int(clock.getTime()) > 50:\n break\n end_stim.setAutoDraw(False)", "def leaveGame(game, player): # is also called in register player if THE UNPROBABLE happens (e.g. 
there was a crash and bobby can't come in again)\n\t#check if player is in game and game exists, if the player is the creator close the game\n\tgame_key = game.key()\n\tplayer_key = player.key()\n\n\tif game != None and player != None:\t\t\t\n\t\tif game.creator.key() == player.key():\n\t\t\t#TODO: close game\n\n\t\t\tplayer.currentGame = None\n\t\t\tplayer.put()\n\n\t\t\tgame.status = 2\n\t\t\tgame.players.remove(player.key())\n\t\t\tgame.playerCount -= 1\n\t\t\tgame.put()\n\n\t\t\tlogging.info('Creator %s left game %s, game stopped'%(player_key,game_key))\n\t\t\tvalue = \"done\"\n\t\telif player.key() in game.players:\n\t\t\tplayer.currentGame = None\n\t\t\tplayer.put()\n\n\t\t\tgame.players.remove(player.key())\n\t\t\tgame.playerCount -= 1\n\t\t\tgame.put()\n\n\t\t\tlogging.info('Player %s left game %s, game has now %s players left'%(player_key,game_key,game.playerCount))\n\n\t\t\t#TODO: deal with the horrible aftermath\n\t\t\t#maybe if only 2 left start showdown, give 2 minutes then set marker in between them\n\t\t\tvalue = \"done\"\n\t\telse:\n\t\t\tlogging.error('Attempt to leave game %s by player %s failed, not in list apparently and not creator'%(game_key,player_key))\t\t\t\n\t\t\tvalue = \"error\"\t\t\n\telse:\n\t\tlogging.error('Attempt to leave game %s by player %s failed, no game or player'%(game_key,player_key))\t\t\t\n\t\tvalue = \"error\"\n\n\treturn value", "def updateComplete(self):\n self.livesScreen()\n if self.getWave().getLives() == 0:\n self.deathScreen()\n else:\n self.winScreen()", "def process_before_unload(self, chosen_position):\n if chosen_position == 1:\n # Go back to day screen\n return True\n # Chosen position == 0.\n # The user wants to play the game.\n # Is there enough day hours left to play?\n if cf.gs.game.current_day.day_hours < 1:\n # Alert: Not enough hours left.\n # Alert is: Message, Buttons, Callback_Function\n # Then return False to stay here.\n self.alert(\n ('Warning: There are not enough hours left to play!\\n'\n 'Try again tomorrow.'), ['OK'])\n return False\n elif cf.gs.game.current_day.day_hours >= 1:\n # Remove one hour from the day.\n # Then return True to leave here and go to the game.\n cf.gs.game.mod_hours(-1)\n return True", "def leave(self):\n self.pleaseQuit=1", "def endgame(self):\n #reveals the dealer's first card then the dealer hits until the dealer's hand's value is above 16\n self.dealer_hand[0].face_up()\n if self.dealer_hand[0].value in FACE_CARDS:\n self.dealer_value += 10\n elif self.dealer_hand[0].value == \"A\":\n self.dealer_value += 11\n self.dealer_ace_count += 1\n else:\n self.dealer_value += int(self.dealer_hand[0].value)\n\n if self.dealer_value > 21:\n if self.dealer_ace_count > self.dealer_almost_bust:\n #To prevent a Bust, the Dealer's Ace became a one\n self.dealer_value -= 10\n self.dealer_almost_bust += 1\n else:\n self.player_win()\n #House always wins Ties\n elif self.dealer_value == 21:\n self.player_lose()\n\n while self.dealer_value < 17:\n self.hit(\"dealer\")\n\n if (self.player_value - self.dealer_value) > 0:\n self.player_win()\n else:\n self.player_lose()", "def resign_game(self):\n if self._current_player == \"BLACK\":\n self._game_state = \"WHITE_WON\"\n\n else:\n self._game_state = \"BLACK_WON\"", "def play_game(self):\n self.welcome()\n while (self.winner is None) and (not self.exit_flag) and (not self.board.full()):\n self.play_round()\n self.exit_game()", "def game_over(screen, black, red, brown, player):\r\n screen.fill(black)\r\n\r\n f1 = pygame.font.Font(None, 102)\r\n s1 = f1.render(\"GAME 
OVER\", True, red)\r\n screen.blit(s1, [100, 100])\r\n\r\n f2 = pygame.font.Font(None, 70)\r\n s2 = f2.render(\"score: %d\" % player.score, True, brown)\r\n screen.blit(s2, [200, 240])\r\n\r\n pygame.display.update()\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()", "def end_game(self, won=False):\n\n self.game_over = True\n if not won:\n self.message = \"Game Over\"\n else:\n self.message = \"Winner\"\n game_win = Win(\n user=self.user,\n date=datetime.utcnow(),\n won=True,\n attempts_used=self.attempts_used,\n score=self.score\n )\n game_win.put()\n self.put()\n # Add the game to the score 'board'", "def end_game(self, won=False):\n self.game_over = True\n self.put()\n # Add the game to the score 'board'\n score = Score(user=self.player, date=date.today(), won=won,\n bombs=len(self.player_bombs))\n score.put()", "def endMyTurn(self):\n try:\n result = self.game.server.endEmpireTurn(self.game.authKey)\n if result == 0:\n if self.game.myEmpire['roundComplete'] == 1:\n self.modeMsgBox('You have now un-ended your turn')\n self.game.myEmpire['roundComplete'] = 0\n else:\n self.modeMsgBox('Your turn has been ended, thankyou')\n self.game.myEmpire['roundComplete'] = 1\n self.mainmenu.writeTextRoundEnds()\n elif type(result) == types.StringType:\n self.modeMsgBox(result)\n else:\n \"\"\"End Turn and wait for it to end\"\"\"\n result = self.game.server.endRound(self.game.authKey)\n self.game.server.logout(self.game.authKey)\n from anw.modes.modelogin import ModeLogin\n newMode = ModeLogin(self.game, 200)\n self.game.enterMode(newMode)\n except:\n self.modeMsgBox('endMyTurn->Connection to Server Lost')", "def game_end(self):\n win, winner = self.has_a_winner()\n if win:\n return True, winner\n elif not len(self.availables):\n return True, -1\n return False, -1" ]
[ "0.79576516", "0.7893134", "0.7839257", "0.7556048", "0.7502281", "0.7455614", "0.74543214", "0.7444883", "0.7424693", "0.7401899", "0.7302308", "0.72807413", "0.72732544", "0.7265182", "0.7250664", "0.7242742", "0.72027653", "0.71485794", "0.714262", "0.7102258", "0.70943487", "0.70891726", "0.7055152", "0.7054914", "0.70455086", "0.70446324", "0.70349014", "0.70175797", "0.6994958", "0.69684297", "0.69678146", "0.69585013", "0.6929497", "0.69186604", "0.68867683", "0.6873515", "0.6867752", "0.6853899", "0.68522954", "0.6836176", "0.68297476", "0.6805486", "0.6800292", "0.6781847", "0.6776769", "0.6775794", "0.6774854", "0.6753432", "0.6728577", "0.6712757", "0.67057467", "0.67046946", "0.6677195", "0.667638", "0.66723925", "0.6633819", "0.6622364", "0.6607112", "0.66067463", "0.6606573", "0.66029716", "0.65887725", "0.6552795", "0.65526587", "0.6543233", "0.6533525", "0.65317136", "0.6527949", "0.6527438", "0.6512263", "0.65035313", "0.6496968", "0.6475135", "0.64719987", "0.64701295", "0.6466285", "0.64472437", "0.644498", "0.643556", "0.64271605", "0.64149743", "0.6412287", "0.64081883", "0.64000994", "0.6386906", "0.63852406", "0.6378759", "0.6375087", "0.6373244", "0.6366103", "0.6358414", "0.635528", "0.63536257", "0.6346913", "0.63406616", "0.63363206", "0.63343513", "0.6326969", "0.63240856", "0.6317462" ]
0.70578074
22
If the user presses the close button in the title bar, this function closes the game
def Quit():
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            return True
    return False
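A usage sketch (assumed, not part of the record): Quit() is meant to be polled once per frame. Because pygame.event.get() drains the whole event queue, any other per-event handling should live in the same loop; pygame.key.get_pressed() is unaffected, since it reads keyboard state rather than events. The window size, drawing, and tick rate below are placeholders.

import pygame

pygame.init()
dis = pygame.display.set_mode((600, 400))  # placeholder window size
clock = pygame.time.Clock()

game_quit = False
while not game_quit:
    game_quit = Quit()       # True once the title-bar close button is pressed
    dis.fill((0, 0, 0))      # placeholder frame drawing
    pygame.display.update()
    clock.tick(20)           # matches the game's 20 FPS tick
pygame.quit()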
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def OnClose(self, event):\r\n pos.app.main.Exit()", "def end_game():\n pygame.quit()\n exit()", "def __window_close(self):\n pass", "def onBtnCloseClicked(self):\n self.close()", "def exit_game(root):\n root.destroy()", "def end_pygame(self):\n pygame.quit()", "def exit(self):\n if self.window:\n self.window.close()", "def close(self):\n self.state = False\n self.mainwindow.sendMessage('a')\n print(\"closing \" + self.name)", "def quit_game(self):\n pg.quit()\n sys.exit()", "def OnExit(self, event):\r\n self.Close(True)", "def close(self):\n self._screen = None\n pygame.display.quit()", "def endGame(self):\n pass", "def close_UI(self):", "def end_game(self):\n self.game.stop_running()", "def ev_windowclose(self, event: WindowEvent) -> None:", "def my_quit_function():\n pygame.quit()\n sys.exit()", "def quit(self):\n return pygame.event.Event(pygame.QUIT)", "def end_game(self, game_state: str):\n if game_state == \"win\":\n end_message = \"{0} wins! Would you like to play again?\".format(self.players[self.game.whose_turn])\n else:\n end_message = \"Cat's game! Would you like to play again?\"\n play_again = messagebox.askyesno(title='Game over', message=end_message)\n if play_again:\n self.game.reset_game_data()\n self.reset_game_ui()\n else:\n self.window.destroy()", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def close(self):\n #title()\n self.experiment.pause = True\n if self.running:\n self.running = False\n\n self._unregisterCallbacks()", "def quit(self):\n self._pygame.quit()", "def close(self):\n\n\t\tself._window.close()", "def leave(self):\n p = GameOverPopup(self)\n p.open()", "def _close(self, event):\n self.EndModal(wx.ID_OK)", "def exit_game(self):\n for i in range(1, 4, 1):\n self.ids['door' + str(i)].source = \\\n 'door_closed.jpg'\n self.ids['button' + str(i)].disabled = False\n setattr(self, 'door'+str(i)+'_counter', 0)\n self.manager.current = 'MainMenu'\n self.ids['score'].text = 'SCORE: 0'\n self.score = 0", "def received_CLOSING(self):\n\n\t\tself.player_frame.notify_rival_closing()\n\t\tself.player_frame.master.go_to_previous_screen(False)", "def close_menu_all(game_event):\n wire_menu.close()", "def window_close(self, item, e=None):\n\tif self.handler:\n self.handler.win_close(\"about\", None)\n\treturn 1", "def onCloseWindow(self, event):\r\n\r\n self.Destroy()", "def close(self):\n\n cv2.destroyWindow(winname=self.title)", "def close(self):\n self.window.destroy()\n self.buttons_window.destroy()", "def OnClose(self, event = None):\n ##Close.\n self.Hide()\n self.Destroy()", "def _onExit(self, event):\n self.Close(True)", "def close(event):\n event.widget.destroy()", "def close_menu(game_event):\n wire_menu.close(index_from_userid(game_event.get_int('userid')))", "def close(self):\n self.dismiss()\n screenmanager.change_to('main_menu')", "def quit_game(self):\n self.done = True", "def closeEvent(self,event):\r\n title = self.windowTitle()\r\n if title[-1] == \"*\":\r\n msg = 'Are you sure you want to close the Text Editor without Saving the file?'\r\n reply = QMessageBox.question(self, 'Window Close',msg,\r\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\r\n if reply == QMessageBox.Yes:\r\n sys.exit()\r\n else:\r\n event.ignore()", "def onQuit(self, event):\n\n\t\tself.onClose(None)", "def __onclosing(self):\n self.window.destroy()", "def play_game(self):\n self.window.destroy()\n Game()", "def __quit(self):\n 
self.clear_screen()\n self.__print_logo()\n print('\\n'*3)\n self.cool_print(\"THANKS FOR PLAYING!\")\n sleep(2)\n self.stop_game = True", "def landlord_button_close(self):\n return self.write({'state': 'close'})", "def Quit(self, event):\n pass", "def onExitButtonClicked(self, widget):\n self.getGtkTopObject().close()", "def end_game(self):\n pygame.event.clear()\n self.screen.fill(BLACK)\n self.show_on_screen(\"GAME OVER\", (500, 600), font_size=50)\n self.show_on_screen(\"Press \\\"N\\\" to start a new game\", (500, 650), font_size=30)\n self.show_on_screen(\"Press \\\"ESC\\\" to exit\", (500, 710), font_size=30)\n self.show_on_screen(\"SCORE: \" + str(self.score), (500, 560), font_size=50)\n pygame.display.flip()\n\n # clears previously pressed key\n pygame.event.wait()\n while True:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n pygame.quit()\n sys.exit()\n elif event.key == pygame.K_n:\n self.reset_lander()\n self.play()", "def on_closingWindow(self):\n if messagebox.askokcancel(\"Quit\",\n \"Do you want to quit grading the assignment?\\n File will be saved\",\n parent=window):\n self.submitAssignment()", "def OnClose(self, event):\n self._is_open = False\n wx.PostEvent(self, wxDockPaneClosedEvent())", "def Close_Event_By_X_Button():\r\n # Explain Of The Function #\r\n\r\n print(\"===========================================================================\")\r\n print(\"\\t\\t\\tThe User Press On - 'X' / 'Close' Button !\")\r\n print(\"===========================================================================\")\r\n\r\n sys.exit(0)\r\n pass", "def close_exit(self):\n self.close_video()\n sys.exit()", "def window_close():\n response = messagebox.askokcancel(title=\"Exit?\",\n message=\"Are you sure you want to close the program?\")\n if response == True:\n root.destroy() # Closes the window\n # Else: The program continues as normal", "def if_quit(self):\n answer = helper.quit_popup()\n if answer:\n self.parent.destroy()", "def OnClose(self, event):\n\t\tself.Show(False)", "def keyboard_end_game_control(self, app):\n mx, my = pg.mouse.get_pos()\n click = False\n\n game_view = self.get_view.game_view\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n pg.quit()\n sys.exit()\n\n if event.type == pg.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n\n if game_view.back_menu_button.collidepoint((mx, my)):\n if click:\n app.end_game_running = False", "def close(self):\n if(screen == self):\n screen = None", "def endGame(self, msg):\n title = \"Game Over\"\n QMessageBox.information(self, title, msg)\n self.reset()", "def closeEvent(self,event):\n\n try:\n if self.globalSession.running:\n event.ignore()\n return\n except:\n pass\n\n\n quit_msg = \"Are you sure you want to exit the program?\"\n reply = QtGui.QMessageBox.question(self, 'Message', \n quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)\n\n if reply == QtGui.QMessageBox.Yes:\n \n self.scanner.stopProcesses()\n self.globalSession.stopProgram = True\n\n try:\n self.globalSession.currentCapture.saveSession()\n self.statusTimer.stop()\n except AttributeError:\n pass # Gets thrown if no captures have been started when closing the program\n\n event.accept()\n else:\n event.ignore()", "def onClose (self):\n \n pass", "def quit(self):\n self.running = False\n pygame.quit()", "def closeEvent(self, event):\n log.info(\"Received window close event.\")\n self.main.app_is_exiting()\n 
super().closeEvent(event)\n return", "def quit_everything():\r\n \r\n pygame.quit()\r\n sys.exit()", "def handle_quit( self ):\n print \"bye!\"\n\n # really close the window\n return True", "def close(self):\n self.send(ActorExit)", "def close_pop_up_windows(self):\n self.button_click(self.DECLINE_BUTTON)\n self.button_click(self.CLOSE_POPUP_BUTTON)", "def _quit(self) -> None:\n self._show_bag(True)\n print(\"Thanks for playing!\")\n exit()", "def on_exit(self):\n if tkMessageBox.askyesno(\"Exit\", \"Do you want to quit the application?\"):\n self.logger(\"Program shutdown properly..\\n\")\n self.master.destroy()\n self.endCommand()", "def close_window(self):\n # Window - END\n self.root.destroy()", "def on_actionQuit_triggered(self):\n\t\texit()", "def event_handler(self, event):\n if event.type == pygame.QUIT:\n self.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n self.exit()", "def handle_close(self):\n self.active = False\n self.close()", "def force_close(self):\n\n\t\tself._window.force_close()", "def event_handler(self, event):\n if event.type == pygame.QUIT:\n # close window event\n self.exit()\n elif event.type == pygame.KEYDOWN:\n # keyboard event on press ESC\n if event.key == pygame.K_ESCAPE:\n self.exit()", "def event_handler(self, event):\n if event.type == pygame.QUIT:\n # close window event\n self.exit()\n elif event.type == pygame.KEYDOWN:\n # keyboard event on press ESC\n if event.key == pygame.K_ESCAPE:\n self.exit()", "def On_Exit(self):\n GUI_element.On_Exit(self)\n if self.generic_button:\n self.generic_button_text_object.Kill()", "def close(self):\n\n title = self.titleEntry.get()\n if not tkhelpers.isEmptyOrWhitespace(title):\n rating = self.ratingEntry.getValue()\n if rating is not None and rating <= 5 and rating >= 0:\n self.closeCallback(title, rating)\n self.master.destroy()\n else:\n tkinter.messagebox.showerror(\"Whoops?\", \"The rating field must contain a real number between 0 and 5.\")\n else:\n tkinter.messagebox.showerror(\"Whoops?\", \"The title field may be empty.\")", "def closeEvent(self, event):\n reply = QtWidgets.QMessageBox.question(\n self,\n 'Message',\n \"Are you sure to quit?\",\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n QtWidgets.QMessageBox.No)\n\n if reply == QtWidgets.QMessageBox.Yes:\n self.onclick_exit()\n event.accept()\n else:\n event.ignore()", "def OnFrameClose(self, event):\r\n\t\tself.Hide()", "def on_cleanup(self):\n\n pygame.quit()", "def leave_loose_game(self):\n self.update_json_file()\n self.end = True\n self.root.destroy()\n GameOver()", "def cb_close(self, *args):\n Gtk.main_quit()", "def terminate():\n pygame.quit()\n sys.exit(0)", "def close_window() -> bool:\n global WINDOW\n global IMPL\n global ACTIVE_CALLBACK\n try:\n cast(PygletRenderer, IMPL).shutdown()\n cast(pyglet.window.Window, WINDOW).close()\n\n WINDOW = None\n IMPL = None\n ACTIVE_CALLBACK = update\n return True\n except Exception as e:\n unrealsdk.Log(e)\n return False", "def quit(self):\n\n self.main_window.destroy()", "def endgame(winner):", "def terminate():\r\n pygame.quit()\r\n os._exit(1)", "def end_of_game(self):\n end_game = pyip.inputYesNo(f'\\nDo you want to play again?: ')\n\n if end_game == 'no':\n print('\\n-- GAME OVER --')\n sys.exit()\n elif end_game == 'yes':\n self.game_counter += 1", "def ev_windowclose(self, event: tcod.event.WindowEvent) -> T | None:", "def closeCurse():\n #On détruit les fenêtres curses\n curses.endwin()\n #On sort du script\n exit()", "def close(self):\n closeI1Display()", 
"def closeEvent(self, event):\n event.accept() # let the window close\n self.returnHome()", "def on_main_win_close(self):\n child_list = self.nb.winfo_children()\n for i in range(len(child_list)):\n index = self.nb.index('current')\n if self.get_current().edit_modified():\n ans = mb.askyesnocancel('Confirm on close', 'Do you want to save this file?')\n if ans:\n if self.save_file():\n self.tab_close(index, child_list)\n else:\n return\n elif ans is False:\n self.tab_close(index, child_list)\n else:\n return\n else:\n self.tab_close(index, child_list)\n self.main_window.destroy()", "def OnClose(self, event):\n self.Show(False)", "def onClose(self, event): \n \n self.Destroy()\n return", "def click_close_button(self):\n self.click_img(target_img=SETTINGS['img_paths']['buttons']['close'])", "def aboutClose(self):\r\n self.tlAbout.destroy()", "def game_exit(self):\n self.set_state(GameState.EXITING)\n self.game_stop()\n self.game_log_statistics()", "def close(self):\n curses.nocbreak()\n self.screen.keypad(0)\n curses.echo()\n curses.endwin()" ]
[ "0.78174365", "0.7448609", "0.73944855", "0.7382604", "0.7349683", "0.7257558", "0.7246158", "0.72404736", "0.72394633", "0.7183465", "0.71531284", "0.7136925", "0.7078766", "0.70734334", "0.70675", "0.7057641", "0.7057267", "0.70546734", "0.7045907", "0.7045907", "0.7045907", "0.7045907", "0.7039085", "0.70350575", "0.7024915", "0.70235556", "0.7013138", "0.7002207", "0.6998771", "0.699309", "0.698147", "0.69727695", "0.6966735", "0.6964825", "0.69614553", "0.6950238", "0.6945977", "0.69385713", "0.69254977", "0.6923534", "0.69153106", "0.690571", "0.68754154", "0.6860532", "0.6845323", "0.68342686", "0.6809719", "0.6808847", "0.6802955", "0.680191", "0.6777207", "0.67673254", "0.67442364", "0.6739615", "0.67375976", "0.67308706", "0.67283684", "0.6725762", "0.6719182", "0.67127556", "0.6709607", "0.6703337", "0.66976744", "0.66964877", "0.66958475", "0.66949224", "0.6693159", "0.669185", "0.66876304", "0.6682568", "0.6679199", "0.66640246", "0.6663502", "0.66582686", "0.6655885", "0.6655885", "0.6652902", "0.6652585", "0.66299605", "0.66283065", "0.66266006", "0.66180176", "0.6615806", "0.6595711", "0.6589546", "0.65851724", "0.6581729", "0.6579519", "0.6578857", "0.65772074", "0.65737796", "0.65658784", "0.6561005", "0.6559889", "0.6557172", "0.6556912", "0.65548176", "0.65530103", "0.6542735", "0.65419626" ]
0.7133421
12
main_game function, which includes all the functions and commands needed to run the game. Returns None.
def main_game():
    # All the initial parameters required to run the game
    Game_quit = False
    Loss = False
    foodx = round(random.randrange(50, dis_width - 50))
    foody = round(random.randrange(50, dis_height - 50))
    snake_Body = []
    score = 4
    redraw(dis)
    while not Game_quit:
        keys = pygame.key.get_pressed()
        Game_quit = Quit()
        if not snake.Start:
            StartScreen()
            if keys[pygame.K_RETURN]:
                snake.Start = True
        if Loss:
            EndScreen(score)
            if keys[pygame.K_RETURN]:
                main_game()  # restart by re-entering the game loop
            elif keys[pygame.K_ESCAPE]:
                Game_quit = True
        snake.x = dis_width/2
        snake.y = dis_height/2
        # Playing the game...
        while snake.Start and not Loss and not Game_quit:
            Clock.tick(20)
            Game_quit = Quit()
            Movement()
            snake.x += snake.x_change
            snake.y += snake.y_change
            if snake.x < 0 or snake.x > dis_width - snake.width or snake.y < 0 or snake.y > dis_height - snake.height:
                Loss = True
            # For drawing the snake body, each part having its own x, y and angle
            snake_Part = []
            snake_Part.append(snake.x)
            snake_Part.append(snake.y)
            snake_Part.append(Mid)
            snake_Part.append(snake.ang)
            snake_Body.append(snake_Part)
            if len(snake_Body) > score:
                del snake_Body[0]
            # For drawing the head of the snake
            snake_x = snake.x
            snake_y = snake.y
            snake_x += snake.x_change
            snake_y += snake.y_change
            headposx = snake_x
            headposy = snake_y
            # Collision detection between head and body
            for track in snake_Body[:-1]:
                if track[0] == headposx and track[1] == headposy:
                    Loss = True
            snake.draw_snake(dis, snake_Part, snake_Body)
            fooddraw(foodx, foody)
            d = Distance(foodx, foody)
            # Collision detection between the snake and the food
            if d < 40:
                foodx = round(random.randrange(50, dis_width - 50))
                foody = round(random.randrange(50, dis_height - 50))
                score += 1
            scoredraw(score)
            redraw(dis)
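Distance() is called with the food coordinates but is not defined in this record; the d < 40 check suggests it returns the straight-line distance from the snake's head to the food, with 40 pixels acting as the pickup radius. A hedged sketch follows, assuming the module-level snake object used above.

import math

def Distance(foodx, foody):
    # Euclidean distance between the snake's head and the food pellet;
    # relies on the module-level snake object, like the rest of the game.
    return math.hypot(snake.x - foodx, snake.y - foody)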
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\r\n gameclass = data.game.GameClass()\r\n gameclass.main_loop()", "def main():\n\n name, game = select_game(vgc.KNOWN_GAMES)\n print('---- Launching: %s -----'%name)\n game.game.main()\n sys.exit(0)", "def main():\n g = Game(800, 600)\n g.start()", "def main():\n if \"cli\" in sys.argv:\n run_cli_game()\n else:\n run_gui_game()", "def main():\n game = RiichiMahjongApp()\n game.run()", "def main():\n pygame.init()\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pygame.display.set_caption('8-Puzzle game')\n screen = pygame.display.set_mode((800, 500))\n fpsclock = pygame.time.Clock()\n program = SlidePuzzle((3, 3), 160, 5, difficulty=10) # program is also the gym environment\n\n choice = program.selectPlayerMenu(fpsclock, screen)\n if choice == \"AI\":\n pygame.display.quit()\n trainAI(program)\n elif choice == \"human\":\n launchWithGUI(program, fpsclock, screen)\n del program", "def main() -> None:\r\n game = advanced_game(MAP_FILE)\r\n\r\n root = tk.Tk()\r\n root.title('EndOfDayz')\r\n if TASK == 1:\r\n gui = BasicGraphicalInterface\r\n elif TASK == 2:\r\n gui = ImageGraphicalInterface\r\n # else:\r\n # gui = MastersGraphicalInterface\r\n app = gui(root, game.get_grid().get_size())\r\n app.play(game)\r\n root.mainloop()", "def main():\n game = Game(TIMES, HARDNESS)\n game.start()\n game.print_score()", "def main():\n g = DemoGame(800, 600)\n g.start()", "def main():\r\n\r\n #set the display, caption, and timer\r\n pygame.init()\r\n mainClock = pygame.time.Clock()\r\n windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)\r\n pygame.display.set_caption(\"Cat's Big Adventure\")\r\n\r\n #Display a menu, choose a level and instantiate a game\r\n display_menu(windowSurface)\r\n\r\n #initialize the game\r\n stats = [6]\r\n game = Game(stats)\r\n \r\n # run the game loop until the user quits\r\n while True:\r\n # Process events (keystrokes, mouse clicks, etc)\r\n game.process_events(windowSurface)\r\n\r\n # Update object positions, check for collisions\r\n game.run_logic()\r\n \r\n # Draw the current frame\r\n game.display_frame(windowSurface)\r\n\r\n #draw background image\r\n background_image = pygame.image.load(\"sky.png\").convert()\r\n windowSurface.blit(background_image, [0, 0])\r\n \r\n mainClock.tick(FRAMERATE)", "def main():\n args = get_parser().parse_args()\n players = prepare_game(\n decks_count=args.decks,\n auto_mode=args.auto_mode,\n player_one_name=args.name_player,\n players_count=args.players,\n )\n game(players=players)", "def main():\n boba_blast_game.main()", "def main():\n \n # load_and_initialize_func()\n\n loop_and_update_forever()\n\n pygame.quit()", "def main():\n #Initialize pygame\n pygame.init()\n\n #Set up the display and draw it to screen\n display = DisplayUpdater()\n display.generate_display()\n\n #Set up the audio player\n sound = AudioPlayer()\n\n sound.play_menu_music()\n #Set up the controls\n controls = PlayerInput()\n\n #Start off in the main menu, can go to credits, leaderboard, or game\n display.show_main_menu()\n user_input = controls.get_menu_input(sound)\n #While the user hasn't quit from the main menu\n while user_input != INPUT.ESCAPE:\n #If the player hits ENTER, launch the game\n if user_input == INPUT.ENTER:\n play_demon_music = game(display, sound, controls)\n sound.play_menu_music(play_demon_music)\n controls.clear_menu_input()\n user_input = INPUT.SPACE\n\n #If the player hits SPACE, go to the leaderboard\n if user_input == INPUT.SPACE:\n display.show_leaderboard()\n user_input = controls.get_menu_input(sound)\n 
#If the player hits C, go to the credits\n if user_input == INPUT.C:\n display.show_credits()\n user_input = controls.get_menu_input(sound)\n\n #If the player hits ESC, return to the main menu.\n #must be in own if statement so we don't quit\n if user_input in (INPUT.ESCAPE, INPUT.SPACE, INPUT.C):\n display.show_main_menu()\n user_input = controls.get_menu_input(sound)", "def main(argv):\n config_options = parse_config(CONFIG_FILE_NAME)\n arguments_options = parse_args(argv, **config_options)\n playgame.main(arguments_options)", "def main():\n\n # Create logging file, rotate if filesize exceeds 1MB\n logger.add(\"logs/{time}.log\", rotation=\"1 MB\")\n\n GameContainer()\n logger.info(\"Started the game launcher. Make sure to support pygame!\")", "def main():\n pygame.init()\n\n try:\n filename = sys.argv[1]\n except IndexError:\n usage()\n\n game = Game.from_file(filename)\n grid = game.get_grid()\n width, height = grid.get_width(), grid.get_height()\n win = pygame.display.set_mode((width*CELL_SIZE, height*CELL_SIZE))\n selected = 0 # default selected player\n select_player(selected)\n render(win, grid)\n\n while not (game.winning() or game.losing()):\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONUP:\n selected = handle_click_event(grid, selected)\n\n elif event.type == pygame.KEYUP and event.key in KEY_DIRECTIONS:\n game.next_step(selected, KEY_DIRECTIONS[event.key])\n render(win, grid)\n\n # QUIT\n elif event.type == pygame.QUIT:\n pygame.quit()\n\n if game.winning():\n text = \"You win!\"\n game_status = \"win\"\n elif game.losing():\n text = \"You lose!\"\n game_status = \"lose\"\n\n display_end_screen(win, text, game_status)", "def main():\n global lp\n global games, selected_game\n \n # Create a Launchpad instance and start it up\n lp = Launchpad()\n lp.open()\n lp.reset()\n \n populate_games()\n draw_main_menu()\n \n playing = True\n\n # Timekeeping\n last_time = 0\n anim_timer = 0\n\n while playing:\n # Get delta_time this loop\n cur_time = perf_counter()\n delta_time = cur_time - last_time\n last_time = cur_time\n\n # Get next input\n button = lp.button_state_xy()\n \n # Animate border\n anim_timer += delta_time\n if anim_timer > BORDER_ANIM_DELAY:\n anim_timer = 0\n draw_border()\n \n # Handle input (if any)\n if button:\n x = button[0]\n y = button[1]\n \n # If button pressed\n if button[2]:\n # Write red to pressed button if circle button\n if x == 8 or y == 0:\n lp.led_ctrl_xy(x, y, 3, 0)\n \n if (x, y) == BUT_LEFT:\n selected_game = (selected_game + 1) % len(games)\n draw_cover()\n \n if (x, y) == BUT_RIGHT:\n selected_game = (selected_game - 1) % len(games)\n draw_cover()\n \n # If button released\n if not button[2]:\n # Write menu color to released button if circle button\n if y == 0:\n lp.led_ctrl_xy(x, y, *menu_leds[x]) # Top row\n if x == 8:\n lp.led_ctrl_xy(x, y, *menu_leds[7 + y]) # Right column\n \n if (x, y) == BUT_START:\n lp.clear_input()\n lp.reset()\n games[selected_game].play()\n draw_main_menu()\n \n if (x, y) == BUT_QUIT:\n playing = False\n \n lp.draw()\n \n lp.reset()\n lp.close()", "def main():\r\n if check_argv():\r\n if len(sys.argv) == 3:\r\n gui = GUI(sys.argv[1], int(sys.argv[2]), True, ip=None)\r\n gui.create_board()\r\n gui.root.title(\"Server\")\r\n if not gui.is_human():\r\n gui.ai.find_legal_move(gui.game, gui.update_game)\r\n gui.run_game()\r\n elif len(sys.argv) == 4:\r\n ip = socket.gethostbyname(socket.gethostname())\r\n gui = GUI(sys.argv[1], int(sys.argv[2]), False, ip)\r\n gui.create_board()\r\n 
gui.root.title(\"Client\")\r\n if not gui.is_human():\r\n gui.ai.find_legal_move(gui.game, gui.update_game)\r\n gui.run_game()", "def main():\n even_game()", "def main():\n game_of_life(10, 20)", "def main():\n display, clock = game.init_pygame()\n highscores = HighScores(display, clock)\n highscores.run()", "def main():\n\n # Fix crackling audio\n util.set_environment('PULSE_LATENCY_MSEC', '60')\n\n # Replace launcher with game exe in proton arguments\n util.replace_command('FF9_Launcher.exe', 'x64/FF9.exe')", "def main():\n game = TinkerGame()\n game.setup()\n while game.calculate_points() > 0 and not game.game_over:\n game.play()\n game.end()", "def main():\n run_it = scene.Control()\n state_dict = {\"TITLE\" : title.Title(),\n \"INTRO\" : cutscene.Cutscene0(),\n \"GAMEPLAY\" : gameplay.gamePlay(),\n \"ENDING\" : cutscene.Cutscene1()\n }\n run_it.setup_states(state_dict, \"TITLE\")\n run_it.main()", "def start_game(self):\n\n\t\tpass", "def main():\n play_game(progression)", "def main():\n global levels\n difficulty = select_difficulty()\n start_game(difficulty)", "def main():\n secret_word = get_word()\n play_game(secret_word)", "def main():\n field = Field(10, 10)\n snake = Snake((0, 0))\n game = Game(field, snake)\n game.start()", "def run_gui_game():\n # Set up game\n view = GuiView()\n game = GameEngine(view)", "def game_main():\n DebugMessage(\"\"\"def:game_main\"\"\")\n gamelevels = (list(GameLevels.__members__))\n\n # Loop though game menu till user quits\n spacing = 13\n while True:\n print(gui_bar)\n print(\" \" * spacing + \"FILL IN THE BLANKS GAME\")\n print(gui_bar)\n\n selection = menu_selection(\"Select a difficulty level:\", gamelevels)\n game_session(selection)", "def setup_game(self):", "def main(self):\n _age = info.getInfo(self)\n _flag = game.check_age(self, _age)\n if _flag == False:\n exit()\n game.wants_to_play(0)", "def main():\n initialize()\n inputs = InputsTemp()\n\n ui_font = pygame.font.SysFont(\"Comic Sans MS\", 50)\n\n assets_library = AssetsLibrary((Path(__file__).parent / \"Assets\"))\n\n # todo: create display class to wrap display from pygame\n window = setup_display(inputs.width_height)\n\n background_img = assets_library.assets.bg_black\n\n run = True\n FPS = 60\n lives = 5\n level = 1\n clock = pygame.time.Clock()\n\n ui_margin = {\n \"left\": 10,\n \"right\": 10,\n \"top\": 10,\n \"bottom\": 10,\n }\n\n def redraw_window():\n window.blit(background_img.get_image(inputs.width_height), (0, 0))\n\n lives_label = ui_font.render(f\"lives: {lives}\", 1, (255, 255, 255))\n level_label = ui_font.render(f\"level: {level}\", 1, (255, 255, 255))\n\n window.blit(lives_label, (ui_margin[\"left\"], ui_margin[\"top\"]))\n window.blit(level_label, (inputs.width_height[0] - level_label.get_width() - ui_margin[\"right\"], ui_margin[\"top\"]))\n pygame.display.update()\n\n while run:\n clock.tick(FPS)\n\n redraw_window()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n print(\"Game ended\")", "def main(**kwargs):\n print('Start')\n agent = initAgent(**kwargs)\n kwargs['agent'] = agent\n result = []\n\n def mainsub(*args):\n game = Game(**kwargs)\n game.display(kwargs['noshow'])\n while True:\n # get_input = getch(\"Enter direction (w/a/s/d): \")\n get_input = game.action()\n if get_input in keypad:\n game.move(keypad.index(get_input))\n game.update()\n # elif get_input == \"q\":\n # break\n # else:\n # print(\"\\nInvalid choice.\")\n # continue\n if game.end:\n game.savegame()\n game.display(kwargs['noshow'])\n 
print(\"Result:\", game.nturn, game.score)\n break\n game.display(kwargs['noshow'])\n result.append((game.score, game.nturn))\n game.agent.replay()\n if kwargs['train']:\n game.agent.save()\n game.reset()\n if kwargs['train']:\n np.save('result.%s' % game.agent.algo, np.array(result))\n\n map(mainsub, range(kwargs['n']))\n print(\"Thanks for playing.\")", "def play_game():\n pass", "def main():\n game = Blackjack()\n game.play()", "def main():\n\n # This is for text mode.\n\n if len(sys.argv) == 2 and sys.argv[1] == '-t':\n model.main()\n sys.exit(0)\n\n # Do initialization.\n\n pygame.init()\n screen = pygame.display.set_mode(DISPLAY_MODE)\n pygame.display.set_caption(TITLE)\n clock = pygame.time.Clock()\n background = pygame.Surface(screen.get_size()).convert()\n background.fill(BACKGROUND)\n pygame.display.flip()\n\n game_model = model.Game()\n board_view = view.Board(game_model)\n score_board = view.ScoreBoard(game_model)\n rendering_groups = [board_view, score_board]\n\n while True:\n\n clock.tick(FRAMES_PER_SEC)\n scheduler.tick()\n\n # Handle user input.\n\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key in (K_ESCAPE, K_q) or event.type == QUIT:\n sys.exit(0)\n elif event.key == K_h:\n url = \"file://\" + os.path.abspath(data.find(\"help.html\"))\n webbrowser.open(url, new=True)\n elif event.key == K_r:\n game_model.reset()\n elif event.type == MOUSEBUTTONDOWN:\n for square_view in board_view:\n if square_view.rect.collidepoint(*pygame.mouse.get_pos()):\n xyz = square_view.square_model.xyz\n try:\n game_model.move(xyz)\n except ValueError:\n pass\n break\n\n # Provide the simulation and render it.\n\n for i in rendering_groups:\n i.update()\n i.clear(screen, background)\n pygame.display.update(i.draw(screen))", "def run_game_logic(self):\n pass", "def main():\n\n # after first round it will ask if want to change word list\n first_round = True\n\n # 1.\n print_game_logo()\n\n # will break out of loop when the player wouldn't want another round\n while True:\n if not first_round:\n if get_yes_no(\"Would you like to switch to a different word-list?\"):\n # 2.1.\n print_game_logo()\n word_list_path = change_word_list()\n else:\n sys_comment(\"Playing with the same word-list\")\n else:\n # 2.1.\n word_list_path = change_word_list()\n first_round = False\n\n # 2.2.\n secret_word = change_secret_word(word_list_path)\n\n # starting the game\n hangman(secret_word)\n\n # finished the game - ask if want another round\n if get_yes_no(\"Would you like to play another game?\"):\n sys_comment(\"Starting another game\")\n else:\n sys_comment(\"Quitting\")\n break\n\n return None", "def main():\n run_it = tools.Control(prepare.ORIGINAL_CAPTION)\n state_dict = {\"SPLASH\" : splash.Splash(),\n \"MENU\" : menu.Menu(),\n \"DEMO\" : demo.Demo(),\n \"GAME\" : game.Game()}\n run_it.setup_states(state_dict, \"SPLASH\")\n run_it.main()", "def main():\r\n app = application.Application()\r\n app.initializeLayer(menu.Menu())\r\n app.run()", "def game_main_loop():\n game_quit = False\n\n while not game_quit:\n #TODO get player input\n events_list = pygame.event.get()\n\n #TODO process input\n for event in events_list:\n if event.type == pygame.QUIT:\n game_quit = True\n draw_game()\n\n pygame.quit()\n exit()", "def main():\r\n # Initialize words from specific file\r\n words_list = hangman_helper.load_words()\r\n # Run single game with given word list to choose from\r\n run_single_game(words_list)\r\n # Ask the user if he would like to play again\r\n request = 
hangman_helper.get_input()\r\n if request[INPUT_TYPE] == hangman_helper.PLAY_AGAIN:\r\n if request[INPUT_VALUE]:\r\n run_single_game(words_list)", "def run_game(self) -> None:\n decision = 0\n if self._initial:\n self._initial = False\n while decision != 1:\n try:\n display_no_combat_init(self.hero)\n decision = get_user_input([1, 2, -1])\n if decision == -1:\n self._quit()\n elif decision == 2:\n self._show_bag()\n else:\n break\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")\n\n while not self.hero.is_dead:\n try:\n self._load_map()\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")", "async def main(self):\n\n # Fetching the game memory\n memory = await self._read_memory()\n\n if not memory:\n log.warn(\"Could not find GD. Program will wait until it is found.\")\n gdsb.wait_for_gd()\n return\n\n if memory.is_in_level():\n\n if memory.get_level_id() != self.previous_level_id:\n await self.seal_embed()\n\n # Fetch gamestate data\n self.current_level = await self._fetch_level_info()\n \n # Gets wether or not the level being played is the exact same level that was played just previously, so that it will continue to use the same embed\n if self.previous_level_id != self.current_level.id:\n\n self.embed_message = None\n self.previous_embed_message = None\n\n self.session.start_attempts = self.current_level.attempts\n self.session.old_best = self.current_level.best_percent\n self.session.best = 0\n\n else:\n\n if self.previous_embed_message is not None:\n self.embed_message = self.previous_embed_message\n\n self.previous_level_id = self.current_level.id\n\n # So for some reason, the main levels all have their creator blank, so we just set it to RobTop\n if self.current_level.creator == \"\":\n self.current_level.creator = \"RobTop\"\n\n # Getting if you are playing a main level or not. If so, we have to manually set the difficulty using the list I made earlier\n if self.current_level.id in range(1,22):\n \n self.current_level.difficulty = const.MAIN_LEVEL_DIFFICULTIES[self.current_level.id-1]\n\n # Checks if the player is in practice mode or not. 
If they are, it will display a different color\n if self.current_level.is_practice_mode():\n title = \"Practicing: {0}\"\n color = discord.Color.from_rgb(59, 223, 245)\n else:\n title = \"Playing: {0}\"\n color = discord.Color.from_rgb(18, 219, 31)\n\n # A few little extra texts that go next to the title\n extra_text = \"\"\n if self.current_level.percent == 100:\n\n if self.current_level.is_practice_mode():\n extra_text=\" - PRACTICE COMPLETE!\"\n else:\n extra_text=\" - LEVEL COMPLETE!\"\n color = discord.Color.from_rgb(237, 220, 28)\n\n elif self.current_level.best_percent > self.session.old_best:\n extra_text = \" - New Best!\"\n self.session.old_best = self.current_level.best_percent\n\n # Saving the best percent of the session\n if self.current_level.percent > self.session.best and not self.current_level.is_practice_mode():\n self.session.best = self.current_level.percent\n\n # Calculating the current attempts on a level\n self.current_level.attempts = (self.current_level.attempts - self.session.start_attempts) + 1\n\n rating_text = self._get_rating_text()\n category = self._get_category_text()\n\n self.embed.title = title.format(self.current_level.name)\n self.embed.description = f\"By {' | '.join((self.current_level.creator, rating_text, category))}\"\n self.embed.color = color\n\n self.embed.set_thumbnail(url=const.FACES[const.DIFFICULTIES.index(self.current_level.difficulty)])\n\n # Getting user\n user = self.bot.get_user(conf.user)\n\n self.embed.set_author(name=user.display_name, icon_url=user.avatar_url)\n\n progress_bar_state = self._get_progress_bar(self.current_level.percent)\n\n fields = (\n {\"name\": \"Attempt:\", \"value\": self.current_level.attempts, \"inline\": True},\n {\"name\": \"Best %:\", \"value\": f\"{self.current_level.best_percent}%\", \"inline\": True},\n {\"name\": \"Current Progress:\", \"value\": f\"{self.current_level.percent}%{extra_text}\\n{progress_bar_state}\", \"inline\": False}\n )\n\n for i, field in enumerate(fields):\n\n if len(self.embed.fields) < len(fields):\n self.embed.add_field(**field)\n else:\n self.embed.set_field_at(i, **field)\n \n self.embed.set_footer(text=\"Level ID: {0}\".format(self.current_level.id))\n \n # Sending embed\n\n channel = self.bot.get_channel(conf.channel)\n\n if not channel:\n log.error(f\"Could not find channel with id: {conf.channel}. 
Use '{conf.prefix}set_channel' to set the channel.\")\n else:\n #If the channel is found, edit the message the embed has been sent to, and if it dosent exist, create it.\n if self.embed_message is None:\n self.embed_message = await channel.send(embed=self.embed)\n else:\n await self.embed_message.edit(embed=self.embed)\n \n else:\n\n if memory:\n await self.seal_embed()\n\n #Sets some globals so that the embed can be reused if the same level is played again\n self.previous_embed_message = self.embed_message\n self.embed_message = None", "def main(cls):\n parser = optparse.OptionParser()\n parser.add_option('-c', '--columns', type=\"int\", default=16)\n parser.add_option('-r', '--rows', type=\"int\", default=16)\n parser.add_option('-m', '--mines-density', type=\"float\", default=0.2,\n help=\"percent of mines: 0.15 is trivial, 0.2 good [default], 0.25 hard\")\n (options, args) = parser.parse_args()\n if args:\n parser.error(\"unexpected arguments: \" + \" \".join(args))\n \n game = cls(options.columns, options.rows, options.mines_density)\n game.window.mainloop()", "def main():\r\n\r\n pygame.init()\r\n pygame.display.init()\r\n\r\n # Set the pygame clock\r\n clock = pygame.time.Clock()\r\n\r\n pygame.display.set_caption(\"Blackbox game\")\r\n current_game = BlackBoxGame()\r\n clock = pygame.time.Clock()\r\n\r\n while True:\r\n current_game.check_events()\r\n clock.tick(60)\r\n current_game.update_screen()\r\n\r\n pygame.quit()", "def start_menu():\n\n # Print main menu.\n print \"-\" * 8 + \"Main Menu\" + \"-\" * 8\n print \"1. Start Game\"\n print \"2. Instructions\"\n print \"3. Credits\"\n print \"4. Extensions\"\n print \"5. Quit\"\n choice = valid(\"\\nWhat would you like to do? \", 1, 5)\n\n if choice == 1: # Choose game settings.\n\n # Setup the game.\n print \"\\n\" + \"-\" * 8 + \"Select Game Type\" + \"-\" * 8\n print \"1. Quick Game\"\n print \"2. Manual\"\n game_type = valid(\"\\nSelect your game type: \", 1, 2)\n\n if game_type == 1:\n size = 10\n elif game_type == 2:\n size = valid(\"\\nSelect a board size from 2 to 100000: \", 2, 100000)\n\n # Select Player 1.\n print \"\\n\" + \"-\" * 8 + \"Select Player\" + \"-\" * 8\n print \"1. Human\"\n print \"2. Computer\"\n hero_type = valid(\"\\nSelect player type: \", 1, 2)\n\n if hero_type == 1: # Input human player name.\n name1 = raw_input(\"\\nWhat is Player 1's name: \")\n player1 = player.Player(name1, size)\n elif hero_type == 2: # Select AI type.\n print \"\\n\" + \"-\" * 8 + \"Select AI\" + \"-\" * 8\n print \"1. Random\"\n print \"2. Random+\"\n print \"3. SmartRandom\"\n name1 = valid(\"\\nChoose the AI: \", 1, 3)\n player1 = player.Ai(name1, size)\n\n # Select Player 2.\n print \"\\n\" + \"-\" * 8 + \"Select Opponent\" + \"-\" * 8\n print \"1. Human\"\n print \"2. Computer\"\n opp_type = valid(\"\\nSelect your oppenent: \", 1, 2)\n\n if opp_type == 1: # Input human player name.\n name2 = raw_input(\"\\nWhat is Player 2's name: \")\n player2 = player.Player(name2, size)\n elif opp_type == 2: # Select AI type.\n print \"\\n\" + \"-\" * 8 + \"Select AI\" + \"-\" * 8\n print \"1. Random\"\n print \"2. Random+\"\n print \"3. 
SmartRandom\"\n name2 = valid(\"\\nChoose the AI: \", 1, 3)\n player2 = player.Ai(name2, size)\n\n if game_type == 1: # Basic game with random placement.\n player1.board.random_board(1, 2, 2, 1)\n player2.board.random_board(1, 2, 2, 1)\n elif game_type == 2:\n n = _fleet(size) # Helper function asks for the size of the fleet.\n\n # Select type of placement.\n print \"\\n\" + \"-\" * 8 + \"Select Type of Placement\" + \"-\" * 8\n print \"1. Random\"\n print \"2. Manual\"\n place = valid(\"\\nChoose placement: \", 1, 2)\n\n if place == 1 or hero_type == 2: # Random placement for all.\n player1.board.random_board(n[0], n[1], n[2], n[3])\n player2.board.random_board(n[0], n[1], n[2], n[3])\n elif place == 2: # Manual placement.\n if opp_type == 2: # Random placement of computer ships.\n player2.board.random_board(n[0], n[1], n[2], n[3])\n else:\n\n # Ask for manual placement of Player 1\"s ships.\n print \"*\" * 50\n print \"\\n%s, please place your ships.\\n\" % name1\n print \"*\" * 50\n _place(player1, size, 2, n[0])\n _place(player1, size, 3, n[1])\n _place(player1, size, 4, n[2])\n _place(player1, size, 5, n[3])\n\n # Switch players for placement.\n print \"\\n%s and %s, please switch.\" % (name1, name2)\n print \"Press Enter to continue. \"\n cont()\n\n # Asks for manual placement of Player 2\"s ships.\n print \"*\" * 50\n print \"\\n%s, please place your ships.\\n\" % name2\n print \"*\" * 50\n _place(player2, size, 2, n[0])\n _place(player2, size, 3, n[1])\n _place(player2, size, 4, n[2])\n _place(player2, size, 5, n[3])\n\n run_game(player1, player2) # Play the game!\n\n elif choice == 2: # Read the instructions.\n print_file(\"./media/instructions.txt\", True)\n start_menu()\n elif choice == 3: # Read the credits.\n print_file(\"./media/credits.txt\", True)\n start_menu()\n elif choice == 4: # Read about the extensions.\n print_file(\"extensions.txt\", True)\n start_menu()\n elif choice == 5: # Quit the game.\n quit()\n print \"\"\n start_menu()", "def main():\n global CURRENT_GAME_LEVEL\n app = QtGui.QApplication(sys.argv)\n\n file_existence = os.path.exists(\"Level.txt\")\n\n # If file exist read level from file to restore previous level.\n if file_existence is True:\n file_object = open(\"Level.txt\", \"r\")\n level = int(file_object.read())\n file_object.close()\n # If file doesn't exist, assume default level as beginner and start game.\n else:\n file_object = open(\"Level.txt\", \"w\")\n level = DifficultyLevel.BeginnerLevel\n file_object.write(str(level))\n file_object.close()\n\n # save current game level in global which can be used by others.\n CURRENT_GAME_LEVEL = level\n (length, width, minecount) = get_grid_size(level)\n\n GameUI(length, width, minecount)\n sys.exit(app.exec_())", "def main():\n\n # Init pygame\n pygame.init()\n screen = pygame.display.set_mode((500, 310))\n pygame.display.set_caption(\"Black Jack by Hackiflette\")\n\n # Load background image\n bgd_tile = load_image(\"background_menu.png\")\n background = pygame.Surface((500, 310))\n background.blit(bgd_tile, (0, 0))\n\n # Prepare text\n title_font = pygame.font.Font(None, 36)\n text = title_font.render(\"Black Jack Project\", 2, (255, 255, 255))\n\n # Display on windows\n screen.blit(background, (0, 0))\n screen.blit(text, (80, 30))\n pygame.display.flip()\n\n # Init sprites\n all_sprites = pygame.sprite.RenderUpdates()\n clock = pygame.time.Clock()\n\n play = True\n while play:\n\n # Clear all the sprites\n all_sprites.clear(screen, bgd_tile)\n all_sprites.update()\n\n # Check for events\n for event in 
pygame.event.get():\n if event.type == QUIT:\n play = False\n\n # Update the scene\n dirty = all_sprites.draw(screen)\n pygame.display.update(dirty)\n\n clock.tick(40)\n\n pygame.quit()", "def play_game():\n pass", "def basic_begin_game(game_context) :\n game_context.world.set_game_defined()\n execute_context(game_context)", "def main():\n #Check any hardward you are using is present and initialise it here\n print(\"Initialising hardware\")\n \n #Use a try..finally structure so that the program exits gracefully on hitting any\n #errors in the callback functions and cleans up any hardware you are using.\n try:\n cnt = RobotController(\"Game Controller Template Program\", initStatus,\n leftTriggerChanged = analogueTriggerChangeHandler,\n leftStickChanged = analogueStickChangeHandler,\n hatChanged = hatHandler,\n startBtnChanged = btnHandler,\n triangleBtnChanged = triangleBtnHandler\n )\n \n #Check if the controller class initialised successfully\n if cnt.initialised :\n keepRunning = True\n #You can put any code here you want to use to indicate that a supported game\n #controller is connected and the program is ready. (e.g. Flash some LEDs green)\n else:\n keepRunning = False\n #You can put any code here you want to use to indicate that no supported game\n #controller was detected (the program will exit afterwards). e.g. Flash some LEDs red\n \n # -------- Main Program Loop -----------\n while keepRunning == True :\n #You have to call the controllerStatus function in a loop, as this is what\n #triggers any callback functions for controller state changes. It also checks for\n #the quit event (occurs when the pygame window is closed by a user), and returns\n #False if this happens. So the return value is used to exit the loop on quit.\n keepRunning = cnt.controllerStatus()\n \n finally:\n #Clean up pygame and any hardware you may be using\n #(e.g. 
Turn off LEDs and power down motors)\n pygame.quit()\n if cnt.initialised :\n #Put any clean up code for your program here\n print(\"Cleaning up\")", "def main():\n game = Hangman()\n game.play_hangman()", "def main():\n # Add your main code here\n display_menu()\n pass", "def main():\n global TURRET\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pg.init()\n pg.display.set_caption(CAPTION)\n pg.display.set_mode(SCREEN_SIZE)\n TURRET = pg.image.load(\"turret.png\").convert()\n TURRET.set_colorkey(COLOR_KEY)\n Control().main_loop()\n pg.quit()\n sys.exit()", "def game_initialize():\n\tglobal SURFACE_MAIN, SURFACE_MAP, PLAYER, FOV_CALCULATE, CLOCK, ASSETS, CAMERA, PREFERENCES\n\n\tpygame.init()\n\tpygame.key.set_repeat(555, 85)\t\n\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_celtic.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_fantasy.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_mesopotamian.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_norse.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_region.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\jice_town.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\mingos_demon.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\mingos_dwarf.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\mingos_norse.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\mingos_standard.cfg\")\n\tlibtcod.namegen_parse(\"data\\\\namegen\\\\mingos_town.cfg\")\n\n\tCLOCK = pygame.time.Clock()\n\n\tCAMERA = obj_Camera()\n\n\tSURFACE_MAIN = pygame.display.set_mode((CAMERA.width, CAMERA.height))\n\n\tSURFACE_MAP = pygame.Surface((constants.GAME_WIDTH, constants.GAME_HEIGHT))\n\n\tFOV_CALCULATE = True\n\ttry:\n\t\tload_preferences()\n\t\tprint(\"Preferences Loaded\")\n\texcept:\n\t\tPREFERENCES = struc_Preferences()\n\t\tprint(\"Preferences generated\")\n\tASSETS = struc_Assets()", "def main():\n \n games = 'chess simon puzzle chess go slide go sudoku snake'.split()\n gi = 0\n game = games[gi]\n board = set_game(game)\n board.keys[K_t] = test\n \n while board.active:\n for event in pygame.event.get():\n board.do_event(event)\n if event.type == KEYDOWN:\n if event.key == K_g:\n gi = (gi + 1) % len(games)\n board = set_game(games[gi])\n \n board.update()\n \n pygame.quit()", "def play_game():\n\n _initial_deal()\n\n main_window.mainloop()", "def main():\n winning_score = 100\n counter = 1\n game_state_list = []\n\n # Enable command-line arguments\n parser = argparse.ArgumentParser()\n # Add command-line argmuemnt\n parser.add_argument('--numPlayers', type=int)\n args = parser.parse_args()\n\n # Get number of games from user input\n num_of_games = input_int(\"How many games do you want to play?: \")\n\n # Get number of players in each game\n for x in range(num_of_games):\n # Note. Use this commented code below if you want to also let the user define the number of players in each game\n #game_state_list.append((Game(6) ,(input_int(\"How many players in Game {}?: \".format((x + 1))))))\n\n # list of tuples (Game class instnace, num_of_plauyers)\n game_state_list.append((Game(6) ,args.numPlayers))\n\n # Play all games. 
Note that the games are not aware of each other\n for game_state, num_users in game_state_list:\n print \"\\nStarting Game\",counter\n game_loop(game_state,num_users,winning_score)\n counter += 1\n\n print \"Completed all the games!\"", "def main():\n Main()", "def start_game(self):\n self.board = Board(num_tableaus=self.tableau_qty, num_decks=self.decks, deal_3=self.deal_3)\n self.board.init_move_dict()\n self.board.deal(self.deck)\n\n if self.api_use:\n self.init_game_api()\n elif self.commandline:\n self.init_cl_game()\n else:\n self.init_pygame()", "def main():\n dealCards().mainloop()", "def load_game(self):\n game = Game(self.w, self.h, self.screen)\n game.run()", "def main():\n\tGame = TicTacToe()\n\tprint(\"Welcome to Tic-Tac-Toe\")\n\twhile True:\n\t\tprint(\"Player%d, take your move.\" % Game.turn)\n\t\trow = int(input(\"Enter row of move... \"))\n\t\tcol = int(input(\"Enter col of move... \"))\n\t\tGame.move(Game.turn, row, col)\n\t\tGame.printBoard()\n\t\tif Game.win:\n\t\t\trestart = int(input(\"Enter 1 to restart the game, 0 to end game... \"))\n\t\t\tif restart == 1:\n\t\t\t\tGame.restartGame()\n\t\t\telse:\n\t\t\t\tprint(\"Closing Tic-Tac-Toe Game...\")\n\t\t\t\treturn", "def main():\n\n window = ArcadeButWithStuff(screen_h=920, screen_w=1080)\n\n window.setup()\n arcade.run()", "def run_game():\n\n # Initialize pygame, settings, and screen object\n pygame.init()\n ai_settings = Settings()\n screen = pygame.display.set_mode((ai_settings.screen_width,\n ai_settings.screen_height))\n pygame.display.set_caption(\"Galaga\")\n\n # Make a Play button\n play_button = Button(screen, \"Play\")\n\n # Create an instance to store game statistics\n stats = GameStats(ai_settings)\n\n # Make a scoreboard\n scoreboard = Scoreboard(ai_settings, screen, stats)\n\n # Make a Ship, bullet group, alien group\n ship = Ship(ai_settings, screen)\n bullets = Group()\n aliens = Group()\n\n # Creating an enemy fleet of aliens\n gf.create_fleet(ai_settings, screen, ship, aliens)\n\n # Start the main loop for the game\n while True:\n # Checking for keyboard events \n gf.check_events(ai_settings, screen, stats, scoreboard,\n play_button, ship, aliens, bullets)\n\n if stats.game_active:\n # Update group objects\n ship.update()\n\n gf.update_bullets(ai_settings, screen, stats,\n scoreboard, ship, aliens, bullets)\n gf.update_aliens(ai_settings, screen, stats, scoreboard, ship, aliens, bullets)\n \n gf.update_screen(ai_settings, screen, stats,\n scoreboard, ship, aliens, bullets, play_button)", "def main() -> None:\n # the current game is initialized with 1, 3, 5, 7 matches on the 4 rows.\n game: List[int] = [1, 3, 5, 7]\n\n print(\"\\nGame of Nim\")\n print( \"===========\")\n display_game(game)\n start = input(\"Do you want to start? (y/n) \")\n print()\n if start==\"y\" or start==\"Y\":\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n while True:\n print(\"My turn\")\n computer_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"I WON\\n\")\n break\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"YOU WON\\n\")\n break", "def Gameloop():", "def main():\n if len(sys.argv) < 3:\n print(\"2 arguments are required: input png path and turn [white | black]. 
Optional: chess AI think time expressed in seconds, oppponent skill level [0 - 20]\")\n return\n\n png_path = sys.argv[1]\n turn = sys.argv[2].lower()\n\n if len(sys.argv) < 4:\n think_time = 1.0\n else:\n try:\n think_time = float(sys.argv[3])\n if think_time <= 0:\n raise ValueError()\n except:\n print(\"Think time must be a positive number\")\n return\n\n if len(sys.argv) < 5:\n opponent_skill = 20.0\n else:\n try:\n opponent_skill = float(sys.argv[4])\n if opponent_skill < 0 or opponent_skill > 20:\n raise ValueError\n except:\n print(\"Opponent skill must be a number between 0 and 20\")\n return\n\n if not png_path.lower().endswith(\".png\"):\n print(\"Invalid png path!\")\n return\n\n if turn != \"white\" and turn != \"black\":\n print(\"Turn must be 'white' or 'black'\")\n return\n\n print(\"Reading board state from image...\")\n chess_board = board_from_png(png_path)\n print(\"Done! Opening GUI...\")\n solve_chess_problem(chess_board, turn == \"white\", think_time, opponent_skill)", "def main():\n\n\tif len(sys.argv) > 1 and sys.argv[1]:\n\t\t_, _, hash = read_file(sys.argv[1])\n\t\toffset_x = 0\n\t\toffset_y = 0\n\telse:\n\t\toffset_x, offset_y, hash = screenshot()\n\n\tprint(hash)\n\tgame = eliza_logic.Game(0)\n\tgame.exact_setup(hash)\n\tprint(game)\n\tresult = game.global_solve(-1)\n\tprint(result)\n\n\t# If it was a screen grab, we can actually do this -- just type n/q/c to quit or anything else to continue\n\tif result is not None and offset_x and offset_y:\n\t\tx = six.moves.input(\"Ready for automated solution? \")\n\t\tif x.lower() in [\"n\", \"q\", \"c\"]:\n\t\t\treturn\n\n\t\texecute_solution(offset_x, offset_y, result)", "def main():\n doctest.testmod()\n game()", "def main(self,Surf):\n while True:\n if self.state == \"GAME\":\n self.event_loop()\n self.update(Surf)\n elif self.state == \"QUIT\":\n break\n pg.display.update()\n self.Clock.tick(65)", "def main():\n arcade.open_window(WINDOW_WIDTH, WINDOW_HEIGHT, \"Snake.exe\")\n # Set the window background colour\n arcade.set_background_color(light_green)\n\n # Calls the on_draw method every 1/3(20 seconds) of a second\n arcade.schedule(on_draw, 1/3)\n # Keeps the window open until closed by the user\n arcade.run()", "def start_game(self):\n print(\"hi there, game started!\")\n self.draw()", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n # Initialize the event manager.\n event_manager = events.EventManager()\n AppState.get_state().set_event_manager(event_manager)\n\n # Initialize and register the application heartbeat.\n heart_beat = HeartBeat()\n event_manager.register_listener(heart_beat)\n\n # Initialize and register the world.\n basic_experiment = experiment.basic.BasicExperiment()\n world = basic_experiment.get_world()\n event_manager.register_listener(world)\n AppState.get_state().set_world(world)\n\n # Initialize pygame.\n surface = init()\n\n # Initialize and register the view.\n main_view = view.View(surface)\n event_manager.register_listener(main_view)\n\n # Initialize and register the controller.\n main_controller = controller.Controller()\n event_manager.register_listener(main_controller)\n\n # Start the heartbeat.\n heart_beat.run()", "def main() -> int:\n setup(\"main\", WIDTH, HEIGHT)\n init()\n for _ in range(randint(10, 10)):\n add_rand_circ()\n for _ in range(randint(10, 10)):\n add_rand_rect()\n mainloop()\n update_leaderboard()\n return 0", 
"def run_game():\r\n pygame.init()\r\n ai_settings = Settings()\r\n screen = pygame.display.set_mode(\r\n (ai_settings.screen_width, ai_settings.screen_height))\r\n pygame.display.set_caption(\"Stars\")\r\n\r\n # Make a group of stars.\r\n stars = Group()\r\n\r\n # Create a star system\r\n gf.create_star_system(ai_settings, screen, stars)\r\n \r\n # Main game loop.\r\n while True:\r\n \r\n # Let's player quit the game.\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n gf.update_screen(ai_settings, screen, stars)", "def test_runGame(self):\n # this is tested by playing the game. No good way to unit test this.\n pass", "def main():\n secret_word = get_word()\n print(secret_word)\n play_game(secret_word)", "def main():\r\n print(WELCOME_MESSAGE)\r\n\r\n playing = True\r\n while playing:\r\n\r\n # Valid inputs that the user can use\r\n move_actions = (UP, DOWN, LEFT, RIGHT)\r\n other_actions = (GIVE_UP, HELP)\r\n\r\n grid_size = int(input(BOARD_SIZE_PROMPT))\r\n\r\n # Get the puzzle and its solution\r\n solution = get_game_solution(WORDS_FILE, grid_size)\r\n puzzle = shuffle_puzzle(solution)\r\n\r\n solved = check_win(puzzle, solution)\r\n print_solution_position(solution, puzzle)\r\n\r\n # Continue to loop until the puzzle is solved or the user gives up\r\n while not solved:\r\n player_action = input(DIRECTION_PROMPT)\r\n\r\n # Player move input handler\r\n # Updates the puzzle with the new board layout, if fail alert user\r\n if player_action in move_actions:\r\n move_attempt = move(puzzle, player_action)\r\n if move_attempt:\r\n puzzle = move_attempt\r\n else:\r\n print(INVALID_MOVE_FORMAT.format(player_action))\r\n\r\n # Other inputs handler\r\n elif player_action in other_actions:\r\n if player_action == GIVE_UP:\r\n break\r\n elif player_action == HELP:\r\n print(HELP_MESSAGE)\r\n\r\n # If there is no match for input, alert the user\r\n else:\r\n print(INVALID_MESSAGE)\r\n\r\n print_solution_position(solution, puzzle)\r\n solved = check_win(puzzle, solution)\r\n\r\n # Show message depending if user won or not\r\n if solved:\r\n print(WIN_MESSAGE)\r\n else:\r\n print(GIVE_UP_MESSAGE)\r\n\r\n # Check if the user wishes to play again\r\n play_again = input(PLAY_AGAIN_PROMPT)\r\n if not (play_again.lower() == \"y\" or play_again == \"\"):\r\n playing = False\r\n print(BYE)", "def main_loop(self):\n while self.game_manager.game_state != GameState.Quit:\n\n self.handle_events()\n self.handle_ui_response()\n #in menu\n if self.game_manager.game_state == GameState.Menu: \n self.display.clear()\n\n #in game\n elif self.game_manager.game_state == GameState.Running:\n self.game_manager.move_players()\n\n #after game\n elif self.game_manager.game_state == GameState.Finished:\n if self.game_manager.winner == None:\n self.game_manager.player1.decay()\n self.game_manager.player2.decay() \n else:\n self.game_manager.loser.decay()\n self.game_manager.loser.draw()\n\n #perform game manager actions\n self.game_manager.act()\n #do all the rendering stuff\n self.render_scene()\n #control FPS\n self.clock.tick(self.FPS)", "def start():\n commands = {\"new tournament\": Controller.new_tournament,\n \"new round\": Controller.new_round,\n \"new player\": Controller.new_player,\n\n \"set round\": Controller.set_round,\n \"set player\": Controller.set_player,\n\n \"get players -all -alpha\": Controller.get_all_players_alpha,\n \"get players -all -rank\": Controller.get_all_players_rank,\n \"get players -alpha\": Controller.get_players_alpha,\n \"get 
players -rank\": Controller.get_players_rank,\n\n \"get tournament -all\": Controller.get_all_tournaments,\n \"get tournament\": Controller.get_tournament,\n\n \"get round -all\": Controller.get_all_rounds,\n \"get round\": Controller.get_round,\n\n \"get match -all\": Controller.get_all_matches,\n \"get match\": Controller.get_match,\n\n \"load\": Controller.load,\n\n \"exit\": Controller.close_app\n }\n\n # At the beginning of the program, load all data from a data_base.\n Controller.load()\n print(\"Need help? Type 'commands' to see all commands and there purposes.\")\n\n while True:\n instruction = str(input(\"ChessManager >>> \"))\n try:\n commands[instruction]()\n except KeyError:\n print(\"Wrong Command.\")", "def startGame():\n\n\tprint(\"\\nOK! Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()", "def main():\n print(\"def main\")\n return APP.run()", "def main():\n g = CommanderGame([4, 3], 3)\n g.fit_army_orders()\n g.fit_game_matrix()\n g.show_submatrixes([\n [[4, 0, 0]],\n [[2, 1, 0], [1, 1, 1]],\n ])\n print(f'Full game matrix: \\n{g.game_matrix_}')", "def main():\n\tprint(\"Welcome to TicTacToe\")\n\tboard = Board()\n\twhile (not board.isOver()):\n\t\tprint(\"It is {0}'s turn\".format(board.current) + board.__str__())\n\t\tmove = input('Where would you like to go? : ').strip()\n\t\tif (move == 'q'):\n\t\t\tbreak\n\t\telif (board.makeMove(move) == 1):\n\t\t\tboard.switchPlayer()\n\t\telse:\n\t\t\tprint(\"I didn't understand your input, these are the valid inputs:\\nentering 'q' will quit out of the game.\\n\")\n\t\t\tprint(\"entering a number will place the peice in that box, the numbers are as follows:\\n \\n1|2|3\\n-----\\n4|5|6\\n-----\\n7|8|9\\n\")\n\tprint(board.__str__() + \"\\nGame Over\")\n\tif (board.isOver() is Piece.EX or board.isOver() is Piece.OH):\n\t\tprint(\"Player {0} wins!\".format(board.isOver())) \n\telse:\n\t\tprint(\"It was a draw\")", "def main(self) -> None:\n pass", "def main():\n ans = random_word()\n run_game(ans, N_TURNS)", "def main(self):\n \n print(\"Welcome to Speedy Gonzales!\")\n print(\"Press 'a' to move your left leg, 'k' to move your right leg.\")\n print(\"Ready?\")\n \n start = time.time()\n self.score = 0\n self.previous_key = None\n \n def on_key_release(key):\n try:\n if key == keyboard_0.Key.esc:\n print(\"Good Bye!\")\n listener.stop()\n exit()\n \n if key.char == \"a\" or key.char=='k':\n msg = \" . \" if key.char=='a' else \" .\"\n print(msg)\n # Main loop to run the program.\n if key.char != self.previous_key:\n self.score = self.score + 1\n self.previous_key = key.char\n else:\n # Hit 'a' or 'k' two times in a row, fell for 2s.\n self.previous_key = None\n print(\"Ops! You fell down on your face!\")\n time.sleep(1)\n print(\"Stand up!\")\n time.sleep(1)\n print(\"Continue running!\")\n \n # Finish running.\n if self.score >= 40:\n listener.stop()\n end = time.time()\n score = round(end - start, 2)\n print(\"Congrats! 
You have run 100m in {}!\".format(score))\n \n print(\"Check the previous winner...\")\n self.print_leaderboard() \n \n score_flag = self.get_highscore(score)\n if score_flag == 0:\n # Not in leaderboard.\n print(\"Thanks for playing.\")\n else:\n if score_flag == 1:\n # In top 10 leaderboard.\n winner_msg = \"\\n\\nYou have achieved top 10! \"\n else:\n # Get champion.\n winner_msg = \"\\n\\nYou are the champion! \"\n \n # Get winner's name and update leaderboard.\n name = input(winner_msg + \"Please enter your name:\" )\n self.update_leaderboard(name, score)\n \n print(\"Lets check the new leaderboard.... \\n\\n\\n\\n\\n\")\n time.sleep(1)\n self.print_leaderboard() \n \n \n # Ask for new game.\n in_flag = input(\"New game? (Y/N)\")\n if in_flag.upper() == 'Y':\n self.main()\n else:\n print(\"Good Bye!\")\n exit()\n \n except AttributeError:\n print(key)\n\n with keyboard_0.Listener(on_release = on_key_release, suppress=True) as listener:\n listener.join()", "def main():\n app = RunSnakeRunApp(0)\n app.MainLoop()", "def start_game(self) -> None:\n self.init_game()\n self.play()", "def main(self, win):\n\n # The rock, paper, scissor buttons\n rockButton = Button(white, 50, 400, 100, 50, 'ROCK')\n paperButton = Button(white, 200, 400, 100, 50, 'PAPER')\n scissorButton = Button(white, 350, 400, 100, 50, 'SCISSOR')\n\n # Player and computer scores\n player = 0\n computer = 0\n\n run = True\n while run:\n userChoice = 'none'\n compChoice = 'none'\n beginGame = False\n for event in pygame.event.get():\n pos = pygame.mouse.get_pos()\n if event.type == pygame.QUIT:\n run = False\n\n # Control mouse button events\n if event.type == pygame.MOUSEBUTTONDOWN:\n if rockButton.isOver(pos):\n userChoice = 'rock'\n compChoice = self.computer_generate()\n beginGame = True\n elif paperButton.isOver(pos):\n userChoice = 'paper'\n compChoice = self.computer_generate()\n beginGame = True\n elif scissorButton.isOver(pos):\n compChoice = self.computer_generate()\n userChoice = 'scissor'\n beginGame = True\n\n self.display_score(win, player, computer)\n self.display_playground(win, rockButton, paperButton, scissorButton)\n\n if beginGame:\n self.game_initiate(win)\n\n self.display_player(userChoice, win)\n self.display_computer(compChoice, win)\n\n if beginGame:\n scores = self.decide_winner(userChoice, compChoice)\n pygame.display.update()\n pygame.time.delay(1000)\n player += scores[0]\n computer += scores[1]\n\n pygame.display.update()\n pygame.time.delay(40)", "async def game(self):\n pass", "def play_game():\n\n # Welcome character\n game_start = game_menu()\n while game_start != 'newgame' and game_start != 'loadgame':\n if game_start == 'walkthrough':\n game_walkthrough()\n print \" Please enter newgame, loadgame, or walkthrough\"\n game_start = game_menu()\n\n if game_start == 'newgame':\n # Initialize character and map\n character = Character.Character()\n game_intro()\n game_map = create_map(None)\n\n # Setup the starting room\n current_room = game_map['Dungeon Entrance']\n\n # Setup the character\n character_name = raw_input('Enter character name > ')\n character.set_name(character_name)\n character.set_current_room(current_room)\n character.set_game_map(game_map)\n\n elif game_start == 'loadgame':\n confirm_load = raw_input('Are you sure? 
(y or n) > ')\n if confirm_load == 'y':\n # Initialize character and map\n character = Character.Character()\n saved_game_data = load_game()\n if saved_game_data != False:\n game_map = create_map(saved_game_data['json_game_map'])\n\n # Set current room and character name based on saved JSON\n current_room = game_map[saved_game_data['current_room']]\n character_name = saved_game_data['character_name']\n character.set_name(character_name)\n character.set_current_room(current_room)\n character.set_game_map(game_map)\n\n # Add correct items to inventory\n for item in saved_game_data['json_inventory']:\n new_item = Item.Item()\n new_item.set_name(saved_game_data['json_inventory'][item]['Name'])\n new_item.set_description(saved_game_data['json_inventory'][item]['Description'])\n character.add_to_inventory(item, new_item)\n else:\n print \"\\nERROR: saved_game.json does not exist. You must start a new game.\"\n play_game()\n else:\n play_game()\n\n #handle commands\n new_command = user_input()\n while new_command != 'quit':\n handle_commands(new_command, character, game_map)\n new_command = user_input()", "def main_menu(win):\n global gen\n\n gen = 0 # reset the current generation of A.I. birds\n menu_bg = pygame.transform.scale(menu_img, (int(menu_img.get_width() * 1.5), int(menu_img.get_height() * 1.3)))\n menu_bg.set_colorkey((255, 255, 255))\n menu_pos = (SCREEN_WIDTH * 0.12, SCREEN_HEIGHT // 10)\n intro_pos = (SCREEN_WIDTH * 0.23, SCREEN_HEIGHT // 7)\n intro = score_font.render(\"Flappy Bird Clone\", False, (188, 91, 63))\n rndm_pos = (SCREEN_WIDTH * 0.47, SCREEN_HEIGHT // 5)\n rndm = score_font.render(\"&\", False, (188, 91, 63))\n intro2_pos = (SCREEN_WIDTH * 0.35, SCREEN_HEIGHT // 3.9)\n intro2 = score_font.render(\"NEAT A.I.\", False, (188, 91, 63))\n by_pos = (SCREEN_WIDTH * 0.51, SCREEN_HEIGHT // 3.2)\n by = small_font.render(\"by Viktor Stefanov\", False, (188, 91, 63))\n\n\n win.blit(background_img, (0, 0))\n win.blit(menu_bg, menu_pos)\n win.blit(intro, intro_pos)\n win.blit(rndm, rndm_pos)\n win.blit(intro2, intro2_pos)\n win.blit(by, by_pos)\n\n play_color = (250, 100, 85)\n ai_color = (250, 100, 85)\n while True:\n for event in pygame.event.get():\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == pygame.BUTTON_LEFT:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n if play.collidepoint(mouse_x, mouse_y):\n play_game(win)\n elif ai_play.collidepoint(mouse_x, mouse_y):\n run()\n elif event.type == pygame.QUIT:\n pygame.quit(), sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key in (pygame.K_q, pygame.K_ESCAPE):\n pygame.quit(), sys.exit()\n\n play_game_pos = (SCREEN_WIDTH // 2.8, SCREEN_HEIGHT // 2.3)\n play_game_opt = score_font.render(\"Play Game\", False, play_color)\n ai_game_pos = (SCREEN_WIDTH // 3.15, SCREEN_HEIGHT // 1.8)\n ai_game = score_font.render(\"Let A.I. play\", False, ai_color)\n play = win.blit(play_game_opt, play_game_pos)\n ai_play = win.blit(ai_game, ai_game_pos)\n\n # check if the mouse is over the text on the screen and add a hover effect\n mouse_x, mouse_y = pygame.mouse.get_pos()\n if play.collidepoint(mouse_x, mouse_y) != 0:\n play_color = 140, 168, 69\n else:\n play_color = (250, 100, 85)\n if ai_play.collidepoint(mouse_x, mouse_y) != 0:\n ai_color = 140, 168, 69\n else:\n ai_color = (250, 100, 85)\n\n pygame.display.update()" ]
[ "0.8002683", "0.7967029", "0.78307676", "0.7737318", "0.7592417", "0.7592117", "0.74496835", "0.74165875", "0.74120873", "0.7353868", "0.725643", "0.72122735", "0.72043264", "0.71520734", "0.71451414", "0.7072199", "0.70497036", "0.69795376", "0.69768864", "0.6971949", "0.6971595", "0.6966058", "0.6943897", "0.69029963", "0.6893981", "0.68914014", "0.68820107", "0.6878663", "0.6815054", "0.68053794", "0.6796595", "0.67714924", "0.67449296", "0.6732346", "0.6729545", "0.67061985", "0.6685804", "0.66852015", "0.66735625", "0.66695243", "0.66691417", "0.6662404", "0.65924114", "0.65842915", "0.6568481", "0.65679115", "0.65650356", "0.65493983", "0.6548975", "0.6546608", "0.6530639", "0.6521908", "0.65055823", "0.64589924", "0.6452688", "0.64474154", "0.6445443", "0.64414907", "0.6431234", "0.64239144", "0.6414743", "0.641398", "0.6413207", "0.64061534", "0.6403519", "0.64024216", "0.6398132", "0.63973105", "0.6392342", "0.63910437", "0.6390253", "0.63692766", "0.63634264", "0.6362752", "0.6358463", "0.63560003", "0.63527465", "0.6341391", "0.6341391", "0.6341391", "0.6339736", "0.63351285", "0.6321306", "0.6319284", "0.63171756", "0.6302118", "0.6293395", "0.62847155", "0.62826884", "0.62787676", "0.6274871", "0.6257917", "0.624867", "0.6230597", "0.6225443", "0.62234807", "0.62219465", "0.6211469", "0.6210658", "0.62013656", "0.61984575" ]
0.0
-1
Inserts the (priority, content) element and percolates it up in the binary heap.
def push(self, item: tuple):
    self.__heap.append(item)
    self.__sift_up(self.__len__() - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert(self, data):\n # add data to list'end\n self.heap_list.append(data)\n # adjust max-heap from bottom to top\n self.sift_up(len(self.heap_list)-1)", "def insert(self, element):\n if self.size >= self.maxsize : \n return\n self.size+= 1\n self.Heap[self.size] = element \n \n current = self.size \n \n while self.Heap[current] < self.Heap[self.parent(current)]: \n self.swap(current, self.parent(current)) \n current = self.parent(current)", "def test_insert_increases_size(sample_priorityq):\n assert len(sample_priorityq.heap_list) == 0\n sample_priorityq.insert([5, 1])\n assert len(sample_priorityq.heap_list) == 1\n sample_priorityq.insert([6, 2])\n assert len(sample_priorityq.heap_list) == 2", "def insert(self, element):\n if self.size >= self.maxsize:\n return\n self.size += 1\n self.heap[self.size] = element\n\n current = self.size\n\n while self.heap[current] < self.heap[self.parent(current)]:\n self.swap(current, self.parent(current))\n current = self.parent(current)", "def enqueue(self, item, priority):\n # TODO: Insert given item into heap\n ...", "def insert(self, to_insert: Article) -> None:\n heapq.heappush(self.heap, to_insert)", "def insert(self, k): \r\n self.heap_array.append(k)\r\n\r\n current_index = len(self.heap_array) - 1\r\n while (current_index > 0):\r\n parent_index = ((current_index-1)//2)\r\n\r\n if int(self.heap_array[current_index]) > int(self.heap_array[parent_index]): # if no vialation of the min heap property \r\n return\r\n else: # if heap property is broken then swap the parent and child that are breaking the prop \r\n self.heap_array[parent_index], self.heap_array[current_index] = self.heap_array[current_index], self.heap_array[parent_index]\r\n current_index = parent_index", "def insert(self, key, value):\n self.heap.append(None)\n hi = HeapItem(key,value)\n self.siftup(hi, len(self.heap)-1)\n return hi", "def insert(self, value):\n self.heap.append(value)\n index = len(self.heap) - 1\n self._percolate_up(index)", "def test_insert(self):\n self.minheap.heap = [0, 1, 4, 6, 9]\n self.minheap.insert(2)\n assert self.minheap.heap == [0, 1, 2, 6, 9, 4]", "def insert(self, key, value):\r\n self._data.append(self._Item(key, value))\r\n self._upheap(len(self._data) - 1) # upheap newly added position\r", "def insert(self, value):\n self.heap.append(value)\n self._shiftUp()", "def heap_push(self, value):\n if self.find(value) is None:\n self.table.append(value)\n self.percolate_up(self.get_size() - 1)", "def insert(self, item):\n self._heap.append(item)\n self._perc_up(len(self._heap) - 1)", "def push(self, priority, key):\n index = len(self.__heap)\n self.__position[key] = index\n self.__heap.append([priority, key])\n self.__bubble_up(index)", "def enqueue(self, priority, value, key=None):\n key = key if key else value\n if key in self._index:\n self._update(priority, key)\n return\n self._heap.append(Item(priority, value, key))\n self._size = len(self._heap)\n self._index[key] = self._size - 1\n self._sift_up(self._size - 1)", "def insert(self, k):\n #Append the element to the min heap\n self.heap_list.append(k)\n #Increase the size of the min heap\n self.current_size += 1\n #Move the value to its appropriate position in the min heap (following the definition of a min heap)\n self.sift_up(self.current_size)", "def insert(self,key, value):\n if key in self._position:\n # reset value for this node\n node_pos = self._position[key]\n node = self._heap[node_pos]\n node.value = value\n self._sink(node_pos)\n self._swim(node_pos)\n else:\n # insert a new node\n 
new_node = _Node(key,value)\n node_pos = len(self._heap)\n self._heap.append(new_node)\n self._position[key] = node_pos\n\n # repair priority\n self._swim(node_pos)", "def put(self, element):\n self.heap.append(element)\n # sift up the element append before\n self.sift_up(self.size() - 1)", "def insert(self, value: T) -> None:\n if self._array == []:\n self._array.append(value)\n else:\n parent_idx = (len(self._array) - 1) // 2\n curr_idx = len(self._array)\n self._array.append(value)\n \n # While the value to be inserted is less than it's parent,\n # keep swapping the parent and child from the bottom up until\n # the min heap properties hold or, until swapped with the root node.\n while value < self._array[parent_idx] and parent_idx >= 0:\n temp_value = self._array[parent_idx]\n self._array[parent_idx] = value\n self._array[curr_idx] = temp_value\n curr_idx = parent_idx\n parent_idx = (parent_idx - 1) // 2", "def heappush(heap, item):\n pass", "def push(self, x):\n assert self._data is not None\n if len(self._data) < self._n:\n heapq.heappush(self._data, x)\n else:\n heapq.heappushpop(self._data, x)", "def test_insert(self):\n data = [4, 4, 8, 9, 4, 12, 9, 11, 13]\n h = Heap(data)\n\n h.insert(7)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')\n\n h.insert(10)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')\n\n h.insert(5)\n self.assertTrue(Heap.is_heap(h.data), 'should still be a heap')", "def push(self, priority: float, item):\n heappush(self._heap, (-1 * priority, item))", "def push(self, key, value):\r\n if len(self.heap)<self.depth:\r\n heapq.heappush(self.heap, key)\r\n self.elements[key] = value\r\n else:\r\n oldkey = heapq.heappushpop(self.heap, key)\r\n self.elements[key] = value\r\n del self.elements[oldkey]", "def insert(self, item):\n self.heaplist.append(item)\n self.currentsize += 1\n self.shift_item_up(self.currentsize)", "def push(self, obj):\n # wrap the object to allow for correct pop operation\n # remember that in python it's a min-heap (not max!)\n wrap_obj = (obj.minus_priority, len(self), obj)\n # use insertion number to ensure we never compare based on obj itself!\n # additionally resolves ties by popping earliest-inserted object\n heapq.heappush(self._heap, wrap_obj)", "def push(self, item):\n heapq.heappush(self.heap, item)", "def max_heap_insert(self, new_node):\n changed_node = PriorityQueueNode(new_node.data, float(\"-inf\"))\n self.insert_at_end(changed_node)\n self.heap_increase_key(self.heap_size-1, new_node.priority_key)", "def insert(self, node, priority=0):\n\n if node in self.entry_finder:\n self.delete(node)\n entry = [priority, node]\n self.entry_finder[node] = entry\n # logger_cagada.debug(\"el puto entry %s\" % entry)\n # logger_cagada.debug(\"l nodo q c agrega %s es %s\" % (type(node), node))\n self.heappush(self.heap, entry)\n # logger_cagada.debug(\"el finde aora es %s\" % self.entry_finder)\n # logger_cagada.debug(\"el heap aora es %s\" % self.heap)\n self.valida_caca()", "def insert_elements_to_heap(heap, elements):\n for element in elements:\n heap.insert(element)", "def insert(self, id, priority):\n self.n += 1\n i = self.n\n while i > 1:\n pIdx = int(i/2)\n p = self.elements[pIdx]\n\n if priority > p[PRIORITY]:\n break\n self.elements[i] = list(p)\n self.positions[p[ID]] = 1\n i = pIdx\n\n self.elements[i][ID] = id\n self.elements[i][PRIORITY] = priority\n self.positions[id] = i", "def push(self, x):\n heapq.heappush(self.array, x)", "def build_heap(self, items):\n for key in items:\n self.insert(key)", 
"def _heapify_after_add(self,ele):\r\n parent = self._parent(ele)\r\n if ele > 0 and self._data[ele] < self._data[parent]:\r\n self.swap(ele, parent)\r\n self._heapify_after_add(parent)", "def insertElement(self, element , i ):\n\n self.heap[i] = element\n # Parent of ith position\n parenti = i // 2\n\n # Inserting element into the heap\n try:\n # Bubbling up\n if parenti != 0 and self.heap[i].dijkstraCriterion < self.heap[parenti].dijkstraCriterion:\n self.heap[i], self.heap[parenti] = self.heap[parenti], self.heap[i]\n self.insertElement(element, parenti)\n # Incrementing self.i position\n else:\n self.i += 1\n return\n\n except:\n # Bubbling up\n self.heap[i] = 'NaN'\n self.insertElement(element, parenti)\n return", "def insert(self, elem, prio):\n self.n += 1\n self.A.append( (e,w) )\n self.pos[e] = self.n\n i = self.n\n p = i // 2\n self.insert_loop(i, p)", "def test_pop_decreases_size(sample_priorityq):\n for i in range(5):\n sample_priorityq.insert([i, i + 3])\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 4\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 3\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 2", "def insert(self, value, priority=2):\n if not isinstance(priority, int):\n raise TypeError(\"Priority must be an integer\")\n if priority in self.priority_queue:\n self.priority_queue[priority].append(value)\n else:\n self.priority_queue[priority] = [value]\n print(self.priority_queue)", "def insert_element(self, element):\n if self._size == 0:\n node = self._Node(element, 0, self)\n self._array[0] = node # Add node to root of empty heap\n self._size += 1\n return self.root()\n self._size += 1\n if self._size == self._N:\n self._resize_array(self._N * 2) # Double size of array\n node = self._Node(element, self._size-1, self)\n self._array[self._size-1] = node # Insert new node at end of heap\n self._upheap(node) # Up-heap it to proper location\n return node", "def push(self, val):\n self._heap_list.append(val)\n self._build_heap()", "def insert(self, k):\n self.heapList.append(k)\n self.currentSize = self.currentSize + 1\n self.percUp(self.currentSize)", "def add(self, element):\n # add element to the heap\n self.heap.append(element)\n\n # get index of added element and parent of added element\n index = len(self.heap) - 1\n parentIndex = (index - 1) // 2\n\n # swap parents and childs while needed\n while index >= 1 and self.heap[parentIndex][1] > self.heap[index][1]:\n\n # swap parent and child\n swap = self.heap[parentIndex]\n self.heap[parentIndex] = self.heap[index]\n self.heap[index] = swap\n\n # update parent and child indexes\n index = parentIndex\n parentIndex = (index - 1) // 2", "def heappush(heap, item):\n heap.append(item)\n Heap.siftdown(heap, 0, len(heap) - 1)", "def __heappush(heap, nodes, node):\n pos = len(heap)\n heap.append(node)\n nodes[node[1]] = pos\n Graph.__siftup(heap, nodes, pos)", "def insert(self, pri):\n heaps = self.priorities\n if pri > 10 or pri < 1:\n raise ValueError(\n 'Priority must be between 1 (high) - 10 (low)'\n )\n if pri not in heaps.keys():\n self._create_priorities(pri)\n\n priority = heaps.get(pri)\n priority.push(self._order)\n self._order += 1", "def push(self, x):\n self.elements.append(x)\n self._heapify()", "def _heapify(self):\n for _ in range(len(self.elements)):\n for i in range(len(self.elements)-1, 0, -1):\n parentPosition = (i-1)/2 # defaults to int i.e. 
7/2=3, and 6/2=3\n if parentPosition < 0:\n parentPosition = 0\n \n # change this condition to '>' if coding for max-heap. This is for min-heap.\n if self.elements[i] < self.elements[parentPosition]:\n self.elements[i], self.elements[parentPosition] = self.elements[parentPosition], self.elements[i]", "def insert(self, key: K, value: V) -> None:\n if key in self.__key_map__:\n self.remove(key)\n\n entry = (value, next(self.counter), key)\n self.__key_map__[key] = entry\n\n heapq.heappush(self.queue, entry)", "def add(self, element):\n # add element to the heap\n self.heap.append(element)\n\n # get index of added element and parent of added element\n index = len(self.heap) - 1\n parentIndex = (index - 1) // 2\n\n # swap parents and childs while needed\n while index >= 1 and self.heap[parentIndex] < self.heap[index]:\n\n # swap parent and child\n self.swap(parentIndex, index)\n\n # update parent and child indexes\n index = parentIndex\n parentIndex = (index - 1) // 2", "def append(self,data,priority):\r\n\t\tbisect.insort(self.queue,(priority,data))", "def heappush(heap, item):\n heap.append(item)\n _siftdown(heap, 0, len(heap)-1)", "def change_priority(self, elem, prio):\n pos = self.pos[elem]\n currPrio = self.A[pos][1]\n self.A[pos] = (elem, prio)\n if self.cmpFn(prio, currPrio):\n self.insert_loop(pos, pos // 2) # Up heapify\n else:\n self.combine(pos) # Down heapify", "def test_priority_que_success_priority(priority_queue):\n priority_queue.insert(10)\n priority_queue.insert(5)\n priority_queue.insert(100, 1)\n priority_queue.insert(10, 1)\n assert priority_queue._heap[0].value == 100", "def heapreplace(self, key, value):\n if self.is_empty():\n raise IndexError('Priority queue is empty')\n small = self.min()\n self._data[0]._key = key\n self._data[0]._value = value\n self._down_heap(0)\n return small", "def insert(self, key):\n\n self.heap.append(key)\n\n self.bubble_up(len(self.heap) - 1)", "def push(self, elt):\n if len(self._queue) == 0: self._queue.append(elt); return\n for i in range(len(self._queue)):\n if self._queue[i].priority < elt.priority:\n self._queue.insert(i, elt)\n return\n #if we get here, elt is lower than all the other procs in the queue, so\n #just append it\n self._queue.append(elt)", "def heapify(x):\n pass", "def add(self, item, priority=0):\n if item in self.set:\n self.remove(item)\n count = next(self.counter)\n entry = [priority, count, item]\n self.set[item] = entry\n hpq.heappush(self.heap, entry)", "def push(self, x):\n assert x not in self.rank\n i = len(self.heap)\n self.heap.append(x) # add a new leaf\n self.rank[x] = i\n self.up(i) # maintain heap order", "def push(self, x):\n assert x not in self.rank\n i = len(self.heap)\n self.heap.append(x) # add a new leaf\n self.rank[x] = i\n self.up(i) # maintain heap order", "def test_priority_que_pop_and_push(priority_queue_full):\n priority_queue_full.pop()\n priority_queue_full.insert(11, 1)\n assert priority_queue_full._heap[0].priority == 1\n priority_queue_full.pop()\n priority_queue_full.pop()\n priority_queue_full.insert(10, 1)\n assert priority_queue_full.peek() == 10", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def add(self, item, priority):\n heappush(self.contents, (priority, item))", "def append(self, key):\n if key is None:\n raise ValueError('Cannot insert None in the queue')\n\n i = len(self.heap)\n self.heap.append(key)\n while i > 1:\n parent = i // 2\n if key < self.heap[parent]:\n self.heap[i], 
self.heap[parent] = self.heap[parent], key\n i = parent\n else:\n break", "def max_heap_insert(self, A, key):\n A.append(-9999999)\n self.heap_increase_key(A, len(A)-1, key)", "def max_heap_insert(self, A, key):\n A.append(-9999999)\n self.heap_increase_key(A, len(A)-1, key)", "def add(self, key, value):\n self._data.append(self._Item(key, value))\n self._upheap(len(self._data) - 1) # upheap newly added position", "def restructureHeap(self):\n\n self.i = 1\n # Storing the elements that already exist in a temporary list\n tempList = []\n for heapElement in self.heap:\n if heapElement != \"NaN\" :\n tempList.append( heapElement )\n\n # Initializing new heap\n self.heap = [\"NaN\"] * self.noOfElements\n\n # Storing all the elements in the temporary list in a continuous fashion in the new heap\n for element in tempList:\n self.insertElement(element, self.i)", "def push(self, item):\n self.heap.append(self.m * item)\n self._sift_up()", "def append(self, x):\n if self.even:\n min_heap_push(self.right, x)\n max_heap_push(self.left, min_heap_pop(self.right))\n else:\n max_heap_push(self.left, x)\n min_heap_push(self.right, max_heap_pop(self.left))\n self.even = not self.even", "def push(self, transition, priority):\n priority = priority * 10000\n priority = self._clip_p(priority)\n priority = int(priority)\n # if we reached the capacity, overwrite the oldest item\n if (self.size == self.capacity):\n self.queue[self.to_write%self.capacity] = transition\n self.sum_tree.update(self.to_write%self.capacity,priority)\n else:\n self.queue.append(transition)\n self.sum_tree.push(priority)\n self.to_write = self.to_write + 1", "def heapify(self):\r\n if self._size:\r\n start = self._parent(len(self._data)-1) # who'se the last parent?\r\n for index in range(start, -1, -1): # for all parents\r\n self.down_heap(index) # fix your heap\r", "def insert(pq):\n\ti = r.randint(0, bound-1)\n\tpq.put(i)\n\tlogging.info(\"insert %s\", i)", "def insert(self, item):\n index = self.insert_at_next_index(item)\n self.items[index] = item\n while index > 1:\n parent_index = index / 2 # Truncate, e.g. 4 and 5 have parent 2.\n if self.is_heap_order(self.items[parent_index], self.items[index]):\n # The item does not need to bubble up anymore. Done.\n return\n else:\n # Swap items at index and parent_index\n temp = self.items[index]\n self.items[index] = self.items[parent_index]\n self.items[parent_index] = temp\n index = parent_index\n # The item bubbled all the way to the root. 
Done.\n return", "def insert(self, item):\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.dist_fn))", "def insert(self, key):\n\n badIndex = self._append(key)\n # heap invariant might be violated at the new index\n while badIndex > 0 and not self.invariant(self.queue[self._parent(badIndex)], self.queue[badIndex]):\n self._swap(badIndex, self._parent(badIndex))\n badIndex = self._parent(badIndex)", "def insert_at_end(self, new_node):\n self.heap.append(new_node)\n self.heap_size += 1", "def make_heap(self, frequency):\n\n\n\t\t\tfor key in frequency:\n\t\t\t\tnode = self.HeapNode(key, frequency[key])#instaciamos un nodo con el valor y frecuencia\n\t\t\t\theapq.heappush(self.heap, node)#agregamos el nodo al priority queue", "def test_priority_que_success(priority_queue):\n priority_queue.insert(15)\n assert (priority_queue._heap[0].value,\n priority_queue._heap[0].priority) == (15, float('inf'))", "def min_heap(self): \n \n for pos in range(self.size//2, 0, -1): \n self.min_heapify(pos)", "def siftup(self, node, pos):\n p = self.parent(pos)\n while p is not None and self.heap[p].key > node.key:\n self.heap[pos] = self.heap[p]\n self.heap[pos].pos = pos\n pos = p\n p = self.parent(p)\n self.heap[pos] = node\n node.pos = pos", "def _heapify_after_remove(self,ele):\r\n \r\n if self._chk_left(ele):\r\n left = self._left(ele)\r\n find_small_child = left\r\n # below to find which child has small integer\r\n if self._chk_right(ele):\r\n right = self._right(ele)\r\n if self._data[left] > self._data[right]:\r\n find_small_child = right\r\n \r\n if self._data[find_small_child] < self._data[ele]:\r\n self.swap(ele, find_small_child)\r\n self._heapify_after_remove(find_small_child)", "def min_heapify(self, pos): \n \n # If the node is a non-leaf node and greater \n # than any of its child \n if not self.is_leaf(pos): \n if (self.Heap[pos] > self.Heap[self.left_child(pos)] or \n self.Heap[pos] > self.Heap[self.right_child(pos)]): \n \n # Swap with the left child and heapify \n # the left child \n if self.Heap[self.left_child(pos)] < self.Heap[self.right_child(pos)]: \n self.swap(pos, self.left_child(pos)) \n self.min_heapify(self.left_child(pos)) \n \n # Swap with the right child and heapify \n # the right child \n else: \n self.swap(pos, self.right_child(pos)) \n self.min_heapify(self.right_child(pos))", "def push(self, element, value):\n insert_pos = 0\n for index, el in enumerate(self.tops):\n if not self.find_min and el[1] >= value:\n insert_pos = index + 1\n elif self.find_min and el[1] <= value:\n insert_pos = index + 1\n self.tops.insert(insert_pos, [element, value])\n self.tops = self.tops[: self.n]", "def test_priority_que_success_multiple_empty(priority_queue):\n priority_queue.insert(15)\n priority_queue.insert(13, 1)\n assert (priority_queue._heap[0].value,\n priority_queue._heap[0].priority,\n priority_queue._heap[1].value) == (13, 1, 15)", "def add(self, item, priority=0) -> None:\n if item in self.entry_finder:\n self.remove(item)\n count = next(self.counter)\n entry = (priority, count, [item])\n self.entry_finder[item] = entry\n heapq.heappush(self.priority_queue, entry)", "def push(self, val):\n if type(val) == int:\n if val in self._heap:\n raise ValueError('Cannot have duplicate values in list')\n if not 
self._heap:\n self._heap.append(val)\n else:\n self._heap.append(val)\n self._sort(len(self._heap) - 1)\n else:\n raise TypeError('Must add an integer')", "def push(self, val):\n self.insert(val)", "def push(self, new_node):\n # Add a new node into the data list\n self.data.append(new_node)\n # Maintain the min heap\n self.heapify_up(len(self.data) - 1)", "def add(self,key,value):\r\n try:\r\n self._data.append(self.Item(key,value)) # insert as Item class object with key value.\r\n self._heapify_after_add(len(self._data) - 1 )\r\n return self._data\r\n except Exception, e:\r\n print \"Error occurred in HeapDistance: add method\", e", "def heappop(heap):\n pass", "def construct_heap(self, elems):\n for e in elems:\n self.n += 1\n self.A.append(e)\n self.pos[e[0]] = self.n\n for i in range(self.n // 2, 0, -1):\n self.combine(i)", "def _upheap(self, node):\n parent = self.parent(node)\n while parent is not None and node.element() < parent.element():\n self._swap(node, parent) # Move node upward while key\n parent = self.parent(node) # smaller than parent's key", "def insert(self, p, elem):\n node = self._validate(p)\n new_node = self._Node(elem, idx=self._curr_idx, parent=node._parent)\n self._curr_idx += 1\n node._parent = new_node\n new_node._children.append(node)\n self._size += 1\n\n # Invalidate depths and heights after modifying the tree.\n self._depths, self._heights = None, None\n return self._make_position(new_node)", "def build_heap(data):\n size = len(data)\n for i in range(size//2, -1,-1):\n shiftDown(data, i)", "def push_pop(self, item, priority):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Replace and return min item from heap, if any\n ...", "def test_priority_que_success_priority_multiple(priority_queue):\n priority_queue.insert(20)\n priority_queue.insert(5)\n priority_queue.insert(100, 5)\n priority_queue.insert(10, 2)\n priority_queue.insert(50, 1)\n assert priority_queue._heap[0].value == 50", "def insert(self, val):\n if not self.root:\n self.root = Node(val)\n self.size_number += 1\n else:\n self._sink(val, self.root)\n # check parent from node, until unbalanced.", "def Insert(self, val, extra=None):\n if self._size >= 0:\n if val > self.best[0]:\n idx = bisect.bisect(self.best, val)\n # insert the new element\n if idx == self._size:\n self.best.append(val)\n self.extras.append(extra)\n else:\n self.best.insert(idx, val)\n self.extras.insert(idx, extra)\n # and pop off the head\n self.best.pop(0)\n self.extras.pop(0)\n else:\n idx = bisect.bisect(self.best, val)\n self.best.insert(idx, val)\n self.extras.insert(idx, extra)" ]
[ "0.7215978", "0.7176819", "0.7170936", "0.71155995", "0.7005643", "0.6955782", "0.6939675", "0.6876973", "0.6861779", "0.6800935", "0.67226076", "0.6711996", "0.669744", "0.6697037", "0.6668611", "0.66360056", "0.6631062", "0.66289246", "0.6607817", "0.6604025", "0.6573611", "0.65559596", "0.65438515", "0.6536176", "0.6527394", "0.652451", "0.65128905", "0.65004957", "0.6500446", "0.6495245", "0.6487986", "0.6484566", "0.64520544", "0.6446229", "0.63835174", "0.63757914", "0.6352546", "0.6340619", "0.6334559", "0.63156575", "0.62974465", "0.62905496", "0.62819815", "0.6271445", "0.6267313", "0.62590986", "0.625481", "0.62498146", "0.62459695", "0.6241688", "0.6231404", "0.6220565", "0.62173015", "0.62142754", "0.62012994", "0.6184132", "0.61821866", "0.6175188", "0.61716914", "0.61695254", "0.61695254", "0.61341983", "0.61219025", "0.61119926", "0.60950756", "0.6082868", "0.6082868", "0.607804", "0.6071623", "0.60693073", "0.60610783", "0.60439724", "0.6032706", "0.6032308", "0.6022602", "0.5975816", "0.5973365", "0.5969789", "0.5962297", "0.5952851", "0.59283406", "0.59170413", "0.59137917", "0.591169", "0.5905716", "0.59000134", "0.5897154", "0.5896391", "0.58941245", "0.5885974", "0.58826995", "0.58665186", "0.5858348", "0.58561236", "0.5838783", "0.58315253", "0.5828317", "0.5826347", "0.5826291", "0.5823234" ]
0.6376806
35
Pops the item with the largest priority and repairs the heap.
def pop(self) -> tuple:
    item = self.__heap.popleft()
    if len(self) > 1:
        self.__heap.appendleft(self.__heap.pop())
        self.__sift_down(0)
    return item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def push_pop(self, item, priority):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Replace and return min item from heap, if any\n ...", "def delete_max(self):\n retval = self.heap_list[1]\n self.heap_list[1] = self.heap_list[self.size]\n self.size = self.size - 1\n pop_val = self.heap_list.pop()\n self.percolate_down(1)\n return retval", "def remove(self):\n max_item = self.heaplist[1]\n self.heaplist[1] = self.heaplist[self.currentsize]\n self.currentsize -= 1\n self.heaplist.pop()\n self.shift_item_down(1)\n return max_item", "def pop(self):\n\n assert self.size > 0, \"Cannot pop item! The MaxHeap is empty!\"\n ret = self.items[0]\n self.items[0] = self.items[self.size - 1]\n self.items[self.size - 1] = None\n self.size -= 1\n self._shift_down(0)\n return ret", "def dequeue(self):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Remove and return min item from heap, if any\n ...", "def pop(self):\n priority, value = heappop(self._heap)\n return (-1 * priority, value)", "def heap_pop_max(heap):\n last = heap.pop()\n if heap:\n return_item = heap[0]\n heap[0] = last\n heapq._siftup_max(heap, 0)\n else:\n return_item = last\n return return_item", "def pop(self):\n self.data[0], self.data[-1] = self.data[-1], self.data[0]\n result = self.data.pop()\n self.heapify_down(0)\n return result", "def remove_min(self):\r\n # Should raise an exception of size is 0...\r\n if self._size == 0: raise KeyError # Can't remove from an empty heap\r\n result = self._data[0] # remember the smallest\r\n self._data[0] = None # None is so we don't have a reference.\r\n self._size -= 1 # don't forget we have one less\r\n # bring the last to the front and stick the None at the end\r\n self.swap(0, self._size)\r\n # and let the item inserted at the front \"drift down\"\r\n self.down_heap(0)\r\n return result # finally return what was the minimum\r", "def pop(self):\n\t\tif self.heap:\n\t\t\treturn heapq.heappop(self.heap)[1]\n\t\telse:\n\t\t\traise Exception('Trying to pop from empty PriorityQueue.')", "def delMin(self):\n retval = self.heapList[1]\n self.heapList[1] = self.heapList[self.currentSize]\n self.currentSize = self.currentSize - 1\n self.heapList.pop()\n self.percDown(1)\n return retval", "def pop(self):\n priority, key = self.__heap[0]\n self.__swap(0, len(self.__heap) - 1)\n del self.__position[key]\n del self.__heap[-1]\n\n if self:\n self.__bubble_down(0)\n\n return priority, key", "def pop(self):\n while self.heap:\n priority, count, smallest = hpq.heappop(self.heap)\n if smallest is not self.REMOVED:\n del self.set[smallest]\n return priority, smallest\n raise KeyError('pop from an empty priority queue')", "def push_pop(self, priority, key):\n if not self or priority <= self.__heap[0][0]:\n return priority, key\n\n result_priority, result_key = self.__heap[0]\n del self.__position[result_key]\n\n self.__heap[0] = [priority, key]\n self.__position[key] = 0\n self.__bubble_down(0)\n\n return result_priority, result_key", "def pop(self):\n\n while self.heap:\n# #logger_cagada.debug(\"elem de heap %s\" % self.heap)\n priority, node = self.heappop(self.heap)\n if node is not self.REMOVED:\n del self.entry_finder[node]\n return priority, node\n raise KeyError('pop from an empty priority queue')", "def pop(self):\n try:\n top_node = self._heap[0]\n self._heap = [self._heap[-1]] + self._heap[1:-1]\n self.sort_down(0)\n return top_node\n except IndexError:\n raise IndexError('Cannot pop 
from an empty heap')", "def test_pop_decreases_size(sample_priorityq):\n for i in range(5):\n sample_priorityq.insert([i, i + 3])\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 4\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 3\n sample_priorityq.pop()\n assert len(sample_priorityq.heap_list) == 2", "def heappop(heap):\n lastelt = heap.pop() # raises appropriate IndexError if heap is empty\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n Heap.siftup(heap, 0)\n return returnitem\n return lastelt", "def pop(self):\n\n def sub_pop():\n heaps = self.priorities\n keys = heaps.keys()\n keys = min(keys)\n heap = heaps[keys]\n pop = heap.pop()\n return pop\n\n try:\n val = sub_pop()\n except IndexError:\n self._remove_key()\n val = sub_pop()\n\n return val", "def delete_top_from_max_heap(x):\n last = x[-1]\n x = x.at[0].set(last)[:-1]\n return heapify_subtree(x, 0)", "def dequeue(self):\n if self._size == 0:\n raise EmptyQueue('dequeue from empty queue')\n priority = self._heap[0].priority\n value = self._heap[0].value\n key = self._heap[0].key\n del self._index[key]\n item = self._heap.pop()\n self._size -= 1\n if self._size == 0:\n return priority, value, key\n self._heap[0] = item\n self._index[item.key] = 0\n self._sift_down(0)\n return priority, value, key", "def pop(self):\n if len(self.priority_queue.values()):\n nextkey = 0\n while nextkey not in self.priority_queue:\n nextkey += 1\n up_next = self.priority_queue[nextkey][0]\n self.priority_queue[nextkey] = self.priority_queue[nextkey][1:]\n return up_next\n else:\n raise IndexError(\"There's nothing in your queue\")", "def pop(self):\n if self.n == 0:\n raise ValueError(\"Heap is empty\")\n value = self.ar[0]\n self.n -= 1\n self.ar[0] = self.ar[self.n]\n self.heapify(0)\n return value", "def pushpop(self, item):\n return heapq.heappushpop(self.heap, item)", "def pop(self):\n root = self.heap[1]\n del self.rank[root]\n x = self.heap.pop() # remove last leaf\n if self: # if heap is not empty\n self.heap[1] = x # move the last leaf\n self.rank[x] = 1 # to the root\n self.down(1) # maintain heap order\n return root", "def pop(self):\r\n while self.pq:\r\n priority, count, task = heapq.heappop(self.pq)\r\n if task is not self.REMOVED:\r\n del self.entry_finder[task]\r\n return task\r\n raise KeyError('pop from an empty priority queue')", "def pop(self):\n _, _, obj = heapq.heappop(self._heap)\n return obj", "def pop(self):\n root = self.heap[1]\n del self.rank[root]\n x = self.heap.pop() # remove last leaf\n if self: # if heap is not empty\n self.heap[1] = x # put last leaf to root\n self.rank[x] = 1\n self.down(1) # maintain heap order\n return root", "def heappop(heap):\n lastelt = heap.pop()\n if heap:\n returnitem = heap[0]\n heap[0] = lastelt\n _siftup(heap, 0)\n return returnitem\n return lastelt", "def pop(self):\n try:\n result = self._heap_list.pop(0)\n except IndexError:\n raise IndexError(\"Cannot pop from an empty heap.\")\n self._build_heap()\n return result", "def pop(self):\n return heapq.heappop(self.heap)", "def pop_smallest(self):\n values = [item[0] for item in self.items] #list of the values\n #values = L[:]\n heapq.heapify(values)\n smallest = heapq.heappop(values)#not forgetting heapq.heapify(values)\n #directly writing t = heapq.heappop([4,2,4]) would result in t = 4\n i = self.getItemByValue(smallest)\n self.items.remove(i)\n return i[1]", "def remove(self): \n \n popped = self.Heap[self.FRONT] \n self.Heap[self.FRONT] = self.Heap[self.size] \n self.size-= 1\n 
self.min_heapify(self.FRONT) \n return popped", "def pop(self):\n if self.size is 0:\n print \"Heap is empty\"\n return\n # Swap the top most element with the last one\n self._swap(0, self.size - 1)\n poppedKey = self.queue[self.size - 1]\n # Reduce the size of the queue\n self.size -= 1\n # Rebalance\n self._heapify(0)\n return poppedKey", "def poll(self):\n assert len(self.heap) > 0, \"ERROR: Heap is empty.\"\n item = self.heap[0]\n self.heap[0] = self.heap.pop()\n self.heapify_down()\n return item", "def pop(self):\n if self.heap == [0]:\n raise EmptyHeapException('Heap is empty.')\n self.heap[1], self.heap[-1] = self.heap[-1], self.heap[1]\n minimum = self.heap[-1] # Store min val to return later\n self.heap = self.heap[:-1] # Remove final element\n self._percolate_down(1)\n return minimum", "def pop(self) -> T:\n while self.priority_queue:\n _, _, (item,) = heapq.heappop(self.priority_queue)\n if item is not None:\n del self.entry_finder[item] # type: ignore\n return cast(T, item)\n raise KeyError('pop from an empty priority queue')", "def pop(self):\r\n\r\n if not self.is_empty():\r\n\r\n half_cap = self._capacity // 2\r\n item = self._data[self._size-1]\r\n self._data[self._size-1] = 0\r\n self._size -= 1\r\n\r\n if self._size <= half_cap:\r\n if half_cap != 0:\r\n\r\n self.shrink()\r\n\r\n return item\r\n\r\n else:\r\n pass", "def pop(self):\n result = self.peek()\n self.item_count -= 1\n index = 1\n mem_size = len(self.items)\n while True:\n left = index * 2\n right = left + 1\n if self.is_invalid_index(left) and self.is_invalid_index(right):\n # Neither child exists, so delete this item.\n self.mark_invalid_index(index)\n return result\n elif self.is_invalid_index(right):\n # Right child does not exist, so bubble up from left.\n self.items[index] = self.items[left]\n index = left\n elif self.is_invalid_index(left):\n # Left child does not exist, so bubble up from right.\n self.items[index] = self.items[right]\n index = right\n elif self.is_heap_order(self.items[left], self.items[right]):\n # Left child should be on top, so bubble up from left.\n self.items[index] = self.items[left]\n index = left\n else:\n # Right child should be on top, so bubble up from right.\n self.items[index] = self.items[right]\n index = right", "def pop(self):\n (cost, node) = heapq.heappop(self.heap)\n self.states.pop(node.state, None) # remove state\n return node", "def pop(self):\n if len(self.heap)==0:\n raise ValueError(\"Tried popping empty heap\")\n return heapq.heappop(self.heap)[1]", "def heap_push_max(heap, item):\n heap.append(item)\n heapq._siftdown_max(heap, 0, len(heap)-1)", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def percolate_down(self, i):\n while (i * 2) <= self.size:\n max_child = self.max_child(i)\n if self.heap_list[max_child] > self.heap_list[i]:\n tmp = self.heap_list[i]\n self.heap_list[i] = self.heap_list[max_child]\n self.heap_list[max_child] = tmp\n i = max_child", "def pop(self):\n if not self.empty():\n self.size -= 1\n return heapq.heappop(self.queue)\n else:\n return None", "def heap_pop(self, value):\n if value is None or self.get_size() == 0:\n return\n\n if self.find(value) is not None:\n # end of list\n position = self.find(value)\n last = self.get_size() - 1\n\n # pop element and percolate down\n self.swap(position, last)\n 
self.table.pop()\n self.percolate_down(position)\n return", "def pop(self):\n temp = self.elements.pop(0)\n self._heapify()\n return temp", "def shift_item_down(self, parent_index):\n while 2 * parent_index <= self.currentsize:\n child_index = self.max_child_index(parent_index)\n if self.heaplist[child_index] > self.heaplist[parent_index]:\n self.heaplist[child_index], self.heaplist[parent_index] = self.heaplist[parent_index], self.heaplist[child_index]\n parent_index = child_index", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty.')\n self._swap(0, len(self._data) - 1) # put minimum item at the end\n item = self._data.pop() # and remove it from the list;\n self._downheap(0) # then fix new root\n return (item._key, item._value)", "def percDown(self, i):\n while(i * 2) <= self.currentSize:\n mc = self.minChild(i)\n if self.heapList[i] > self.heapList[mc]:\n tmp = self.heapList[i]\n self.heapList[i] = self.heapList[mc]\n self.heapList[mc] = tmp\n i = mc", "def __delitem__(self, key):\n\t\ttry:\n\t\t\tdel self.heap[[item == key for _, item in self.heap].index(True)]\n\t\texcept ValueError:\n\t\t\traise KeyError(str(key) + \" is not in the priority queue\")\n\t\theapq.heapify(self.heap)", "def heapify_down(self, index):\n min_index = index\n\n for c in [index * 2 + 1, index * 2 + 2]:\n if c < len(self.data) and self.data[c] > min_index:\n min_index = c\n if min_index == index:\n return\n self.data[index], self.data[min_index] = self.data[min_index], self.data[index]\n self.heapify_down(min_index)", "def pop(self):\n return heappop(self.priority_queue)[1]", "def pop(self):\n temp = self.high_low.pop(0)\n self.compare_parent(self.high_low.index(self.high_low[-1]))", "def remove_min(self):\n if self.is_empty():\n raise Empty('Priority queue is empty')\n self._swap(0, len(self) - 1)\n item = self._data.pop()\n self._down_heap(0)\n return (item._key, item._value)", "def sift_down(self, i):\n #If the current value has at least one child\n while (i * 2) <= self.current_size:\n #For the current value, get the index of the child with the least value (min child)\n mc = self.min_child(i)\n # If the current value is greater than it's \"min child\" value, swap the values\n if self.heap_list[i][0] > self.heap_list[mc][0]:\n self.heap_list[i], self.heap_list[mc] = self.heap_list[mc], self.heap_list[i]\n i = mc", "def remove_min(self):\r\n if self.is_empty():\r\n raise Exception('Priority queue is empty.')\r\n self._swap(0, len(self._data) - 1) # put minimum item at the end\r\n item = self._data.pop() # and remove it from the list;\r\n self._downheap(0) # then fix new root\r\n return (item._key, item._value)", "def pop(self):\n (_,_,path) = heapq.heappop(self.frontierpq)\n return path", "def delete_min(self):\n\n self.switch(0, -1)\n\n min = self.heap.pop(-1)\n\n self.bubble_down(0)\n\n return min", "def heap_down(self, index):\n left_child = (2*index) + 1\n right_child = (2*index) + 2\n\n if left_child < len(self.store):\n\n if right_child >= len(self.store):\n min_child = left_child\n elif self.store[left_child].key < self.store[right_child].key:\n min_child = left_child\n else:\n min_child = right_child\n\n if self.store[index].key > self.store[min_child].key:\n self.swap(index, min_child)\n self.heap_down(min_child)", "def pop(self): # 06:30 Lecture Week 2 \"Stacks\" (16:24)\n if self.isEmpty():\n raise Exception(\"Stack underflow\")\n item = self.first.Item # save item to return\n self.first = self.first.Next # delete last Node added\n self.N -= 1\n return item # 
return the saved item", "def push(self, priority: float, item):\n heappush(self._heap, (-1 * priority, item))", "def delete_min(self):\n #The length is 1 because the heap list was initialized with 0\n if len(self.heap_list) == 1:\n return \"Empty heap.\"\n\n #Store the min value of the heap\n top = self.heap_list[1]\n\n #Move the last value of the heap to the top\n self.heap_list[1] = self.heap_list[self.current_size]\n\n #Pop the last value from the heap (that was moved to the top)\n *self.heap_list, _ = self.heap_list\n\n # Decrease the size of the heap\n self.current_size -= 1\n\n #Move down the top value to the appropriate position (following the definition of a min heap)\n #The value is at index 1 since the heap list was initialized with 0) \n self.sift_down(1)\n\n #Return the min value of the heap\n return top", "def pop(self):\n item = self.stack.pop()\n\n if item == self.max[-1]: # pop if the same element\n self.max.pop()\n\n return item", "def max_heapify(self, i):\n largest, left_index, right_index = i, 2*i+1, 2*i+2\n current_length = self.heap_size\n\n if (left_index < current_length) and (self.heap[left_index].priority_key > self.heap[largest].priority_key):\n largest = left_index\n\n if (right_index < current_length) and (self.heap[right_index].priority_key > self.heap[largest].priority_key):\n largest = right_index\n\n if largest != i:\n self.heap[largest], self.heap[i] = self.heap[i], self.heap[largest]\n self.max_heapify(largest)\n return self.heap", "def __heappop(heap, nodes, pos, stopPos = None):\n # Default stopping position to end of heap\n stopPos = stopPos if not None else len(heap) - 1\n \n # Swap target node with stopping position, re-order heap to stopping\n # position minus one, then pop the target node\n Graph.__swapHeapNodes(heap, nodes, pos, stopPos)\n Graph.__siftdown(heap, nodes, pos, stopPos - 1)\n node = heap.pop(stopPos)\n \n # Delete node from dictionary and return\n del nodes[node[1]]\n return node", "def shift_item_up(self, index):\n while index > 0:\n parent_index = index // 2\n if parent_index > 0 and self.heaplist[parent_index] < self.heaplist[index]:\n self.heaplist[parent_index], self.heaplist[index] = self.heaplist[index], self.heaplist[parent_index]\n index = index // 2", "def pop(self):\n heap = self.heap\n popped_key = heap[1]\n if len(heap) == 2:\n return heap.pop()\n heap[1] = key = heap.pop()\n\n i = 1\n while True:\n left = i * 2\n if len(heap) <= left:\n break\n left_key = heap[left]\n right = i * 2 + 1\n right_key = right < len(heap) and heap[right]\n if right_key and right_key < left_key:\n child_key = right_key\n child = right\n else:\n child_key = left_key\n child = left\n if key <= child_key:\n break\n self.heap[i], self.heap[child] = child_key, key\n i = child\n return popped_key", "def pop(self) -> int:\n if self.empty():\n raise RuntimeError(\"Queue is empty!\")\n result = self.data[self.head]\n self.data[self.head] = None\n self.head = (self.head + 1) % len(self.data)\n self.size -= 1\n if 0 < self.size < len(self.data) // 4 and len(self.data) > 10:\n self.resize(len(self.data) // 2)\n return result", "def pop(self):\n heap = self.heap\n if len(heap) < 1:\n return None\n\n ret_val = self.front()\n self.__delete(0)\n return ret_val", "def pop(self):\n return heapq.heappop(self.array)", "def __sift_down(self, i: int):\n while (2 * i + 1) <= self.__len__() - 1:\n\n child_idx = self.__get_smallest_child(i)\n\n if self.__heap[i][0] > self.__heap[child_idx][0]:\n tmp = self.__heap[i]\n self.__heap[i] = self.__heap[child_idx]\n 
self.__heap[child_idx] = tmp\n i = child_idx", "def test_pop_always_returns_highest_priority(sample_priorityq):\n sample_priorityq.insert([\"value1\", 5])\n sample_priorityq.insert([\"value2\", 10])\n sample_priorityq.insert([\"value3\", 7])\n sample_priorityq.insert([\"value4\", 0])\n assert sample_priorityq.pop() == \"value2\"", "def pop_smallest(self):\n smallest = heapq.heappop(self.heap)\n del self.set[smallest]\n return smallest", "def _heapify_after_remove(self,ele):\r\n \r\n if self._chk_left(ele):\r\n left = self._left(ele)\r\n find_small_child = left\r\n # below to find which child has small integer\r\n if self._chk_right(ele):\r\n right = self._right(ele)\r\n if self._data[left] > self._data[right]:\r\n find_small_child = right\r\n \r\n if self._data[find_small_child] < self._data[ele]:\r\n self.swap(ele, find_small_child)\r\n self._heapify_after_remove(find_small_child)", "def pop(self) -> int:\n self._aux()\n ret = self.q1.popleft()\n self.q1, self.q2 = self.q2, self.q1\n self.size -= 1\n return ret", "def popitem(self):\n pass", "def pop_min(self):\n if self.get_size() == 0:\n return None\n\n # put minimum item at the end\n self.swap(0, len(self.table) - 1)\n\n # and remove it from the list;\n item = self.table.pop()\n\n # then fix new root\n self.percolate_down(0)\n return item", "def pop(self) -> int:\n old_top = self.topEle\n self.topEle = self.q1[self.n - 2]\n for i in range(self.n - 1):\n self.q2.append(self.q1[i])\n self.n -= 1\n self.q1 = self.q2\n return old_top", "def get(self):\n size = self.size()\n if size < 0:\n return None\n res = self.heap[0]\n self.heap[0], self.heap[size - 1] = self.heap[size - 1], self.heap[0]\n self.heap.pop()\n self.sift_down(0)\n # self.sift_down_recursion(0)\n return res", "def pop(self) -> Article:\n return heapq.heappop(self.heap)", "def pop(self):\n assert self.__stack\n self.__stack.pop()\n self.__max_values.pop()", "def remove_min(self) -> object:\n if self.is_empty():\n raise MinHeapException\n return\n parent_index=0\n parent=self.get_min()\n #parent=5\n #print(parent)\n #print(self)\n self.heap.swap(parent_index,self.heap.length()-1)\n self.heap.pop()\n if self.is_empty():\n return parent\n min_child=self.find_min_child(1,2)\n while min_child!=None:\n if self.heap.get_at_index(min_child)>self.heap.get_at_index(parent_index):\n break\n self.heap.swap(min_child,parent_index)\n parent_index=min_child\n if parent_index==None:\n break\n min_child=self.find_min_child((parent_index * 2)+1,(parent_index * 2) + 2)\n return parent", "def pop(self):\n # O(1)\n # Your code here\n item = self.stack.pop() # O(1)\n # check if we're removing the max\n #if item == max: #O(1)\n # if so, we need to update self. 
max\n #new_max = self.find_max() # O(n) # Don't need find anymore\n # self.max = new_max #O(1)\n # self.max = item\n #return self.stack.pop()\n self.max_stack.pop()\n return item", "def sort_down(self, i):\n while ((i + 1) * 2) <= len(self._heap) + 1:\n mc = self.max_child(i)\n if self._heap[i] < self._heap[mc]:\n tmp = self._heap[i]\n self._heap[i] = self._heap[mc]\n self._heap[mc] = tmp\n i = mc", "def heapify_down(self):\n index = 0\n while self.has_left_child(index):\n smaller_child_index = self.get_left_child_index(index)\n if self.has_right_child(index) and self.get_right_child(index) < self.get_left_child(index):\n smaller_child_index = self.get_right_child_index(index)\n if self.heap[index] < self.heap[smaller_child_index]:\n break\n else:\n self.swap_values(index, smaller_child_index)\n index = smaller_child_index", "def decrease_priority(self):\n self._priority += 1", "def pop(self) -> int:\n cur = None\n if(not self.empty()):\n cur = self.queue[0] \n self.queue = self.queue[1:] \n return cur", "def test_priority_que_pop_and_push(priority_queue_full):\n priority_queue_full.pop()\n priority_queue_full.insert(11, 1)\n assert priority_queue_full._heap[0].priority == 1\n priority_queue_full.pop()\n priority_queue_full.pop()\n priority_queue_full.insert(10, 1)\n assert priority_queue_full.peek() == 10", "def _heapify(self):\n start = self._parent(len(self) - 1)\n for i in range(start, -1, -1):\n self._down_heap(i)", "def heapreplace(self, key, value):\n if self.is_empty():\n raise IndexError('Priority queue is empty')\n small = self.min()\n self._data[0]._key = key\n self._data[0]._value = value\n self._down_heap(0)\n return small", "def heap_sort_increase(alist):\r\n heap = MaxHeap()\r\n heap.build_heap(alist)\r\n originalSize = heap.size\r\n for i in range(heap.size):\r\n maxVal = heap.items[1]\r\n heap.del_max()\r\n heap.items[originalSize-i] = maxVal\r\n return heap.items[1:originalSize+1]", "def pop(self) -> Tuple[MeasureInput, MeasureResult]:\n return heapq.heappop(self._data)[2]", "def deleteMin(self):\n heap = self._heap\n position = self._position\n\n try:\n end = heap.pop(-1)\n except IndexError:\n raise KeyError('pqdict is empty')\n\n if heap:\n node = heap[0]\n # grab last node in PQ to root and sink it down appropriately\n heap[0] = end\n position[end.key] = 0\n self._sink(0)\n else:\n node = end\n del position[node.key] # delete index from position dict\n return node.key, node.value", "def pop(self):\n self._raise_if_empty()\n item = self._top.data\n self._top = self._top.next\n return item", "def heappop(heap):\n pass", "def pop(self):\r\n try:\r\n key = heapq.heappop(self.heap)\r\n return self.elements[key]\r\n except:\r\n raise StopIteration", "def pop(self):\n array = self.array\n item = array[0] \n if len(array) == 1:\n del array[0]\n else:\n compare = self.compare\n del self.pos[array[0]] \n array[0] = array.pop()\n self.pos[array[0]] = 0\n low, high = 0, 1\n while high < len(array):\n if ((high+1 < len(array)\n and compare(array[high], array[high+1]) > 0)):\n high = high+1\n if compare(array[low], array[high]) <= 0:\n break\n self.pos[array[high]] = low\n self.pos[array[low]] = high \n array[low], array[high] = array[high], array[low]\n low, high = high, 2*high+1\n return item", "def popitem(self): # real signature unknown; restored from __doc__\n pass", "def _upheap(self, node):\n parent = self.parent(node)\n while parent is not None and node.element() < parent.element():\n self._swap(node, parent) # Move node upward while key\n parent = self.parent(node) # 
smaller than parent's key" ]
[ "0.77817386", "0.76223326", "0.7581046", "0.7518329", "0.74608326", "0.73750526", "0.72169954", "0.7203851", "0.7195688", "0.7184285", "0.7160784", "0.7153954", "0.71533555", "0.70930916", "0.70832413", "0.7074128", "0.7068215", "0.70491946", "0.70461", "0.7043287", "0.70305884", "0.7013482", "0.70117295", "0.7005434", "0.6991565", "0.69611883", "0.6949862", "0.69472855", "0.6937198", "0.6874766", "0.6865855", "0.6855626", "0.6852174", "0.6847418", "0.68460065", "0.68364304", "0.68292624", "0.68227476", "0.68034065", "0.6796225", "0.6783089", "0.6756906", "0.67541194", "0.6749177", "0.67397964", "0.6712117", "0.67119354", "0.6707711", "0.66998106", "0.6683362", "0.6678087", "0.6669527", "0.66681445", "0.6667447", "0.6646642", "0.6641782", "0.66295415", "0.6569482", "0.65631413", "0.65616596", "0.65585136", "0.6549201", "0.6540297", "0.65367913", "0.6534136", "0.6532156", "0.65297705", "0.6522865", "0.6513338", "0.6507976", "0.64887893", "0.64857817", "0.64367825", "0.6433732", "0.6426681", "0.64233965", "0.6383101", "0.63732433", "0.6361338", "0.6357871", "0.63556993", "0.63530797", "0.63303435", "0.63108504", "0.6298304", "0.62939864", "0.6280882", "0.62560904", "0.62386185", "0.6227415", "0.6225058", "0.6219687", "0.6219111", "0.6218557", "0.62171006", "0.62071675", "0.6201827", "0.6201201", "0.6192337", "0.61877066" ]
0.70076555
23
Updates the priority of an item.
def update(self, idx: int, new_priority: T.Union[int, float]):
    old_priority, item = self.__heap[idx]
    self.__heap[idx] = (new_priority, item)
    if new_priority < old_priority:
        self.__sift_up(idx)
    else:
        self.__sift_down(idx)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setPriority(self, p):\n self.priority = p", "def increase_priority(self):\n if self._priority > 0:\n self._priority -= 1", "def schedule_update_priority(self, func, pri, *args, **kwargs):\n self.unschedule(func)\n new_item = _Item(func, pri, args, kwargs)\n for i,sched in enumerate(self.update_schedules):\n if sched.pri > new_item.pri:\n self.update_schedules.insert(i, new_item)\n return\n self.update_schedules.append(new_item)", "def _update(self, priority, key):\n i = self._index[key]\n item = self._heap[i]\n old_priority = item.priority\n item.priority = priority\n if priority < old_priority:\n self._sift_up(i)\n else:\n self._sift_down(i)", "def _update_priority(self, task, prio, worker):\n task.priority = prio = max(prio, task.priority)\n for dep in task.deps or []:\n t = self._state.get_task(dep)\n if t is not None and prio > t.priority:\n self._update_priority(t, prio, worker)", "def change_priority(self, elem, prio):\n pos = self.pos[elem]\n currPrio = self.A[pos][1]\n self.A[pos] = (elem, prio)\n if self.cmpFn(prio, currPrio):\n self.insert_loop(pos, pos // 2) # Up heapify\n else:\n self.combine(pos) # Down heapify", "def change_priority(self, priority, key):\n index = self.__position[key]\n current = self.__heap[index][0]\n self.__heap[index][0] = priority\n\n if priority > current:\n self.__bubble_down(index)\n else:\n self.__bubble_up(index)", "def update(self, index, priority=-1):\n if (priority == -1):\n priority = self._max_priority\n elif (priority > self._max_priority):\n self._max_priority = priority\n\n # Search for index\n node = self.findIndex(index)\n\n # Replace with new priority\n diff = priority - node.priority\n node.priority = priority\n\n # Update value\n self._updateValue(node.parent, diff)", "def _update_item(self, item, user):\n item.user_modified = user\n try:\n item.panel = item.panel\n item.item_priority = item.priority\n except AttributeError:\n pass\n item.is_packed = True\n item.save()\n return item", "def put(self, item, priority=None, *args, **kwargs):\n if priority is None:\n raise self.PQueueException('priority must be specified')\n super().put((priority, item), *args, **kwargs)", "def decrease_priority(self):\n self._priority += 1", "def get_priority(self, item):\n try:\n return self.set[item][0]\n except KeyError:\n print(\"Can't get priority of non-existing item\")", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p**self.alpha)", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities): self.tree.val_update(i, float(p**self.alpha))", "def delete_and_update_priority(self):\r\n for pbi in PBI.objects.filter(priority__gt=self.priority, project=self.project):\r\n pbi.priority -= 1\r\n pbi.save()\r\n\r\n self.delete()", "def SetPriorityValue(self, *args, **kwargs):\n pass", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)", "def priority_update(self, indices, priorities):\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p ** self.alpha)", "def priority(self, priority):\n self._priority = priority", "def set_priority(self, priority):\n self.options[\"priority\"] = priority", "def set_priority(self, priority):\n self.options['priority'] = priority", "def _set_priority(self, v, load=False):\n try:\n t = YANGDynClass(v,base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, 
register_paths=True)\n except (TypeError, ValueError):\n raise ValueError(\"\"\"priority must be of a type compatible with base=np.uint8, is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True\"\"\")\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def priority(self, priority):\n\n self._priority = priority", "def priority(self, priority):\n\n self._priority = priority", "def priority(self, priority):\n\n self._priority = priority", "def set_priority(self, priority):\n self._priority = priority", "def updatePriority(self, xDest, yDest):\n self.priority = self.distance + self.estimate(xDest, yDest)", "def setPriority(self, *args):\n return _libsbml.Event_setPriority(self, *args)", "def priority_update(self, error,batch_index):\n pass", "def change_priority(self, index, new_key):\n # if index is within array\n if index < len(self):\n old_key = self._data[index].get_key()\n self._data[index].set_key(new_key)\n\n # if new key greater percolate down\n if new_key > old_key:\n self.percolate_down(index)\n # if new key is less than percolate up\n elif new_key < old_key:\n self.percolate_up(index)", "def _set_priority(self, args):\n if 'priority' in args:\n try:\n self._priority = float(args['priority'])\n except TypeError:\n raise InvalidPriority('Invalid priority: %s' % args['priority'])\n except ValueError:\n raise InvalidPriority()\n else:\n self._priority = None", "def turn_priority(self):\n raise NotImplementedError(\"turn_priority() was not implemented in a subclass of TurnListItem.\")", "def setpriority(self, pid=None, priority=5):\n\t \n\t import win32api,win32process,win32con\n\t \n\t priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n\t win32process.BELOW_NORMAL_PRIORITY_CLASS,\n\t win32process.NORMAL_PRIORITY_CLASS,\n\t win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n\t win32process.HIGH_PRIORITY_CLASS,\n\t win32process.REALTIME_PRIORITY_CLASS]\n\t if pid == None:\n\t pid = win32api.GetCurrentProcessId()\n\t handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n\t win32process.SetPriorityClass(handle, priorityclasses[priority])", "def _priority_changed(self, priority):\n if self.next is not None:\n self.next.priority = priority", "def enqueue(self, item, priority):\n # TODO: Insert given item into heap\n ...", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n 
if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def _set_priority(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"priority must be of a type compatible with enumeration\"\"\",\n 'defined-type': \"openconfig-qos:enumeration\",\n 'generated-type': \"\"\"YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type=\"dict_key\", restriction_arg={u'STRICT': {}},), is_leaf=True, yang_name=\"priority\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/qos', defining_module='openconfig-qos', yang_type='enumeration', is_config=True)\"\"\",\n })\n\n self.__priority = t\n if hasattr(self, '_set'):\n self._set()", "def priority_option(args, run):\n try:\n priority = float(args)\n except ValueError:\n raise ValueError(\n \"The PRIORITY argument must be a number! 
(but was '{}')\".format(args)\n )\n run.meta_info[\"priority\"] = priority", "def update_priorities(self, idxes, priorities):\n assert len(idxes) == len(priorities)\n for idx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= idx < len(self._storage)\n delta = priority**self._alpha - self._it_sum[idx]\n self._prio_change_stats.push(delta)\n self._it_sum[idx] = priority**self._alpha\n self._it_min[idx] = priority**self._alpha\n\n self._max_priority = max(self._max_priority, priority)", "def push(self, priority: float, item):\n heappush(self._heap, (-1 * priority, item))", "def update_incident_priority(\n *,\n db_session: Session = Depends(get_db),\n incident_priority_id: int,\n incident_priority_in: IncidentPriorityUpdate,\n):\n incident_priority = get(db_session=db_session, incident_priority_id=incident_priority_id)\n if not incident_priority:\n raise HTTPException(\n status_code=404, detail=\"The incident priority with this id does not exist.\"\n )\n\n incident_priority = update(\n db_session=db_session,\n incident_priority=incident_priority,\n incident_priority_in=incident_priority_in,\n )\n return incident_priority", "def update_quantity(item: dict, new_qty):\n qty = item.get('quantity')\n if isinstance(qty, dict):\n item['quantity']['value'] = new_qty\n else:\n item['quantity'] = new_qty", "def setpriority(pid=None, priority=1):\n\n #import win32api,win32process,win32con\n from ctypes import windll\n\n priorityclasses = [0x40, # IDLE_PRIORITY_CLASS,\n 0x4000, # BELOW_NORMAL_PRIORITY_CLASS,\n 0x20, # NORMAL_PRIORITY_CLASS,\n 0x8000, # ABOVE_NORMAL_PRIORITY_CLASS,\n 0x80, # HIGH_PRIORITY_CLASS,\n 0x100, # REALTIME_PRIORITY_CLASS\n ]\n if pid is None:\n pid = windll.kernel32.GetCurrentProcessId()\n handle = windll.kernel32.OpenProcess(PROCESS_ALL_ACCESS, True, pid)\n windll.kernel32.SetPriorityClass(handle, priorityclasses[priority])", "def update_priorities(self, idxes, priorities):\n assert len(idxes) == len(priorities)\n for idx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= idx < len(self._storage)\n self._it_sum[idx] = priority ** self._alpha\n self._it_min[idx] = priority ** self._alpha\n\n self._max_priority = max(self._max_priority, priority)", "def set_priority(priority=2, pid=None):\n print \"TODO: add os independent support\"\n priorityclasses = [win32process.IDLE_PRIORITY_CLASS,\n win32process.BELOW_NORMAL_PRIORITY_CLASS,\n win32process.NORMAL_PRIORITY_CLASS,\n win32process.ABOVE_NORMAL_PRIORITY_CLASS,\n win32process.HIGH_PRIORITY_CLASS,\n win32process.REALTIME_PRIORITY_CLASS]\n if pid == None:\n pid = win32api.GetCurrentProcessId()\n handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)\n win32process.SetPriorityClass(handle, priorityclasses[priority])", "def update_priority(self, indexes, values):\n values = values * 10000\n values = self._clip_p(values)\n values = int(values)\n self.sum_tree.update(indexes, values)", "def set_priority(self, job_id, priority):\n job = Job.get_job_by_id(job_id)\n self.access_handler.check_set_priority(job)\n self.master.set_priority(job, priority)", "def add(self, item, priority=0) -> None:\n if item in self.entry_finder:\n self.remove(item)\n count = next(self.counter)\n entry = (priority, count, [item])\n self.entry_finder[item] = entry\n heapq.heappush(self.priority_queue, entry)", "def add(self, item, priority):\n heappush(self.contents, (priority, item))", "def update_priorities(self,indexes, priorities):\r\n\r\n assert len(indexes) == len(priorities)\r\n for index, priority 
in zip(indexes, priorities):\r\n assert priority > 0 and 0 <= index < len(self.buffer)\r\n self._it_sum[index] = priority ** self._alpha\r\n self._it_min[index] = priority ** self._alpha\r\n self._max_priority = max(self._max_priority, priority)", "def setFrequencyPriority(self, value):\n return self._set(frequencyPriority=value)", "def set_normal_priority(self, cr, uid, ids, context=None):\n return self.set_priority(cr, uid, ids, '3')", "def priority(self) -> int:\n return pulumi.get(self, \"priority\")", "def item_starred(self, item):\n self.update_item(item)", "def set_optimization_priority(self):\n\n if len(self.groups) == 0 and len(self.servers) == 0:\n return\n\n if self.resource.CPU_avail > 0:\n app_cpu_weight = float(self.total_CPU) / float(self.resource.CPU_avail)\n else:\n if self.total_CPU > 0:\n app_cpu_weight = 1.0\n else:\n app_cpu_weight = 0.0\n\n if self.resource.mem_avail > 0:\n app_mem_weight = float(self.total_mem) / float(self.resource.mem_avail)\n else:\n if self.total_mem > 0:\n app_mem_weight = 1.0\n else:\n app_mem_weight = 0.0\n\n if self.resource.local_disk_avail > 0:\n app_local_vol_weight = float(self.total_local_vol) / float(self.resource.local_disk_avail)\n else:\n if self.total_local_vol > 0:\n app_local_vol_weight = 1.0\n else:\n app_local_vol_weight = 0.0\n\n opt = [(\"cpu\", app_cpu_weight),\n (\"mem\", app_mem_weight),\n (\"lvol\", app_local_vol_weight)]\n\n self.optimization_priority = sorted(opt, key=lambda resource: resource[1], reverse=True)", "def put(self, item, priority=False):\n id = uuid.uuid4().hex\n pipe = self.redis.pipeline()\n\n if priority:\n pipe.rpush(self.feed_ids, id)\n else:\n pipe.lpush(self.feed_ids, id)\n pipe.incr(self.feed_publishes)\n pipe.hset(self.feed_items, id, item)\n pipe.zadd(self.feed_published, **{id: int(time.time()*1000)})\n pipe.execute()\n return id", "def updateItem(self, object):\n pass", "def update_collection_priority(self, collid, prid):\n # cond = SQLBinaryExpr(collid, OP_EQ, collid)\n cond = SQLBinaryExpr(COL_NAME_COLL_COLLID, OP_EQ, collid)\n self.update_generic_data({COL_NAME_COLL_PRID: prid}, TABLE_NAME_COLL, cond)", "def set_priority(self, new_prio):\n if Priority.MIN_PRIORITY <= new_prio <= Priority.MAX_PRIORITY:\n self.__priority = new_prio\n LOG(msg='New priority value has been assigned. Priority=%d' % (self.__priority))\n return True\n\n LOG(msg='Given priority value is not within the range of [%d, %d].' 
% (Priority.MIN_PRIORITY, Priority.MAX_PRIORITY), log=Logs.ERROR)\n return False", "def heap_increase_key(self, i, key):\n if key < self.heap[i].priority_key:\n print(\"The new key should be higher than the current priority_key \")\n else:\n self.heap[i].priority_key = key\n while i > 0 and self.heap[(i-1)//2].priority_key < self.heap[i].priority_key:\n self.heap[(i-1)//2], self.heap[i] = self.heap[i], self.heap[(i-1)//2]\n i = (i-1)//2", "def add(self, item, priority=0):\n if item in self.set:\n self.remove(item)\n count = next(self.counter)\n entry = [priority, count, item]\n self.set[item] = entry\n hpq.heappush(self.heap, entry)", "def update_priorities(self, batch_indices, batch_priorities):\n for idx, priority in zip(batch_indices, batch_priorities):\n self.priorities[idx] = priority", "def update_item(self, table, item):", "def update_priorities(self, idxes, priorities):\n assert len(idxes) == len(priorities)\n for ndx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= ndx < len(self.memory)\n self.iter_sum[ndx] = priority ** self.alpha\n self.iter_min[ndx] = priority ** self.alpha\n\n self.max_p = max(self.max_p, priority)", "async def setIncident_priority(\n self,\n eventID: str,\n incidentNumber: int,\n priority: IncidentPriority,\n author: str,\n ) -> None:", "def updateItem(self, item, values):\n print ('Updating item: ' + unicode(item))\n item = int(item) #Importante: Para evitar que se caiga la api de PODIO más adelante\n message = self._client.Item.update(item, {'fields':values})\n return message", "def decreaseKey(self, id, newPriority):\n\n size = self.n\n self.n = self.positions[id] - 1\n self.insert(id, newPriority)\n self.n = size", "def test_task_priority(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n\r\n # By default, tasks without priority should be ordered by task.id (FIFO)\r\n tasks = db.session.query(Task).filter_by(app_id=1).order_by('id').all()\r\n res = self.app.get('api/app/1/newtask')\r\n task1 = json.loads(res.data)\r\n # Check that we received a Task\r\n err_msg = \"Task.id should be the same\"\r\n assert task1.get('id') == tasks[0].id, err_msg\r\n\r\n # Now let's change the priority to a random task\r\n import random\r\n t = random.choice(tasks)\r\n # Increase priority to maximum\r\n t.priority_0 = 1\r\n db.session.add(t)\r\n db.session.commit()\r\n # Request again a new task\r\n res = self.app.get('api/app/1/newtask')\r\n task1 = json.loads(res.data)\r\n # Check that we received a Task\r\n err_msg = \"Task.id should be the same\"\r\n assert task1.get('id') == t.id, err_msg\r\n err_msg = \"Task.priority_0 should be the 1\"\r\n assert task1.get('priority_0') == 1, err_msg", "def priority_update(self, indices, error, gradient):\n priorities = self.__getPriority(error, gradient)\n for i, p in zip(indices, priorities):\n self.tree.val_update(i, p)", "async def priority(self, ctx: Context, *, guild: int = None, channel: int = None):\n\n if not guild:\n guild = ctx.guild\n else:\n guild = self.bot.get_guild(guild)\n if not guild:\n return await ctx.message.add_reaction(\"⚠\")\n\n if guild.id not in self.active_guilds:\n return await ctx.message.add_reaction(\"⚠\")\n\n if not channel:\n channel = ctx.channel\n else:\n channel = self.bot.get_channel(channel)\n if not channel:\n return await ctx.message.add_reaction(\"⚠\")\n\n config = self.get_guild_config(guild)\n config[\"priority_modlog\"] = str(channel.id)\n\n 
self.config.hmset(f\"guilds:{guild.id}\", config)\n self._config_cache[guild.id] = config\n\n await ctx.message.add_reaction(\"✅\")", "def SetPriority(self, priority=1, interruptMenuAfter=3, timeoutAfter=2):\n self.ListenToMenu(interruptMenuAfter) # listen to 'To sent with normal priority...'\n self.SipPhone.SendDTMF(str(priority))\n self.ListenToMenu(timeoutAfter) # listen to 'Message Sent'\n mailbox = self.getMailBoxDN()\n mailbox.SetPriority(int(priority))\n time.sleep(1)\n for owner in mailbox.owners:\n owner.CheckMWI()", "def push_pop(self, item, priority):\n if self.size() < 1:\n raise ValueError('Priority queue is empty and has no front item')\n else:\n # TODO: Replace and return min item from heap, if any\n ...", "def update(self, item, outcome, timestamp):\n\n system, current_deck = self.deck_of_item[item]\n new_deck = max(1, current_deck + 2 * outcome - 1)\n\n self.deck_of_item[item] = (system, new_deck)\n if current_deck >= 1:\n self.items_of_deck[(system, current_deck)].remove(item)\n if new_deck <= self.num_decks:\n self.items_of_deck[(system, new_deck)].add(item)\n\n self.latest_timestamp_of_item[item] = timestamp", "def postProcess(self):\n flag = False\n for task in GetOsekObjects('TASK'):\n for attr1 in task.getAttribute('RESOURCE'):\n if(attr1.value == self.name):\n flag = True\n if(pInt(task.getValue('PRIORITY')) > pInt(self.getValue('PRIORITY'))):\n self.modifyAttribute('PRIORITY', task.getValue('PRIORITY'))\n if(flag == False):\n print 'WARNING: %s hasn\\'t been assigned to any task.'%(self.name)", "def update_priorities(self, indices, delta):\n for i in range(0, len(indices)):\n self.priority_queue.update(math.fabs(delta[i]), indices[i])", "def priority(self):\n return self._pri", "def update(self, values, priority=\"project\"):\n\t\tself._assert_mutability()\n\t\tif isinstance(values, six.string_types):\n\t\t\tvalues = json.loads(values)\n\t\tif values is not None:\n\t\t\tif isinstance(values, BaseSettings):\n\t\t\t\tfor name, value in six.iteritems(values):\n\t\t\t\t\tself.set(name, value, values.getpriority(name))\n\t\t\telse:\n\t\t\t\tfor name, value in six.iteritems(values):\n\t\t\t\t\tself.set(name, value, priority)", "def add_priority(self, entity_type, obj_list, comp_name=None, priority=3):\n i = priority\n objects = \", \".join(obj_list)\n args = [\"NAME:UpdatePriorityListData\"]\n if entity_type == 1:\n prio = [\n \"NAME:PriorityListParameters\",\n \"EntityType:=\",\n \"Object\",\n \"EntityList:=\",\n objects,\n \"PriorityNumber:=\",\n i,\n \"PriorityListType:=\",\n \"3D\",\n ]\n self._priorities_args.append(prio)\n args += self._priorities_args\n elif entity_type == 2:\n pcblist = self.modeler.oeditor.Get3DComponentInstanceNames(comp_name)\n prio = [\n \"NAME:PriorityListParameters\",\n \"EntityType:=\",\n \"Component\",\n \"EntityList:=\",\n pcblist[0],\n \"PriorityNumber:=\",\n i,\n \"PriorityListType:=\",\n \"3D\",\n ]\n self._priorities_args.append(prio)\n args += self._priorities_args\n self.modeler.oeditor.UpdatePriorityList([\"NAME:UpdatePriorityListData\"])\n self.modeler.oeditor.UpdatePriorityList(args)\n return True", "def test_priority_change_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('priority change major normal')\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def schedule_frame_priority(self, func, pri, *args, **kwargs):\n self.unschedule(func)\n new_item = _Item(func, pri, args, kwargs)\n for i,sched in 
enumerate(self.frame_schedules):\n if sched.pri > new_item.pri:\n self.frame_schedules.insert(i, new_item)\n return\n self.frame_schedules.append(new_item)", "def update_priorities(self, batch_indices, batch_priorities):\n pass", "def put_new_order(self, item: dict):\n return self.table_connection.put_item(Item=item)", "def set_background_priority(self, nVmBackgroundPriority):\n\t\tcall_sdk_function('PrlVmCfg_SetBackgroundPriority', self.handle, nVmBackgroundPriority)", "def enqueue(self, priority, value, key=None):\n key = key if key else value\n if key in self._index:\n self._update(priority, key)\n return\n self._heap.append(Item(priority, value, key))\n self._size = len(self._heap)\n self._index[key] = self._size - 1\n self._sift_up(self._size - 1)", "def prob_update(self):\n pass", "def updateScore(self, node, addToScore):\n currentScore = 0\n scoreString = node.attrib.get('gravityScore')\n if scoreString:\n currentScore = int(scoreString)\n \n newScore = currentScore + addToScore\n node.set(\"gravityScore\", str(newScore))", "def set_foreground_priority(self, nVmForegroundPriority):\n\t\tcall_sdk_function('PrlVmCfg_SetForegroundPriority', self.handle, nVmForegroundPriority)", "def fix_client_priorities(new_client_priority):\n # TODO : make this more efficient\n all_frs = defaultdict()\n all_requests = FeaturesRequest.query.all()\n\n for fr in all_requests:\n # update all_frs dict to key(the client_priority), values(fr_id)\n all_frs.setdefault(fr.client_priority, fr.id)\n\n # if new_client_priority is not yet set return\n if new_client_priority not in all_frs.keys():\n return\n\n # else fix priorities\n j = new_client_priority\n\n while j in all_frs.keys():\n j += 1\n\n # update\n while j > new_client_priority:\n fr_to_change = FeaturesRequest.query.get(all_frs[j - 1])\n fr_to_change.client_priority = j\n db.session.add(fr_to_change)\n j -= 1\n\n db.session.commit()\n\n return", "def add_item(self, item):\n self.items_with_price.update(item)", "def test_priority_add_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('priority add new_priority')\n rv, output = self._execute('priority list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def set_thread_priority(self, priority: \"int\") -> \"int\":\n return _beamforming_swig.doaesprit_sptr_set_thread_priority(self, priority)", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def priority(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"priority\")", "def cascade_list(priority_to_cascade_from, todo_list):\r\n for item in todo_list:\r\n if item.priority >= priority_to_cascade_from:\r\n item.priority += 1\r\n return", "def priority(self):\n return self._priority", "def priority(self):\n return self._priority", "def priority(self):\n return self._priority" ]
[ "0.7033521", "0.70131207", "0.6888573", "0.68740356", "0.6721127", "0.6596157", "0.657843", "0.64021325", "0.6299", "0.6285044", "0.6251421", "0.624495", "0.6241001", "0.62359715", "0.6228096", "0.6220238", "0.6217791", "0.6217791", "0.6208242", "0.62000865", "0.61397344", "0.61139137", "0.61006576", "0.61006576", "0.61006576", "0.60817844", "0.6034781", "0.60276216", "0.60094553", "0.5983482", "0.5964974", "0.59058833", "0.5891066", "0.5871434", "0.5869702", "0.5813449", "0.5813449", "0.5813449", "0.5778135", "0.57599247", "0.5753802", "0.5738203", "0.5736947", "0.56891084", "0.56785214", "0.5658304", "0.5611796", "0.5593109", "0.55844903", "0.55741215", "0.5564824", "0.5539327", "0.55386794", "0.5532926", "0.5532444", "0.5525346", "0.5458953", "0.5446746", "0.54379565", "0.53858835", "0.5372581", "0.5371722", "0.53628844", "0.53616226", "0.535318", "0.533637", "0.53321826", "0.5311527", "0.53043246", "0.52910167", "0.5286404", "0.5276373", "0.5268823", "0.5265009", "0.5256628", "0.52561677", "0.5219982", "0.52008367", "0.51973414", "0.5181752", "0.51793456", "0.5167052", "0.51614374", "0.51515317", "0.51497215", "0.5144714", "0.51424474", "0.5139038", "0.51356673", "0.5133152", "0.5121151", "0.5120345", "0.5114551", "0.5114551", "0.5114551", "0.5114551", "0.5090849", "0.5085159", "0.5085159", "0.5085159" ]
0.66484094
5
Percolates up the item if the first element in the item is smaller than the first element in the parent.
def __sift_up(self, i: int):
    while i > 0:
        parent = (i - 1) // 2
        if self.__heap[i][0] < self.__heap[parent][0]:
            tmp = self.__heap[parent]
            self.__heap[parent] = self.__heap[i]
            self.__heap[i] = tmp
        i = parent
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def percolate_up(self, index):\n if self.min is True:\n parent = self._parent(index)\n if index > 0 and self._data[index] < self._data[parent]:\n self._swap(index, parent)\n self.percolate_up(parent)\n if self.min is False:\n parent = self._parent(index)\n if index > 0 and self._data[index] > self._data[parent]:\n self._swap(index, parent)\n self.percolate_up(parent)", "def sift_up(self, i):\n #While the element is not the min value (top) or the second value in the min heap\n while i // 2 > 0:\n # Swap the values if the current value is less than it's parent value\n if self.heap_list[i][0] < self.heap_list[i // 2][0]:\n self.heap_list[i], self.heap_list[i // 2] = self.heap_list[i // 2], self.heap_list[i]\n # Move the index to the parent value (moving up the tree)\n i = i // 2", "def heapify_up(self, index):\n if index == 0:\n return\n parent_index = int((index - 1) / 2)\n if self.data[index] < self.data[parent_index]:\n self.data[index], self.data[parent_index] = self.data[parent_index], self.data[index]\n self.heapify_up(parent_index)", "def _shift_up(self, idx):\n\n parent = (idx - 1) // 2\n while parent >= 0 and self.value(parent) < self.value(idx):\n self.items[parent], self.items[idx] = self.items[idx], self.items[parent]\n idx = parent\n parent = (idx - 1) // 2", "def perc_down(self, i): #\r\n while (i * 2) <= self.size:\r\n mc = self.max_child(i) ## find max child\r\n if self.items[i] < self.items[mc]:\r\n tmp = self.items[i]\r\n self.items[i] = self.items[mc]\r\n self.items[mc] = tmp\r\n i = mc", "def perc_up(self, i):\r\n while i // 2 > 0:\r\n if self.items[i] > self.items[i // 2]:\r\n tmp = self.items[i // 2]\r\n self.items[i // 2] = self.items[i]\r\n self.items[i] = tmp\r\n i = i // 2", "def shift_item_up(self, index):\n while index > 0:\n parent_index = index // 2\n if parent_index > 0 and self.heaplist[parent_index] < self.heaplist[index]:\n self.heaplist[parent_index], self.heaplist[index] = self.heaplist[index], self.heaplist[parent_index]\n index = index // 2", "def percolate_up(self, position):\n parent = self.parent(position)\n if position > 0 and self.table[position] < self.table[parent]: # not root and child > parent\n self.swap(position, parent)\n self.percolate_up(parent) # recurse", "def heapify_up(self):\n index = len(self.heap) - 1\n while self.has_parent(index) and self.get_parent(index) > self.heap[index]:\n self.swap_values(self.get_parent_index(index), index)\n index = self.get_parent_index(index)", "def percolate_up(self, index):\n # reached root\n if index == 0:\n return\n\n p_ind = (index-1)//2\n # swap if parent is greater than current and continue percolating\n if self._data[p_ind] > self._data[index]:\n self.swap(p_ind, index)\n self.percolate_up(p_ind)", "def shift_item_down(self, parent_index):\n while 2 * parent_index <= self.currentsize:\n child_index = self.max_child_index(parent_index)\n if self.heaplist[child_index] > self.heaplist[parent_index]:\n self.heaplist[child_index], self.heaplist[parent_index] = self.heaplist[parent_index], self.heaplist[child_index]\n parent_index = child_index", "def sift_down(self, i):\n #If the current value has at least one child\n while (i * 2) <= self.current_size:\n #For the current value, get the index of the child with the least value (min child)\n mc = self.min_child(i)\n # If the current value is greater than it's \"min child\" value, swap the values\n if self.heap_list[i][0] > self.heap_list[mc][0]:\n self.heap_list[i], self.heap_list[mc] = self.heap_list[mc], self.heap_list[i]\n i = mc", "def 
sift_up(self, index):\n if self.size() == 1:\n return\n parent_index = self.parent(index)\n # sift up if it is larger than its parent\n while index > 0 and self.heap[index] > self.heap[parent_index]:\n self.heap[index], self.heap[parent_index] = self.heap[parent_index], self.heap[index]\n # update index\n index = parent_index\n parent_index = self.parent(index)", "def percDown(self, i):\n while(i * 2) <= self.currentSize:\n mc = self.minChild(i)\n if self.heapList[i] > self.heapList[mc]:\n tmp = self.heapList[i]\n self.heapList[i] = self.heapList[mc]\n self.heapList[mc] = tmp\n i = mc", "def percolate_down(self, i):\n while (i * 2) <= self.size:\n max_child = self.max_child(i)\n if self.heap_list[max_child] > self.heap_list[i]:\n tmp = self.heap_list[i]\n self.heap_list[i] = self.heap_list[max_child]\n self.heap_list[max_child] = tmp\n i = max_child", "def _swim(self):\n child_ix = len(self) - 1\n parent_ix = self._get_parent(child_ix)\n while (parent_ix is not None and self._test(parent_ix, child_ix)):\n self._exch(parent_ix, child_ix)\n child_ix = parent_ix\n parent_ix = self._get_parent(parent_ix)", "def percolate_up(self, i):\n while i // 2 > 0:\n if self.heap_list[i] > self.heap_list[i // 2]:\n tmp = self.heap_list[i // 2]\n self.heap_list[i // 2] = self.heap_list[i]\n self.heap_list[i] = tmp\n i = i // 2", "def percolate_down(self, index):\n if self.min is True:\n small_child = self.min_child(index)\n if small_child is not None:\n if self._data[small_child] < self._data[index]:\n self._swap(index, small_child)\n self.percolate_down(small_child)\n if self.min is False:\n large_child = self.max_child(index)\n if large_child is not None:\n if self._data[large_child] > self._data[index]:\n self._swap(index, large_child)\n self.percolate_down(large_child)", "def __sift_down(self, i: int):\n while (2 * i + 1) <= self.__len__() - 1:\n\n child_idx = self.__get_smallest_child(i)\n\n if self.__heap[i][0] > self.__heap[child_idx][0]:\n tmp = self.__heap[i]\n self.__heap[i] = self.__heap[child_idx]\n self.__heap[child_idx] = tmp\n i = child_idx", "def _up_heap(self, j):\n p = self._parent(j)\n if j > 0 and self._data[j] < self._data[p]:\n self._swap(j, p)\n self._up_heap(p)", "def collapseUp(self):\n retval = False\n for cStartInd in range(self.col):\n lst = [self.get_cell(i) for i in range(cStartInd, self.length, self.col)]\n lst, tmp = self.collapseRow(lst)\n x = 0\n for i in range(cStartInd, self.length, self.col):\n self.set_cell(i, lst[x])\n x += 1\n retval = retval or tmp\n return retval", "def percUp(self, i):\n while i // 2 > 0:\n if self.heapList[i] < self.heapList[i // 2]:\n tmp = self.heapList[i // 2]\n self.heapList[i // 2] = self.heapList[i]\n self.heapList[i] = tmp\n i = i // 2", "def percolate_down(self, position):\n if self.has_left(position):\n\n left = self.left_child(position)\n small_child = left\n if self.has_right(position):\n right = self.right_child(position)\n if self.table[right] < self.table[left]:\n small_child = right\n\n # swap smaller element up then do again until it cant go down anymore\n if self.table[small_child] < self.table[position]:\n self.swap(position, small_child)\n self.percolate_down(small_child)", "def _perc_up(self, cur_idx):\n while (cur_idx - 1) // 2 >= 0:\n parent_idx = (cur_idx - 1) // 2\n if self._heap[cur_idx] < self._heap[parent_idx]:\n self._heap[cur_idx], self._heap[parent_idx] = (\n self._heap[parent_idx],\n self._heap[cur_idx],\n )\n cur_idx = parent_idx", "def _percolate_down(self, index):\n parent = self.heap[index]\n child_2_index, 
child_1_index = index * 2, index * 2 + 1\n try:\n child_1_value = self.heap[child_1_index]\n except IndexError:\n child_1_value = maxint\n try:\n child_2_value = self.heap[child_2_index]\n except IndexError:\n child_2_value = maxint\n if parent > child_1_value or parent > child_2_value:\n # Swap parent with lesser child and then recursively percolate\n # the new child (previous parent) downwards\n if child_1_value > child_2_value:\n self._swap(child_2_index, index)\n self._percolate_down(child_2_index)\n else:\n self._swap(child_1_index, index)\n self._percolate_down(child_1_index)", "def _upheap(self, node):\n parent = self.parent(node)\n while parent is not None and node.element() < parent.element():\n self._swap(node, parent) # Move node upward while key\n parent = self.parent(node) # smaller than parent's key", "def bubble_up(self, i):\n\n parent = self.parent(i)\n\n while (parent is not None) and (self.heap[i][1] < self.heap[parent][1]):\n\n self.switch(i, parent)\n\n parent = self.parent(parent)", "def _sift_up(self, i):\n while i > 0:\n p = (i-1)//2\n if self._heap[i] < self._heap[p]:\n self._swap(i, p)\n i = p\n else:\n break", "def percolate_down(self, index):\n child = self.max_child(index)\n\n # swap if child is less than than current and continue percolating\n if child and self._data[child] < self._data[index]:\n self.swap(child, index)\n self.percolate_down(child)", "def _move_up(self, p):\n if p == self._data.first():\n count = p.element()._count\n walk = self._data.before(p)\n if count > walk.element()._count: # must shift forward\n while (walk != self._data.first() and\n count > self._data.before(walk).element()._count):\n walk = self._data.before(walk)\n\n self._data.add_before(walk, self._data.delete(p)) # delete/reinsert", "def up_heap(self, index):\r\n while index: # while not at the root\r\n parent = self._parent(index) # who is my parent?\r\n # Am I smaller than my parent?\r\n if self._data[index] < self._data[parent]:\r\n self.swap(index, parent) # if so, swap me and my parent\r\n index = parent # and continue bubbling up\r\n else:\r\n return # otherwise we are done\r", "def _shift_down(self, idx):\n\n child = (idx + 1) * 2 - 1\n while child < self.size and (\n self.value(idx) < self.value(child) or\n self.value(idx) < self.value(child + 1)):\n # Compare the left child and the right child and get the index of the larger one.\n if self.value(child + 1) > self.value(child):\n child += 1\n self.items[idx], self.items[child] = self.items[child], self.items[idx]\n idx = child\n child = (idx + 1) * 2 - 1", "def up(self):\n if self.top == self.current:\n return\n else:\n self.current += 1", "def __siftup(heap, nodes, pos, stopPos = 0):\n # Loop until past stopping position\n while pos > stopPos:\n # Set parent position\n parentPos = (pos - 1) >> 1\n\n # Swap if child less than parent\n if heap[pos][0] < heap[parentPos][0]:\n Graph.__swapHeapNodes(heap, nodes, pos, parentPos)\n pos = parentPos\n \n # End sift if child's first tuple is greater than or equal to parent\n else: break", "def _heapify_after_remove(self,ele):\r\n \r\n if self._chk_left(ele):\r\n left = self._left(ele)\r\n find_small_child = left\r\n # below to find which child has small integer\r\n if self._chk_right(ele):\r\n right = self._right(ele)\r\n if self._data[left] > self._data[right]:\r\n find_small_child = right\r\n \r\n if self._data[find_small_child] < self._data[ele]:\r\n self.swap(ele, find_small_child)\r\n self._heapify_after_remove(find_small_child)", "def _heapify_after_add(self,ele):\r\n parent 
= self._parent(ele)\r\n if ele > 0 and self._data[ele] < self._data[parent]:\r\n self.swap(ele, parent)\r\n self._heapify_after_add(parent)", "def _sift_down(self, i):\n mini = i\n l = 2*i + 1\n if l < self._size and\\\n self._heap[l] < self._heap[mini]:\n mini = l\n r = 2*i + 2\n if r < self._size and\\\n self._heap[r] < self._heap[mini]:\n mini = r\n if mini != i:\n self._swap(i, mini)\n self._sift_down(mini)", "def heapify_down(self):\n index = 0\n while self.has_left_child(index):\n smaller_child_index = self.get_left_child_index(index)\n if self.has_right_child(index) and self.get_right_child(index) < self.get_left_child(index):\n smaller_child_index = self.get_right_child_index(index)\n if self.heap[index] < self.heap[smaller_child_index]:\n break\n else:\n self.swap_values(index, smaller_child_index)\n index = smaller_child_index", "def _move_up(self, p):\n if p != self.data.first():\n self.data.add_first(self.data.delete(p))", "def move_up(self):\n\n prev_sibling = self.get_previous_sibling()\n if prev_sibling!=None: \n self.move_to(prev_sibling,'left')\n self.save()", "def siftup(self, node, pos):\n p = self.parent(pos)\n while p is not None and self.heap[p].key > node.key:\n self.heap[pos] = self.heap[p]\n self.heap[pos].pos = pos\n pos = p\n p = self.parent(p)\n self.heap[pos] = node\n node.pos = pos", "def _decrease_parent_count(self):\n if self.parent is not None:\n self.parent.size -= 1\n self.parent._decrease_parent_count()", "def _siftup(self, pos, entry):\n heap, imap = self._heap, self._index_map\n while pos > 0:\n parent_pos = (pos - 1) // 2\n parent_entry = self._heap[parent_pos]\n if not entry < parent_entry:\n break\n heap[pos] = parent_entry\n imap[parent_entry.vertex] = pos\n pos = parent_pos\n heap[pos] = entry\n imap[entry.vertex] = pos", "def sort_down(self, i):\n while ((i + 1) * 2) <= len(self._heap) + 1:\n mc = self.max_child(i)\n if self._heap[i] < self._heap[mc]:\n tmp = self._heap[i]\n self._heap[i] = self._heap[mc]\n self._heap[mc] = tmp\n i = mc", "def _move_up(self, p):\n if p != self._data.first():\n self._data.add_first(self._data.delete(p)) # remove or delete it from initial place and reinsert in new position", "def up(self, i):\n x = self.heap[i]\n while i > 1 and x < self.heap[i // 2]:\n self.heap[i] = self.heap[i // 2]\n self.rank[self.heap[i // 2]] = i\n i //= 2\n self.heap[i] = x # insertion index found\n self.rank[x] = i", "def up(self, i):\n x = self.heap[i]\n while i > 1 and x < self.heap[i // 2]:\n self.heap[i] = self.heap[i // 2]\n self.rank[self.heap[i // 2]] = i\n i //= 2\n self.heap[i] = x # insertion index found\n self.rank[x] = i", "def _update_min(self):\n tmp = self\n while tmp.left is not None:\n tmp = tmp.left\n return tmp.parent.key", "def extract_min(self):\r\n if self.is_empty():\r\n return None\r\n min_elem = self.heap_array[0]\r\n aux_elem = self.heap_array.pop()\r\n\r\n if self.is_empty() == False:\r\n self.heap_array[0] = aux_elem\r\n\r\n current_index = 0\r\n left_child_index = (2 * current_index) + 1\r\n current_value = self.heap_array[current_index]\r\n\r\n while left_child_index < len(self.heap_array): # loop that will repeat until no violation of the minheap properties exist\r\n current_min = current_value\r\n\r\n for i in range(2): # this loop is in place so that both children are compared and the smaller of the two is chosen \r\n if (left_child_index + i) > len(self.heap_array)-1: # condition to avoid out of bounds\r\n continue\r\n else:\r\n if int(self.heap_array[left_child_index + i]) < int(current_min): # if child 
is smaller than parent\r\n current_min = self.heap_array[left_child_index + i ] # set current minimum value\r\n current_min_index = left_child_index + i # and cureent minimim index( index where current minimum value is found )\r\n if current_min == current_value: # if no property is broken (in this case, the parent is actually less than its' children)\r\n break\r\n else: # if propert is broken\r\n self.heap_array[current_index], self.heap_array[current_min_index] = self.heap_array[current_min_index], self.heap_array[current_index] # swap the elements \r\n current_index = current_min_index\r\n left_child_index = int((2 * current_index) + 1)\r\n return min_elem", "def heapify(self):\r\n if self._size:\r\n start = self._parent(len(self._data)-1) # who'se the last parent?\r\n for index in range(start, -1, -1): # for all parents\r\n self.down_heap(index) # fix your heap\r", "def sift_up(heap, start, end):\n # Swap last node with parents until no longer greater.\n i = end - 1\n heaped = False\n while i > start and not heaped:\n parent = (i - 1) // 2\n if compare(heap[i], heap[parent]) > 0:\n heap[i], heap[parent] = heap[parent], heap[i]\n i = parent\n else:\n heaped = True", "def siftDown(start, count):\n root = start\n while root * 2 + 1 < count:\n child = root * 2 + 1 # 'child' is the left children of the current node\n if child < count - 1 and self.data[child] > self.data[child + 1]:\n # Verify that right sibling is lower than the left one, if so,\n # let 'child' be the right sibling\n child += 1\n if self.data[root] > self.data[child]:\n # Swap the current child and the parent if the parent is higher than the child\n self.data[root], self.data[child] = self.data[child], self.data[root]\n root = child\n else:\n return", "def _perc_down(self, cur_idx):\n while 2 * cur_idx + 1 < len(self._heap):\n min_child_idx = self._get_min_child(cur_idx)\n if self._heap[cur_idx] > self._heap[min_child_idx]:\n self._heap[cur_idx], self._heap[min_child_idx] = (\n self._heap[min_child_idx],\n self._heap[cur_idx],\n )\n else:\n return\n cur_idx = min_child_idx", "def heapify_down(self, index):\n min_index = index\n\n for c in [index * 2 + 1, index * 2 + 2]:\n if c < len(self.data) and self.data[c] > min_index:\n min_index = c\n if min_index == index:\n return\n self.data[index], self.data[min_index] = self.data[min_index], self.data[index]\n self.heapify_down(min_index)", "def minChild(self, i):\n if i * 2 + 1 > self.currentSize:\n return i*2\n else:\n if self.heapList[i*2] < self.heapList[i*2+1]:\n return i*2\n else:\n return i*2+1", "def increasing_parent(self, v):\n parent = None\n for i in range(self.size(), v, -1):\n if self.le(v, i):\n parent = i\n return parent", "def next_larger(self):\n if self.right is not None:\n return self.right.find_min()\n current = self\n while current.parent is not None and current is current.parent.right:\n current = current.parent\n return current.parent", "def pop_min(self):\n if self.get_size() == 0:\n return None\n\n # put minimum item at the end\n self.swap(0, len(self.table) - 1)\n\n # and remove it from the list;\n item = self.table.pop()\n\n # then fix new root\n self.percolate_down(0)\n return item", "def pop(self):\n result = self.peek()\n self.item_count -= 1\n index = 1\n mem_size = len(self.items)\n while True:\n left = index * 2\n right = left + 1\n if self.is_invalid_index(left) and self.is_invalid_index(right):\n # Neither child exists, so delete this item.\n self.mark_invalid_index(index)\n return result\n elif self.is_invalid_index(right):\n # Right child 
does not exist, so bubble up from left.\n self.items[index] = self.items[left]\n index = left\n elif self.is_invalid_index(left):\n # Left child does not exist, so bubble up from right.\n self.items[index] = self.items[right]\n index = right\n elif self.is_heap_order(self.items[left], self.items[right]):\n # Left child should be on top, so bubble up from left.\n self.items[index] = self.items[left]\n index = left\n else:\n # Right child should be on top, so bubble up from right.\n self.items[index] = self.items[right]\n index = right", "def GetPrevExpanded(self, item): \r\n\r\n return self.GetPrev(item, False)", "def collapseDown(self):\n retval = False\n for cStartInd in range(self.col):\n lst = [self.get_cell(i) for i in range(cStartInd, self.length, self.col)]\n lst.reverse()\n lst, tmp = self.collapseRow(lst)\n lst.reverse()\n x = 0\n for i in range(cStartInd, self.length, self.col):\n self.set_cell(i, lst[x])\n x += 1\n retval = retval or tmp\n return retval", "def heapify(self, i):\n left = 2*i + 1\n right = 2*i + 2\n #find the smallest element of A[i], A[left], A[right]\n if left < self.n and self.ar[left] < self.ar[i]:\n smallest = left\n else:\n smallest = i\n \n if right < self.n and self.ar[right] < self.ar[i]:\n smallest = right\n else:\n smallest = i\n \n #If smallest is not already the parent then swap\n if smallest != i:\n self.ar[i], self.ar[smallest] = self.ar[smallest], self.ar[i]\n self.heapify(smallest)", "def remove_min(self) -> object:\n if self.is_empty():\n raise MinHeapException\n return\n parent_index=0\n parent=self.get_min()\n #parent=5\n #print(parent)\n #print(self)\n self.heap.swap(parent_index,self.heap.length()-1)\n self.heap.pop()\n if self.is_empty():\n return parent\n min_child=self.find_min_child(1,2)\n while min_child!=None:\n if self.heap.get_at_index(min_child)>self.heap.get_at_index(parent_index):\n break\n self.heap.swap(min_child,parent_index)\n parent_index=min_child\n if parent_index==None:\n break\n min_child=self.find_min_child((parent_index * 2)+1,(parent_index * 2) + 2)\n return parent", "def ups(self, node):\r\n if self._row(node.count) > 0:\r\n return self.nodes[node.count - self.width]\r\n else:\r\n return None", "def _siftdown(self, entry, pos=0):\n heap, imap = self._heap, self._index_map\n heaplen = len(heap)\n left = pos * 2 + 1\n while left < heaplen:\n right = left + 1\n minpos = left + (right < heaplen and heap[right] < heap[left])\n minchild = heap[minpos]\n heap[pos] = minchild\n imap[minchild.vertex] = pos\n pos = minpos\n left = pos * 2 + 1\n return self._siftup(pos, entry)", "def _heapify(self):\n for _ in range(len(self.elements)):\n for i in range(len(self.elements)-1, 0, -1):\n parentPosition = (i-1)/2 # defaults to int i.e. 7/2=3, and 6/2=3\n if parentPosition < 0:\n parentPosition = 0\n \n # change this condition to '>' if coding for max-heap. 
This is for min-heap.\n if self.elements[i] < self.elements[parentPosition]:\n self.elements[i], self.elements[parentPosition] = self.elements[parentPosition], self.elements[i]", "def decreasing_parent(self, v):\n parent = None\n for i in range(1, v):\n if self.le(v, i):\n parent = i\n return parent", "def __build(self) -> None:\n parent_idx = 0\n left_idx = 1\n right_idx = 2\n length = len(self._array)\n\n # While the bottom/end of the min heap has not been reached\n while left_idx < length or right_idx < length:\n\n # initialize the child_idx to the child with the smaller value\n if right_idx < length:\n child_idx = right_idx if self._array[left_idx] > self._array[right_idx] else left_idx\n else:\n child_idx = left_idx\n\n # Swap the parent and child if the child's value is smaller than the parent's value\n if self._array[child_idx] < self._array[parent_idx]:\n self._swap(parent_idx, child_idx)\n parent_idx = child_idx\n right_idx = (2 * child_idx) + 2\n left_idx = (2 * child_idx) + 1\n # Otherwise, break out of the while loop\n else:\n break", "def _heapify(self):\n start = self._parent(len(self) - 1)\n for i in range(start, -1, -1):\n self._down_heap(i)", "def bottom_up(self, safe=False):\n if safe:\n assert not self.cycle()\n discard = set()\n queue = deque(self.leaves())\n while queue:\n new = queue.popleft()\n if new.children() - discard:\n queue.append(new)\n else:\n discard.add(new)\n for parent in sorted(new.parents(), key=lambda x:x.nodeid):\n if not parent in discard and not parent in queue:\n queue.append(parent)\n yield new", "def heap_down(self, index):\n left_child = (2*index) + 1\n right_child = (2*index) + 2\n\n if left_child < len(self.store):\n\n if right_child >= len(self.store):\n min_child = left_child\n elif self.store[left_child].key < self.store[right_child].key:\n min_child = left_child\n else:\n min_child = right_child\n\n if self.store[index].key > self.store[min_child].key:\n self.swap(index, min_child)\n self.heap_down(min_child)", "def removeMinimum(self, i = 1):\n\n # print(\"I\", i, self.heap[i], self.noOfRemovedElements)\n\n # Base cases\n if self.heap[i] == 'NaN' :\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n if 2 * i + 1 > self.noOfElements or 2 * i > self.noOfElements:\n self.heap[i] == \"NaN\"\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Initializing children element positions\n child1 = 2 * i\n child2 = ( 2 * i ) + 1\n # print(\"child 1\", child1, self.heap[child1])\n # print(\"child 2\", child2, self.heap[child2])\n\n # Case when there are no children\n if self.heap[child1] == 'NaN' and self.heap[child2] == 'NaN':\n self.heap[i] = 'NaN'\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Case when there is 
only one child\n elif self.heap[child2] == 'NaN':\n self.heap[i], self.heap[child1] = self.heap[child1], \"NaN\"\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Case when there is only one child, same as above\n elif self.heap[child1] == 'NaN':\n self.heap[i], self.heap[child2] = self.heap[child2], \"NaN\"\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Swapping parent with the smaller child\n # Bubbling down\n if self.heap[child1].dijkstraCriterion <= self.heap[child2].dijkstraCriterion:\n self.heap[i], self.heap[child1] = self.heap[child1], self.heap[i]\n self.removeMinimum( child1 )\n else:\n self.heap[i], self.heap[child2] = self.heap[child2], self.heap[i]\n self.removeMinimum( child2 )", "def bubble_down(self, i):\n\n smallest = self.find_smallest(i)\n\n org = i\n\n while smallest != org:\n\n self.switch(org, smallest)\n\n org = smallest\n\n smallest = self.find_smallest(smallest)", "def heap_up(self, index):\n # how can we do this recursively?\n parent_node_index = (index - 1)//2\n while self.store[index].key < self.store[parent_node_index].key and index > 0:\n self.swap(index, parent_node_index)\n index = parent_node_index\n parent_node_index = (index - 1)//2\n else:\n return self.store", "def pop_smallest(self):\n values = [item[0] for item in self.items] #list of the values\n #values = L[:]\n heapq.heapify(values)\n smallest = heapq.heappop(values)#not forgetting heapq.heapify(values)\n #directly writing t = heapq.heappop([4,2,4]) would result in t = 4\n i = self.getItemByValue(smallest)\n self.items.remove(i)\n return i[1]", "def down(self, i):\n x = self.heap[i]\n n = len(self.heap)\n while True:\n left = 2 * i # climb down the tree\n right = left + 1\n if (right < n and self.heap[right] < x and\n self.heap[right] < self.heap[left]):\n self.heap[i] = self.heap[right]\n self.rank[self.heap[right]] = i # move right child up\n i = right\n elif left < n and self.heap[left] < x:\n self.heap[i] = self.heap[left]\n self.rank[self.heap[left]] = i # move left child up\n i = left\n else:\n self.heap[i] = x # insertion index found\n self.rank[x] = i\n return", "def build_heap_helper(self, current):\n if current==-1:\n return\n parent_index=current\n min_child=self.find_min_child((parent_index * 2)+1,(parent_index * 2) + 2)\n while min_child!=None:\n if self.heap.get_at_index(parent_index)<self.heap.get_at_index(min_child):\n break\n self.heap.swap(min_child,parent_index)\n parent_index=min_child\n min_child=self.find_min_child((parent_index * 2)+1,(parent_index * 2) + 2)\n if min_child==None:\n break\n self.build_heap_helper(current-1)", "def remove_min(self):\r\n # Should raise an exception of size is 0...\r\n if self._size == 0: raise KeyError # Can't remove from an empty heap\r\n result = self._data[0] # remember the smallest\r\n self._data[0] = None # None is so we don't have a reference.\r\n self._size -= 1 # don't forget we have one less\r\n # bring the last to the front and stick the None at the end\r\n self.swap(0, 
self._size)\r\n # and let the item inserted at the front \"drift down\"\r\n self.down_heap(0)\r\n return result # finally return what was the minimum\r", "def _minimize(self, up_to=0):\n # Split checks into those below upper-limit and those below\n self._checks_to_do, checks_below = self._checks_to_do[:up_to], self._checks_to_do[up_to:]\n\n # Handle checks below from bottom and up\n for parent, element, child in reversed(checks_below):\n\n # Check if there exists a minimized node identical to child (identical subtree)\n if child in self._nodes:\n # Replace child with minimized node\n parent.children[element] = self._nodes[child]\n else:\n # Add chile to minimized nodes for later usage\n self._nodes[child] = child", "def insert(self, item):\n index = self.insert_at_next_index(item)\n self.items[index] = item\n while index > 1:\n parent_index = index / 2 # Truncate, e.g. 4 and 5 have parent 2.\n if self.is_heap_order(self.items[parent_index], self.items[index]):\n # The item does not need to bubble up anymore. Done.\n return\n else:\n # Swap items at index and parent_index\n temp = self.items[index]\n self.items[index] = self.items[parent_index]\n self.items[parent_index] = temp\n index = parent_index\n # The item bubbled all the way to the root. Done.\n return", "def move_up ( self ):\n list, index = self.get_info()\n self.value = (list[:index-1] + [ list[index], list[index-1] ] + \n list[index+1:])", "def update_parents(self):\n for a_parent in self.parents:\n for child in self.children:\n for a_dest in self.children[child]:\n if (a_dest[0] + a_parent.children[self][0][0],\n a_parent.children[self][0][1]) not in a_parent.children[child]:\n a_parent.children[child].append((a_dest[0] + a_parent.children[self][0][0],\n a_parent.children[self][0][1]))\n a_parent.update_parents()", "def delete(self):\n if self.left is None or self.right is None:\n if self is self.parent.left:\n self.parent.left = self.left or self.right\n if self.parent.left is not None:\n self.parent.left.parent = self.parent\n self.parent.min = self.parent.left.min\n else: \n self.parent.min = self.parent\n # Propagates the changes upwards.\n c = self.parent\n while c.parent is not None and c is c.parent.left:\n c.parent.min = c.min\n c = c.parent\n else:\n self.parent.right = self.left or self.right\n if self.parent.right is not None:\n self.parent.right.parent = self.parent\n return self\n else:\n s = self.next_larger()\n self.key, s.key = s.key, self.key\n return s.delete()", "def min_child(self, i):\n # If the current node only has one child, return the index of the unique child\n if (i * 2) + 1 > self.current_size:\n return i * 2\n else:\n # Herein the current node has two children\n # Return the index of the min child according to their values\n if self.heap_list[i * 2][0] < self.heap_list[(i * 2) + 1][0]:\n return i * 2\n else:\n return (i * 2) + 1", "def down(self, i):\n x = self.heap[i]\n n = len(self.heap)\n while True:\n left = 2 * i # climb down the tree\n right = left + 1\n if (right < n and self.heap[right] < x and\n self.heap[right] < self.heap[left]):\n self.heap[i] = self.heap[right]\n self.rank[self.heap[right]] = i # go back up right child\n i = right\n elif left < n and self.heap[left] < x:\n self.heap[i] = self.heap[left]\n self.rank[self.heap[left]] = i # go back up left child\n i = left\n else:\n self.heap[i] = x # insertion index found\n self.rank[x] = i\n return", "def heapify_top_down(self, position):\n start_index = position\n while True:\n left_child = 2 * start_index + 1\n right_child = 2 * 
start_index + 2\n if left_child < len(self.heap):\n if right_child < len(self.heap):\n # If right child is present\n if self.heap[right_child][0] < self.heap[left_child][0] and \\\n self.heap[right_child][0] < self.heap[start_index][0]:\n self.heap[start_index], self.heap[right_child] = self.heap[right_child], self.heap[start_index]\n start_index = right_child\n\n elif self.heap[left_child][0] < self.heap[start_index][0]:\n self.heap[start_index], self.heap[left_child] = self.heap[left_child], self.heap[start_index]\n start_index = left_child\n else:\n break\n else:\n if self.heap[left_child][0] < self.heap[start_index][0]:\n self.heap[start_index], self.heap[left_child] = self.heap[left_child], self.heap[start_index]\n start_index = left_child\n else:\n break\n else:\n # Its a leaf\n break", "def min_heap(self): \n \n for pos in range(self.size//2, 0, -1): \n self.min_heapify(pos)", "def incorrectly_nested(self):\n return self.parent is not None and self.root < self.parent.root", "def min_heapify(self, pos): \n \n # If the node is a non-leaf node and greater \n # than any of its child \n if not self.is_leaf(pos): \n if (self.Heap[pos] > self.Heap[self.left_child(pos)] or \n self.Heap[pos] > self.Heap[self.right_child(pos)]): \n \n # Swap with the left child and heapify \n # the left child \n if self.Heap[self.left_child(pos)] < self.Heap[self.right_child(pos)]: \n self.swap(pos, self.left_child(pos)) \n self.min_heapify(self.left_child(pos)) \n \n # Swap with the right child and heapify \n # the right child \n else: \n self.swap(pos, self.right_child(pos)) \n self.min_heapify(self.right_child(pos))", "def _downheap(self, node):\n num_children = self.num_children(node)\n while num_children > 0:\n if num_children == 2:\n if self.right(node).element() < self.left(node).element():\n child = self.right(node) # Pick child with minimal key\n else:\n child = self.left(node)\n else:\n child = self.left(node) # Only child must be left child\n if node.element() > child.element():\n self._swap(node, child) # Continue down-heap bubble\n num_children = self.num_children(node)\n else:\n return # Terminate loop", "def _down_heap(self, j):\n if self._has_left(j):\n small = self._left(j)\n if self._has_right(j):\n right = self._right(j)\n if self._data[small] > self._data[right]:\n small = right\n if self._data[j] > self._data[small]:\n self._swap(j, small)\n self._down_heap(small)\n return", "def _up(self, col, row):\n ones = 0\n twos = 0\n for step in range(4):\n current = self.layout[col][row + (step)] #step up\n if current == 1: ones+=1\n if current == 2: twos+=1\n\n return self._score_a_quartet(ones, twos)", "def sift_down_recursion(self, index):\n if self.size() == 0:\n return\n\n left = self.left_child(index)\n right = self.right_child(index)\n # if the element is leaf\n if left >= self.size():\n return\n\n max_child_index = left\n if right < self.size():\n if self.heap[right] > self.heap[left]:\n max_child_index = right\n\n # if already max heap, return\n if self.heap[index] >= self.heap[max_child_index]:\n return\n\n self.heap[index], self.heap[max_child_index] = self.heap[max_child_index], self.heap[index]\n\n index = max_child_index\n self.sift_down_recursion(index)", "def level_up(self):\n pass", "def shrink_up(self, cidx, amt):\n left = amt # track unused shrink amount\n # for each client before specified index\n for idx in range(0, cidx):\n # shrink by whatever is left-over of original amount\n left -= left - self._shrink(idx, left)\n # return unused shrink amount\n return left", "def 
sink_down(index: int, data: List[int], swaps: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n while index * 2 + 1 < len(data):\n j = index * 2 + 1\n # the other child exist and is smaller than the current one.\n if (j+1 < len(data)) and data[j+1] < data[j]:\n j += 1\n # heap order already satisfied.\n if data[index] <= data[j]:\n return swaps\n else:\n swap(index, j, data)\n swaps.append((index, j))\n index = j\n return swaps", "def up(self):\n self.set_initial_offset(self.initial_offset - self.item_heights)", "def go_first(self, e=0):\n # find level of current item\n level = len(str(self.n_parent).split(\":\")) + 1\n\n l_elem = list()\n for k, v in self.d.items():\n if len(str(k).split(\":\")) == level and str(k).startswith(self.n_parent):\n elem = str(k).split(\":\")[-1]\n l_elem.append(int(elem))\n m = min(l_elem)\n self.clear_controls()\n self.n = m\n self.set_value(self.n_parent, str(m))", "def _up_left(self, col, row):\n ones = 0\n twos = 0\n for step in range(4):\n\n current = self.layout[col + (step*-1)][row + (step)] #step up and left\n if current == 1: ones+=1\n if current == 2: twos+=1\n\n return self._score_a_quartet(ones, twos)", "def sift_down(self, start, end):\n i, j = start, 2*start+1\n # Temporary variable to decrease exchange times\n temp = self.heap_list[start]\n # end is equal to len(self.heap_list)-1\n while j <= end:\n # compare left child node with right child node\n if j<end and self.heap_list[j]<self.heap_list[j+1]:\n j += 1\n if temp >= self.heap_list[j]:\n break\n else:\n #self.heap_list[i], self.heap_list[j] = self.heap_list[j], self.heap_list[i]\n self.heap_list[i] = self.heap_list[j]\n i = j\n j = 2*j+1\n self.heap_list[i] = temp" ]
[ "0.73008174", "0.6977387", "0.69455796", "0.6931688", "0.6897218", "0.68804336", "0.6834447", "0.6793737", "0.67846876", "0.67604595", "0.672829", "0.6725234", "0.6719083", "0.66191155", "0.65959144", "0.6558018", "0.65543073", "0.6469654", "0.64037776", "0.63678527", "0.6363859", "0.6347702", "0.6327489", "0.6308135", "0.6306059", "0.6292802", "0.62894696", "0.6277973", "0.6155901", "0.61402416", "0.61215395", "0.61126506", "0.6065593", "0.6030315", "0.60061264", "0.59556854", "0.5914355", "0.5901214", "0.58418816", "0.5839708", "0.5828005", "0.58085674", "0.58083445", "0.5786949", "0.57305044", "0.5677537", "0.5677537", "0.56416863", "0.56343645", "0.5624725", "0.56226665", "0.56181806", "0.5615844", "0.5605745", "0.559788", "0.5597271", "0.5591652", "0.5585549", "0.5585181", "0.5553646", "0.554388", "0.55290914", "0.5525666", "0.5517976", "0.55167043", "0.5509629", "0.5502919", "0.5493489", "0.5490972", "0.54893553", "0.548898", "0.54718155", "0.5469487", "0.54674035", "0.5453879", "0.5439456", "0.5437373", "0.5389752", "0.5366183", "0.5365696", "0.53644186", "0.53613347", "0.5339364", "0.5337469", "0.53241706", "0.5319306", "0.5305477", "0.5287249", "0.528184", "0.526886", "0.526201", "0.52509755", "0.5249194", "0.5241969", "0.5238059", "0.5229912", "0.5228723", "0.522841", "0.5217066", "0.5213026" ]
0.69485795
2
Percolates down the item if the first element in the item is larger than the first element in the child.
def __sift_down(self, i: int):
    while (2 * i + 1) <= self.__len__() - 1:
        child_idx = self.__get_smallest_child(i)
        if self.__heap[i][0] > self.__heap[child_idx][0]:
            tmp = self.__heap[i]
            self.__heap[i] = self.__heap[child_idx]
            self.__heap[child_idx] = tmp
        i = child_idx
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def perc_down(self, i): #\r\n while (i * 2) <= self.size:\r\n mc = self.max_child(i) ## find max child\r\n if self.items[i] < self.items[mc]:\r\n tmp = self.items[i]\r\n self.items[i] = self.items[mc]\r\n self.items[mc] = tmp\r\n i = mc", "def percolate_down(self, i):\n while (i * 2) <= self.size:\n max_child = self.max_child(i)\n if self.heap_list[max_child] > self.heap_list[i]:\n tmp = self.heap_list[i]\n self.heap_list[i] = self.heap_list[max_child]\n self.heap_list[max_child] = tmp\n i = max_child", "def sift_down(self, i):\n #If the current value has at least one child\n while (i * 2) <= self.current_size:\n #For the current value, get the index of the child with the least value (min child)\n mc = self.min_child(i)\n # If the current value is greater than it's \"min child\" value, swap the values\n if self.heap_list[i][0] > self.heap_list[mc][0]:\n self.heap_list[i], self.heap_list[mc] = self.heap_list[mc], self.heap_list[i]\n i = mc", "def percolate_down(self, index):\n if self.min is True:\n small_child = self.min_child(index)\n if small_child is not None:\n if self._data[small_child] < self._data[index]:\n self._swap(index, small_child)\n self.percolate_down(small_child)\n if self.min is False:\n large_child = self.max_child(index)\n if large_child is not None:\n if self._data[large_child] > self._data[index]:\n self._swap(index, large_child)\n self.percolate_down(large_child)", "def percDown(self, i):\n while(i * 2) <= self.currentSize:\n mc = self.minChild(i)\n if self.heapList[i] > self.heapList[mc]:\n tmp = self.heapList[i]\n self.heapList[i] = self.heapList[mc]\n self.heapList[mc] = tmp\n i = mc", "def _shift_down(self, idx):\n\n child = (idx + 1) * 2 - 1\n while child < self.size and (\n self.value(idx) < self.value(child) or\n self.value(idx) < self.value(child + 1)):\n # Compare the left child and the right child and get the index of the larger one.\n if self.value(child + 1) > self.value(child):\n child += 1\n self.items[idx], self.items[child] = self.items[child], self.items[idx]\n idx = child\n child = (idx + 1) * 2 - 1", "def percolate_down(self, position):\n if self.has_left(position):\n\n left = self.left_child(position)\n small_child = left\n if self.has_right(position):\n right = self.right_child(position)\n if self.table[right] < self.table[left]:\n small_child = right\n\n # swap smaller element up then do again until it cant go down anymore\n if self.table[small_child] < self.table[position]:\n self.swap(position, small_child)\n self.percolate_down(small_child)", "def shift_item_down(self, parent_index):\n while 2 * parent_index <= self.currentsize:\n child_index = self.max_child_index(parent_index)\n if self.heaplist[child_index] > self.heaplist[parent_index]:\n self.heaplist[child_index], self.heaplist[parent_index] = self.heaplist[parent_index], self.heaplist[child_index]\n parent_index = child_index", "def percolate_down(self, index):\n child = self.max_child(index)\n\n # swap if child is less than than current and continue percolating\n if child and self._data[child] < self._data[index]:\n self.swap(child, index)\n self.percolate_down(child)", "def _percolate_down(self, index):\n parent = self.heap[index]\n child_2_index, child_1_index = index * 2, index * 2 + 1\n try:\n child_1_value = self.heap[child_1_index]\n except IndexError:\n child_1_value = maxint\n try:\n child_2_value = self.heap[child_2_index]\n except IndexError:\n child_2_value = maxint\n if parent > child_1_value or parent > child_2_value:\n # Swap parent with lesser child and then 
recursively percolate\n # the new child (previous parent) downwards\n if child_1_value > child_2_value:\n self._swap(child_2_index, index)\n self._percolate_down(child_2_index)\n else:\n self._swap(child_1_index, index)\n self._percolate_down(child_1_index)", "def heapify_down(self):\n index = 0\n while self.has_left_child(index):\n smaller_child_index = self.get_left_child_index(index)\n if self.has_right_child(index) and self.get_right_child(index) < self.get_left_child(index):\n smaller_child_index = self.get_right_child_index(index)\n if self.heap[index] < self.heap[smaller_child_index]:\n break\n else:\n self.swap_values(index, smaller_child_index)\n index = smaller_child_index", "def sort_down(self, i):\n while ((i + 1) * 2) <= len(self._heap) + 1:\n mc = self.max_child(i)\n if self._heap[i] < self._heap[mc]:\n tmp = self._heap[i]\n self._heap[i] = self._heap[mc]\n self._heap[mc] = tmp\n i = mc", "def _perc_down(self, cur_idx):\n while 2 * cur_idx + 1 < len(self._heap):\n min_child_idx = self._get_min_child(cur_idx)\n if self._heap[cur_idx] > self._heap[min_child_idx]:\n self._heap[cur_idx], self._heap[min_child_idx] = (\n self._heap[min_child_idx],\n self._heap[cur_idx],\n )\n else:\n return\n cur_idx = min_child_idx", "def perc_up(self, i):\r\n while i // 2 > 0:\r\n if self.items[i] > self.items[i // 2]:\r\n tmp = self.items[i // 2]\r\n self.items[i // 2] = self.items[i]\r\n self.items[i] = tmp\r\n i = i // 2", "def heap_down(self, index):\n left_child = (2*index) + 1\n right_child = (2*index) + 2\n\n if left_child < len(self.store):\n\n if right_child >= len(self.store):\n min_child = left_child\n elif self.store[left_child].key < self.store[right_child].key:\n min_child = left_child\n else:\n min_child = right_child\n\n if self.store[index].key > self.store[min_child].key:\n self.swap(index, min_child)\n self.heap_down(min_child)", "def _swim(self):\n child_ix = len(self) - 1\n parent_ix = self._get_parent(child_ix)\n while (parent_ix is not None and self._test(parent_ix, child_ix)):\n self._exch(parent_ix, child_ix)\n child_ix = parent_ix\n parent_ix = self._get_parent(parent_ix)", "def __sift_up(self, i: int):\n while i > 0:\n parent = (i - 1) // 2\n if self.__heap[i][0] < self.__heap[parent][0]:\n tmp = self.__heap[parent]\n self.__heap[parent] = self.__heap[i]\n self.__heap[i] = tmp\n i = parent", "def percolate_up(self, index):\n if self.min is True:\n parent = self._parent(index)\n if index > 0 and self._data[index] < self._data[parent]:\n self._swap(index, parent)\n self.percolate_up(parent)\n if self.min is False:\n parent = self._parent(index)\n if index > 0 and self._data[index] > self._data[parent]:\n self._swap(index, parent)\n self.percolate_up(parent)", "def _downheap(self, node):\n num_children = self.num_children(node)\n while num_children > 0:\n if num_children == 2:\n if self.right(node).element() < self.left(node).element():\n child = self.right(node) # Pick child with minimal key\n else:\n child = self.left(node)\n else:\n child = self.left(node) # Only child must be left child\n if node.element() > child.element():\n self._swap(node, child) # Continue down-heap bubble\n num_children = self.num_children(node)\n else:\n return # Terminate loop", "def shift_item_up(self, index):\n while index > 0:\n parent_index = index // 2\n if parent_index > 0 and self.heaplist[parent_index] < self.heaplist[index]:\n self.heaplist[parent_index], self.heaplist[index] = self.heaplist[index], self.heaplist[parent_index]\n index = index // 2", "def heapify_up(self, index):\n if 
index == 0:\n return\n parent_index = int((index - 1) / 2)\n if self.data[index] < self.data[parent_index]:\n self.data[index], self.data[parent_index] = self.data[parent_index], self.data[index]\n self.heapify_up(parent_index)", "def sift_up(self, i):\n #While the element is not the min value (top) or the second value in the min heap\n while i // 2 > 0:\n # Swap the values if the current value is less than it's parent value\n if self.heap_list[i][0] < self.heap_list[i // 2][0]:\n self.heap_list[i], self.heap_list[i // 2] = self.heap_list[i // 2], self.heap_list[i]\n # Move the index to the parent value (moving up the tree)\n i = i // 2", "def pop(self):\n result = self.peek()\n self.item_count -= 1\n index = 1\n mem_size = len(self.items)\n while True:\n left = index * 2\n right = left + 1\n if self.is_invalid_index(left) and self.is_invalid_index(right):\n # Neither child exists, so delete this item.\n self.mark_invalid_index(index)\n return result\n elif self.is_invalid_index(right):\n # Right child does not exist, so bubble up from left.\n self.items[index] = self.items[left]\n index = left\n elif self.is_invalid_index(left):\n # Left child does not exist, so bubble up from right.\n self.items[index] = self.items[right]\n index = right\n elif self.is_heap_order(self.items[left], self.items[right]):\n # Left child should be on top, so bubble up from left.\n self.items[index] = self.items[left]\n index = left\n else:\n # Right child should be on top, so bubble up from right.\n self.items[index] = self.items[right]\n index = right", "def down(self, i):\n x = self.heap[i]\n n = len(self.heap)\n while True:\n left = 2 * i # climb down the tree\n right = left + 1\n if (right < n and self.heap[right] < x and\n self.heap[right] < self.heap[left]):\n self.heap[i] = self.heap[right]\n self.rank[self.heap[right]] = i # move right child up\n i = right\n elif left < n and self.heap[left] < x:\n self.heap[i] = self.heap[left]\n self.rank[self.heap[left]] = i # move left child up\n i = left\n else:\n self.heap[i] = x # insertion index found\n self.rank[x] = i\n return", "def sift_down_recursion(self, index):\n if self.size() == 0:\n return\n\n left = self.left_child(index)\n right = self.right_child(index)\n # if the element is leaf\n if left >= self.size():\n return\n\n max_child_index = left\n if right < self.size():\n if self.heap[right] > self.heap[left]:\n max_child_index = right\n\n # if already max heap, return\n if self.heap[index] >= self.heap[max_child_index]:\n return\n\n self.heap[index], self.heap[max_child_index] = self.heap[max_child_index], self.heap[index]\n\n index = max_child_index\n self.sift_down_recursion(index)", "def percolate_up(self, i):\n while i // 2 > 0:\n if self.heap_list[i] > self.heap_list[i // 2]:\n tmp = self.heap_list[i // 2]\n self.heap_list[i // 2] = self.heap_list[i]\n self.heap_list[i] = tmp\n i = i // 2", "def _sift_down(self, i):\n mini = i\n l = 2*i + 1\n if l < self._size and\\\n self._heap[l] < self._heap[mini]:\n mini = l\n r = 2*i + 2\n if r < self._size and\\\n self._heap[r] < self._heap[mini]:\n mini = r\n if mini != i:\n self._swap(i, mini)\n self._sift_down(mini)", "def _shift_up(self, idx):\n\n parent = (idx - 1) // 2\n while parent >= 0 and self.value(parent) < self.value(idx):\n self.items[parent], self.items[idx] = self.items[idx], self.items[parent]\n idx = parent\n parent = (idx - 1) // 2", "def _perc_up(self, cur_idx):\n while (cur_idx - 1) // 2 >= 0:\n parent_idx = (cur_idx - 1) // 2\n if self._heap[cur_idx] < self._heap[parent_idx]:\n 
self._heap[cur_idx], self._heap[parent_idx] = (\n self._heap[parent_idx],\n self._heap[cur_idx],\n )\n cur_idx = parent_idx", "def sift_down(self, start, end):\n i, j = start, 2*start+1\n # Temporary variable to decrease exchange times\n temp = self.heap_list[start]\n # end is equal to len(self.heap_list)-1\n while j <= end:\n # compare left child node with right child node\n if j<end and self.heap_list[j]<self.heap_list[j+1]:\n j += 1\n if temp >= self.heap_list[j]:\n break\n else:\n #self.heap_list[i], self.heap_list[j] = self.heap_list[j], self.heap_list[i]\n self.heap_list[i] = self.heap_list[j]\n i = j\n j = 2*j+1\n self.heap_list[i] = temp", "def heapify_down(self, index):\n min_index = index\n\n for c in [index * 2 + 1, index * 2 + 2]:\n if c < len(self.data) and self.data[c] > min_index:\n min_index = c\n if min_index == index:\n return\n self.data[index], self.data[min_index] = self.data[min_index], self.data[index]\n self.heapify_down(min_index)", "def _heapify_after_remove(self,ele):\r\n \r\n if self._chk_left(ele):\r\n left = self._left(ele)\r\n find_small_child = left\r\n # below to find which child has small integer\r\n if self._chk_right(ele):\r\n right = self._right(ele)\r\n if self._data[left] > self._data[right]:\r\n find_small_child = right\r\n \r\n if self._data[find_small_child] < self._data[ele]:\r\n self.swap(ele, find_small_child)\r\n self._heapify_after_remove(find_small_child)", "def percolate_up(self, position):\n parent = self.parent(position)\n if position > 0 and self.table[position] < self.table[parent]: # not root and child > parent\n self.swap(position, parent)\n self.percolate_up(parent) # recurse", "def down(self, i):\n x = self.heap[i]\n n = len(self.heap)\n while True:\n left = 2 * i # climb down the tree\n right = left + 1\n if (right < n and self.heap[right] < x and\n self.heap[right] < self.heap[left]):\n self.heap[i] = self.heap[right]\n self.rank[self.heap[right]] = i # go back up right child\n i = right\n elif left < n and self.heap[left] < x:\n self.heap[i] = self.heap[left]\n self.rank[self.heap[left]] = i # go back up left child\n i = left\n else:\n self.heap[i] = x # insertion index found\n self.rank[x] = i\n return", "def siftDown(start, count):\n root = start\n while root * 2 + 1 < count:\n child = root * 2 + 1 # 'child' is the left children of the current node\n if child < count - 1 and self.data[child] > self.data[child + 1]:\n # Verify that right sibling is lower than the left one, if so,\n # let 'child' be the right sibling\n child += 1\n if self.data[root] > self.data[child]:\n # Swap the current child and the parent if the parent is higher than the child\n self.data[root], self.data[child] = self.data[child], self.data[root]\n root = child\n else:\n return", "def collapseDown(self):\n retval = False\n for cStartInd in range(self.col):\n lst = [self.get_cell(i) for i in range(cStartInd, self.length, self.col)]\n lst.reverse()\n lst, tmp = self.collapseRow(lst)\n lst.reverse()\n x = 0\n for i in range(cStartInd, self.length, self.col):\n self.set_cell(i, lst[x])\n x += 1\n retval = retval or tmp\n return retval", "def heapdown(h, k):\n\n #put this value in the correct place\n v = h[k]\n\n while 2 * k < len(h):\n\n #assign j to be the left child\n j = 2 * k\n\n #is there a child to the right\n if j + 1 < len(h):\n\n #is the left child smaller than the right child\n if h[j] < h[j+1]:\n j = j + 1\n\n #if v is greater than its larger child\n if v >= h[j]:\n break\n else:\n h[k] = h[j]\n k = j\n\n h[k] = v", "def percolate_up(self, index):\n 
# reached root\n if index == 0:\n return\n\n p_ind = (index-1)//2\n # swap if parent is greater than current and continue percolating\n if self._data[p_ind] > self._data[index]:\n self.swap(p_ind, index)\n self.percolate_up(p_ind)", "def down(self):\n if self.bottom == self.current:\n return\n else:\n self.current -= 1", "def heapify_up(self):\n index = len(self.heap) - 1\n while self.has_parent(index) and self.get_parent(index) > self.heap[index]:\n self.swap_values(self.get_parent_index(index), index)\n index = self.get_parent_index(index)", "def _down_heap(self, j):\n if self._has_left(j):\n small = self._left(j)\n if self._has_right(j):\n right = self._right(j)\n if self._data[small] > self._data[right]:\n small = right\n if self._data[j] > self._data[small]:\n self._swap(j, small)\n self._down_heap(small)\n return", "def heapify_top_down(self, position):\n start_index = position\n while True:\n left_child = 2 * start_index + 1\n right_child = 2 * start_index + 2\n if left_child < len(self.heap):\n if right_child < len(self.heap):\n # If right child is present\n if self.heap[right_child][0] < self.heap[left_child][0] and \\\n self.heap[right_child][0] < self.heap[start_index][0]:\n self.heap[start_index], self.heap[right_child] = self.heap[right_child], self.heap[start_index]\n start_index = right_child\n\n elif self.heap[left_child][0] < self.heap[start_index][0]:\n self.heap[start_index], self.heap[left_child] = self.heap[left_child], self.heap[start_index]\n start_index = left_child\n else:\n break\n else:\n if self.heap[left_child][0] < self.heap[start_index][0]:\n self.heap[start_index], self.heap[left_child] = self.heap[left_child], self.heap[start_index]\n start_index = left_child\n else:\n break\n else:\n # Its a leaf\n break", "def max_child(self, i):\r\n if i * 2 + 1 > self.size:\r\n return i * 2\r\n elif self.items[i*2] < self.items[i*2+1]:\r\n return i * 2+1\r\n else:\r\n return i * 2", "def sift_up(self, index):\n if self.size() == 1:\n return\n parent_index = self.parent(index)\n # sift up if it is larger than its parent\n while index > 0 and self.heap[index] > self.heap[parent_index]:\n self.heap[index], self.heap[parent_index] = self.heap[parent_index], self.heap[index]\n # update index\n index = parent_index\n parent_index = self.parent(index)", "def _up_heap(self, j):\n p = self._parent(j)\n if j > 0 and self._data[j] < self._data[p]:\n self._swap(j, p)\n self._up_heap(p)", "def _heapify_after_add(self,ele):\r\n parent = self._parent(ele)\r\n if ele > 0 and self._data[ele] < self._data[parent]:\r\n self.swap(ele, parent)\r\n self._heapify_after_add(parent)", "def bubble_down(self, i):\n\n smallest = self.find_smallest(i)\n\n org = i\n\n while smallest != org:\n\n self.switch(org, smallest)\n\n org = smallest\n\n smallest = self.find_smallest(smallest)", "def percUp(self, i):\n while i // 2 > 0:\n if self.heapList[i] < self.heapList[i // 2]:\n tmp = self.heapList[i // 2]\n self.heapList[i // 2] = self.heapList[i]\n self.heapList[i] = tmp\n i = i // 2", "def __siftup(heap, nodes, pos, stopPos = 0):\n # Loop until past stopping position\n while pos > stopPos:\n # Set parent position\n parentPos = (pos - 1) >> 1\n\n # Swap if child less than parent\n if heap[pos][0] < heap[parentPos][0]:\n Graph.__swapHeapNodes(heap, nodes, pos, parentPos)\n pos = parentPos\n \n # End sift if child's first tuple is greater than or equal to parent\n else: break", "def bubble_up(self, i):\n\n parent = self.parent(i)\n\n while (parent is not None) and (self.heap[i][1] < 
self.heap[parent][1]):\n\n self.switch(i, parent)\n\n parent = self.parent(parent)", "def sink_down(index: int, data: List[int], swaps: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n while index * 2 + 1 < len(data):\n j = index * 2 + 1\n # the other child exist and is smaller than the current one.\n if (j+1 < len(data)) and data[j+1] < data[j]:\n j += 1\n # heap order already satisfied.\n if data[index] <= data[j]:\n return swaps\n else:\n swap(index, j, data)\n swaps.append((index, j))\n index = j\n return swaps", "def _sift_up(self, i):\n while i > 0:\n p = (i-1)//2\n if self._heap[i] < self._heap[p]:\n self._swap(i, p)\n i = p\n else:\n break", "def _upheap(self, node):\n parent = self.parent(node)\n while parent is not None and node.element() < parent.element():\n self._swap(node, parent) # Move node upward while key\n parent = self.parent(node) # smaller than parent's key", "def collapseUp(self):\n retval = False\n for cStartInd in range(self.col):\n lst = [self.get_cell(i) for i in range(cStartInd, self.length, self.col)]\n lst, tmp = self.collapseRow(lst)\n x = 0\n for i in range(cStartInd, self.length, self.col):\n self.set_cell(i, lst[x])\n x += 1\n retval = retval or tmp\n return retval", "def minChild(self, i):\n if i * 2 + 1 > self.currentSize:\n return i*2\n else:\n if self.heapList[i*2] < self.heapList[i*2+1]:\n return i*2\n else:\n return i*2+1", "def min_child(self, i):\n # If the current node only has one child, return the index of the unique child\n if (i * 2) + 1 > self.current_size:\n return i * 2\n else:\n # Herein the current node has two children\n # Return the index of the min child according to their values\n if self.heap_list[i * 2][0] < self.heap_list[(i * 2) + 1][0]:\n return i * 2\n else:\n return (i * 2) + 1", "def up_heap(self, index):\r\n while index: # while not at the root\r\n parent = self._parent(index) # who is my parent?\r\n # Am I smaller than my parent?\r\n if self._data[index] < self._data[parent]:\r\n self.swap(index, parent) # if so, swap me and my parent\r\n index = parent # and continue bubbling up\r\n else:\r\n return # otherwise we are done\r", "def next_larger(self):\n if self.right is not None:\n return self.right.find_min()\n current = self\n while current.parent is not None and current is current.parent.right:\n current = current.parent\n return current.parent", "def up(self):\n if self.top == self.current:\n return\n else:\n self.current += 1", "def sift_down(heap, start, end):\n # Swap first node with children until no longer smaller.\n i = start\n heaped = False\n while not heaped:\n left = i * 2 + 1\n right = i * 2 + 2\n largest = i\n\n # Find largest of i, left and right\n if left < end and compare(heap[left], heap[largest]) > 0:\n largest = left\n if right < end and compare(heap[right], heap[largest]) > 0:\n largest = right\n\n # If left or right is larger than i, swap and repeat\n if largest == i:\n heaped = True\n else:\n heap[i], heap[largest] = heap[largest], heap[i]\n i = largest", "def sift_down(array, start, end):\n root = start\n while root*2+1 <= end:\n child = root*2+1\n swap = root\n if array[swap] < array[child]:\n swap = child\n if child+1 <= end and array[swap] < array[child+1]:\n swap = child+1\n if swap != root:\n array[root], array[swap] = array[swap], array[root]\n root = swap\n else:\n break", "def grow_down(self, width, height):\r\n old_self = copy.copy(self)\r\n self.used = True\r\n self.x = self.y = 0\r\n self.height += height\r\n self.right = old_self\r\n self.down = SquareAlgorithmNode(x=0,\r\n 
y=old_self.height,\r\n width=self.width,\r\n height=height)\r\n\r\n node = self.find(self, width, height)\r\n if node:\r\n return self.split(node, width, height)\r\n return None", "def shrink_down(self, cidx, amt):\n left = amt # track unused shrink amount\n # for each client after specified index\n for idx in range(cidx + 1, len(self.relative_sizes)):\n # shrink by current total left-over amount\n left -= left - self._shrink(idx, left)\n # return unused shrink amount\n return left", "def down(self):\n self.set_initial_offset(self.initial_offset + self.item_heights)", "def _heapify(self):\n start = self._parent(len(self) - 1)\n for i in range(start, -1, -1):\n self._down_heap(i)", "def _bubble_down_task(self) -> None:\n index = 0\n last_index = len(self.queue) - 1\n\n while True:\n current_value = self.queue[index]\n right_child_index, left_child_index = self._get_child_indices(index)\n right_task = self.queue[right_child_index] if right_child_index <= last_index else None\n left_task = self.queue[left_child_index] if left_child_index <= last_index else None\n\n if left_task and right_task and left_task[\"priority\"] > current_value[\"priority\"] and right_task[\"priority\"] > current_value[\"priority\"]:\n largest_child_index = left_child_index if left_task[\"priority\"] > right_task[\"priority\"] else right_child_index\n self._swap_tasks(index, largest_child_index)\n index = largest_child_index\n elif left_task and left_task[\"priority\"] > current_value[\"priority\"]:\n self._swap_tasks(index, left_child_index)\n index = left_child_index\n elif right_task and right_task[\"priority\"] > current_value[\"priority\"]:\n self._swap_tasks(index, right_child_index)\n index = right_child_index\n else:\n break", "def GetNextExpanded(self, item): \r\n\r\n return self.GetNext(item, False)", "def move_down(self):\n\n next_sibling = self.get_next_sibling()\n if next_sibling!=None: \n self.move_to(next_sibling,'right')\n self.save()", "def __build(self) -> None:\n parent_idx = 0\n left_idx = 1\n right_idx = 2\n length = len(self._array)\n\n # While the bottom/end of the min heap has not been reached\n while left_idx < length or right_idx < length:\n\n # initialize the child_idx to the child with the smaller value\n if right_idx < length:\n child_idx = right_idx if self._array[left_idx] > self._array[right_idx] else left_idx\n else:\n child_idx = left_idx\n\n # Swap the parent and child if the child's value is smaller than the parent's value\n if self._array[child_idx] < self._array[parent_idx]:\n self._swap(parent_idx, child_idx)\n parent_idx = child_idx\n right_idx = (2 * child_idx) + 2\n left_idx = (2 * child_idx) + 1\n # Otherwise, break out of the while loop\n else:\n break", "def siftup(self, node, pos):\n p = self.parent(pos)\n while p is not None and self.heap[p].key > node.key:\n self.heap[pos] = self.heap[p]\n self.heap[pos].pos = pos\n pos = p\n p = self.parent(p)\n self.heap[pos] = node\n node.pos = pos", "def down(self, node):\r\n if self._row(node.count) < self.height - 1:\r\n return self.nodes[node.count + self.width]\r\n else:\r\n return None", "def _siftdown(self, entry, pos=0):\n heap, imap = self._heap, self._index_map\n heaplen = len(heap)\n left = pos * 2 + 1\n while left < heaplen:\n right = left + 1\n minpos = left + (right < heaplen and heap[right] < heap[left])\n minchild = heap[minpos]\n heap[pos] = minchild\n imap[minchild.vertex] = pos\n pos = minpos\n left = pos * 2 + 1\n return self._siftup(pos, entry)", "def siftdown(self, node, pos):\n c = self.minchild(pos)\n while c 
is not None and self.heap[c].key < node.key:\n self.heap[pos] = self.heap[c]\n self.heap[pos].pos = pos\n pos = c\n c = self.minchild(c)\n self.heap[pos] = node\n node.pos = pos", "def _move_up(self, p):\n if p == self._data.first():\n count = p.element()._count\n walk = self._data.before(p)\n if count > walk.element()._count: # must shift forward\n while (walk != self._data.first() and\n count > self._data.before(walk).element()._count):\n walk = self._data.before(walk)\n\n self._data.add_before(walk, self._data.delete(p)) # delete/reinsert", "def _unbalanced(self):\n if self.internal():\n if self.full():\n if abs(self._leftchild._height-self._rightchild._height) >= 2:\n return True\n elif self._leftchild and not self._rightchild:\n if self._leftchild._height >= 2:\n return True\n elif self._rightchild._height >= 2:\n return True\n return False", "def decreasing_children(self, v):\n children = []\n root = None\n for i in range(v + 1, self.size() + 1):\n if not self.le(i, v):\n break\n if root is None or not self.le(i, root):\n children.append(i)\n root = i\n return children", "def _heapify(self):\n for _ in range(len(self.elements)):\n for i in range(len(self.elements)-1, 0, -1):\n parentPosition = (i-1)/2 # defaults to int i.e. 7/2=3, and 6/2=3\n if parentPosition < 0:\n parentPosition = 0\n \n # change this condition to '>' if coding for max-heap. This is for min-heap.\n if self.elements[i] < self.elements[parentPosition]:\n self.elements[i], self.elements[parentPosition] = self.elements[parentPosition], self.elements[i]", "def proportional_down(node: PhyloNode):\n\n # only project to marked children and if there is anything to project\n prj_children = [child for child in node.children if child.marked and child.sum_of_reads > 0]\n total_children_reads = sum(child.sum_of_reads for child in prj_children)\n\n # pass reads proportionally downwards\n if len(node.reads) > 0 and total_children_reads > 0:\n i = 0\n for child in prj_children:\n share = ceil(child.sum_of_reads / total_children_reads * len(node.reads))\n j = i + int(share)\n child.reads += node.reads[i:j]\n i = j\n if i < len(node.reads) - 1:\n prj_children[-1].reads += node.reads[i:]\n node.reads.clear()\n\n # propagate\n for child in prj_children:\n proportional_down(child)", "def down(self, num):\r\n if not len(self.items):\r\n return\r\n self.item_sel += num\r\n if self.item_sel < 0:\r\n self.item_sel = 0\r\n if self.item_sel > len(self.items) - 1:\r\n self.item_sel = len(self.items) - 1\r\n\r\n last_line = self.height - 1 - self.reserved_lines\r\n if self.item_sel < self.item_top:\r\n self.item_top = self.item_sel\r\n if self.item_sel - self.item_top > last_line:\r\n self.item_top = self.item_sel - last_line\r\n\r\n self.do_paint()", "def GetPrevExpanded(self, item): \r\n\r\n return self.GetPrev(item, False)", "def extract_min(self):\r\n if self.is_empty():\r\n return None\r\n min_elem = self.heap_array[0]\r\n aux_elem = self.heap_array.pop()\r\n\r\n if self.is_empty() == False:\r\n self.heap_array[0] = aux_elem\r\n\r\n current_index = 0\r\n left_child_index = (2 * current_index) + 1\r\n current_value = self.heap_array[current_index]\r\n\r\n while left_child_index < len(self.heap_array): # loop that will repeat until no violation of the minheap properties exist\r\n current_min = current_value\r\n\r\n for i in range(2): # this loop is in place so that both children are compared and the smaller of the two is chosen \r\n if (left_child_index + i) > len(self.heap_array)-1: # condition to avoid out of bounds\r\n continue\r\n 
else:\r\n if int(self.heap_array[left_child_index + i]) < int(current_min): # if child is smaller than parent\r\n current_min = self.heap_array[left_child_index + i ] # set current minimum value\r\n current_min_index = left_child_index + i # and cureent minimim index( index where current minimum value is found )\r\n if current_min == current_value: # if no property is broken (in this case, the parent is actually less than its' children)\r\n break\r\n else: # if propert is broken\r\n self.heap_array[current_index], self.heap_array[current_min_index] = self.heap_array[current_min_index], self.heap_array[current_index] # swap the elements \r\n current_index = current_min_index\r\n left_child_index = int((2 * current_index) + 1)\r\n return min_elem", "def up(self, i):\n x = self.heap[i]\n while i > 1 and x < self.heap[i // 2]:\n self.heap[i] = self.heap[i // 2]\n self.rank[self.heap[i // 2]] = i\n i //= 2\n self.heap[i] = x # insertion index found\n self.rank[x] = i", "def up(self, i):\n x = self.heap[i]\n while i > 1 and x < self.heap[i // 2]:\n self.heap[i] = self.heap[i // 2]\n self.rank[self.heap[i // 2]] = i\n i //= 2\n self.heap[i] = x # insertion index found\n self.rank[x] = i", "def min_heapify(self, pos): \n \n # If the node is a non-leaf node and greater \n # than any of its child \n if not self.is_leaf(pos): \n if (self.Heap[pos] > self.Heap[self.left_child(pos)] or \n self.Heap[pos] > self.Heap[self.right_child(pos)]): \n \n # Swap with the left child and heapify \n # the left child \n if self.Heap[self.left_child(pos)] < self.Heap[self.right_child(pos)]: \n self.swap(pos, self.left_child(pos)) \n self.min_heapify(self.left_child(pos)) \n \n # Swap with the right child and heapify \n # the right child \n else: \n self.swap(pos, self.right_child(pos)) \n self.min_heapify(self.right_child(pos))", "def move_down ( self ):\n list, index = self.get_info()\n self.value = (list[:index] + [ list[index+1], list[index] ] + \n list[index+2:])", "def heapify(self):\r\n if self._size:\r\n start = self._parent(len(self._data)-1) # who'se the last parent?\r\n for index in range(start, -1, -1): # for all parents\r\n self.down_heap(index) # fix your heap\r", "def removeMinimum(self, i = 1):\n\n # print(\"I\", i, self.heap[i], self.noOfRemovedElements)\n\n # Base cases\n if self.heap[i] == 'NaN' :\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n if 2 * i + 1 > self.noOfElements or 2 * i > self.noOfElements:\n self.heap[i] == \"NaN\"\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Initializing children element positions\n child1 = 2 * i\n child2 = ( 2 * i ) + 1\n # print(\"child 1\", child1, self.heap[child1])\n # print(\"child 2\", child2, self.heap[child2])\n\n # Case when there are no children\n if self.heap[child1] == 'NaN' and self.heap[child2] == 'NaN':\n self.heap[i] = 'NaN'\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times 
interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Case when there is only one child\n elif self.heap[child2] == 'NaN':\n self.heap[i], self.heap[child1] = self.heap[child1], \"NaN\"\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Case when there is only one child, same as above\n elif self.heap[child1] == 'NaN':\n self.heap[i], self.heap[child2] = self.heap[child2], \"NaN\"\n self.noOfRemovedElements += 1\n # Restructures heap to be a continuous list otherwise a lot of \"Nan\" noOfElements\n # due to removal of minimums a lot of times interfere with the logic of the program\n if self.noOfRemovedElements == self.limitOfRestructuring:\n self.restructureHeap()\n self.noOfRemovedElements = 0\n return\n\n # Swapping parent with the smaller child\n # Bubbling down\n if self.heap[child1].dijkstraCriterion <= self.heap[child2].dijkstraCriterion:\n self.heap[i], self.heap[child1] = self.heap[child1], self.heap[i]\n self.removeMinimum( child1 )\n else:\n self.heap[i], self.heap[child2] = self.heap[child2], self.heap[i]\n self.removeMinimum( child2 )", "def build_heap_helper(self, current):\n if current==-1:\n return\n parent_index=current\n min_child=self.find_min_child((parent_index * 2)+1,(parent_index * 2) + 2)\n while min_child!=None:\n if self.heap.get_at_index(parent_index)<self.heap.get_at_index(min_child):\n break\n self.heap.swap(min_child,parent_index)\n parent_index=min_child\n min_child=self.find_min_child((parent_index * 2)+1,(parent_index * 2) + 2)\n if min_child==None:\n break\n self.build_heap_helper(current-1)", "def left_child(self, pos): \n return 2 * pos", "def _decrease_parent_count(self):\n if self.parent is not None:\n self.parent.size -= 1\n self.parent._decrease_parent_count()", "def _get_min_child(self, parent_idx):\n if 2 * parent_idx + 2 > len(self._heap) - 1:\n return 2 * parent_idx + 1\n if self._heap[2 * parent_idx + 1] < self._heap[2 * parent_idx + 2]:\n return 2 * parent_idx + 1\n return 2 * parent_idx + 2", "def heapify_subtree(x, idx):\n n = len(x)\n n2 = n >> 1\n\n def body_func(state):\n x, idx, _ = state\n largest = largest_plr(x, idx)\n change = largest != idx\n x = lax.cond(change,\n lambda x: vec_swap_entries(x, largest, idx),\n lambda x: x,\n x)\n return x, largest, change\n\n def cond_func(state):\n x, idx, change = state\n return jnp.logical_and(idx < n2, change)\n\n state = x, idx, True\n state = lax.while_loop(cond_func, body_func, state)\n x, idx, change = state\n return x", "def delMin(self):\n retval = self.heapList[1]\n self.heapList[1] = self.heapList[self.currentSize]\n self.currentSize = self.currentSize - 1\n self.heapList.pop()\n self.percDown(1)\n return retval", "def _move_up(self, p):\n if p != self.data.first():\n self.data.add_first(self.data.delete(p))", "def _height2(self, p):\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))", "def sift_up(heap, start, end):\n # Swap last node with parents until no longer greater.\n i = end - 1\n heaped = False\n while i > start and not heaped:\n parent = (i - 1) // 2\n if compare(heap[i], heap[parent]) > 0:\n heap[i], heap[parent] = heap[parent],
heap[i]\n i = parent\n else:\n heaped = True", "def _siftup(self, pos, entry):\n heap, imap = self._heap, self._index_map\n while pos > 0:\n parent_pos = (pos - 1) // 2\n parent_entry = self._heap[parent_pos]\n if not entry < parent_entry:\n break\n heap[pos] = parent_entry\n imap[parent_entry.vertex] = pos\n pos = parent_pos\n heap[pos] = entry\n imap[entry.vertex] = pos", "def _height2(self, p):\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))", "def _height2(self, p):\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))", "def _height2(self, p):\n if self.is_leaf(p):\n return 0\n else:\n return 1 + max(self._height2(c) for c in self.children(p))" ]
[ "0.7993369", "0.7516703", "0.7435907", "0.7400076", "0.7334863", "0.7306124", "0.719238", "0.71682036", "0.710217", "0.70360464", "0.68641955", "0.6791557", "0.6775774", "0.66466707", "0.65461844", "0.653898", "0.64824", "0.64718825", "0.64513063", "0.6429633", "0.6399211", "0.63723856", "0.6330467", "0.63014406", "0.6296097", "0.62912685", "0.6281141", "0.6259654", "0.62277484", "0.62273955", "0.62256444", "0.6213633", "0.6206779", "0.62059915", "0.61178833", "0.6098758", "0.6092482", "0.6074381", "0.60448956", "0.6037808", "0.6037026", "0.60299814", "0.6010655", "0.6005312", "0.6004652", "0.59896976", "0.5973854", "0.5940428", "0.59301305", "0.59257233", "0.59094954", "0.5833741", "0.57758003", "0.5705511", "0.5695442", "0.5693668", "0.5683648", "0.5678001", "0.56376696", "0.56356305", "0.5612116", "0.5604863", "0.5600045", "0.5590258", "0.5566734", "0.55596197", "0.55556035", "0.5550791", "0.55191326", "0.5482911", "0.54693776", "0.546028", "0.54554474", "0.543193", "0.54135215", "0.5406613", "0.5386613", "0.53632843", "0.53617287", "0.53523254", "0.5332036", "0.5324396", "0.5324396", "0.5312019", "0.5310302", "0.5300492", "0.5287636", "0.5275552", "0.5273281", "0.52439094", "0.523902", "0.5236246", "0.5233229", "0.52237236", "0.5217185", "0.52135205", "0.52000654", "0.5199087", "0.5199087", "0.5199087" ]
0.71848524
7
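The record closed by the score and rank above collects heap-sifting snippets: each negative moves an element down (sift_down, heapify_down, siftDown, heapdown) or up (percolate_up, sift_up) until the heap property holds. As a reference point for reading those variants, here is a minimal self-contained min-heap sketch; it is synthesized for illustration rather than taken from any entry above, and the function names are mine.

import random

def sift_up(heap, idx):
    # Bubble heap[idx] up while it is smaller than its parent (min-heap).
    while idx > 0:
        parent = (idx - 1) // 2
        if heap[parent] <= heap[idx]:
            return
        heap[parent], heap[idx] = heap[idx], heap[parent]
        idx = parent

def sift_down(heap, idx, end):
    # Push heap[idx] down, always swapping with the smaller child.
    while 2 * idx + 1 < end:
        child = 2 * idx + 1
        if child + 1 < end and heap[child + 1] < heap[child]:
            child += 1
        if heap[idx] <= heap[child]:
            return
        heap[idx], heap[child] = heap[child], heap[idx]
        idx = child

# Self-check: pushing everything then popping everything yields sorted order.
data = random.sample(range(1000), 50)
heap = []
for v in data:
    heap.append(v)
    sift_up(heap, len(heap) - 1)
out = []
while heap:
    out.append(heap[0])
    heap[0] = heap[-1]
    heap.pop()
    sift_down(heap, 0, len(heap))
assert out == sorted(data)

Swapping with the smaller child on the way down is what keeps the minimum at index 0; several of the snippets above instead hold the moving value in a temporary to cut the number of assignments, which is an equivalent optimization.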
Push item to its leaf in the tree and update sums.
def push(self, value):
    idx = self.__capacity - 1 + self.__size
    self.__tree[idx] = value
    self.__update(idx)
    self.__size += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, item):\r\n self.root = self.recurse_add(self.root, item)", "def push(self, item): # 05:27 Lecture Week 2 \"Stacks\" (16:24)\n oldfirst = self.first # Save a link to the list\n self.first = self._Node(item, oldfirst) # first points to most recent Node\n self.N += 1", "def add(tree, item):\n # This is a non recursive add method. A recursive method would be cleaner.\n if tree.root == None: # ... Empty tree ...\n tree.root = Node(item, None, None) # ... so, make this the root\n else:\n lst = []\n # Find where to put the item\n child_tree = tree.root\n while child_tree != None:\n parent = child_tree\n lst.append(parent)\n if item < child_tree.item: # If smaller ...\n child_tree = child_tree.left # ... move to the left\n elif item > child_tree.item:\n child_tree = child_tree.right\n\n # child_tree should be pointing to the new node, but we've gone too far\n # we need to modify the parent nodes\n if item < parent.item:\n parent.left = Node(item, None, None)\n elif item > parent.item:\n parent.right = Node(item, None, None)\n # Ignore the case where the item is equal.\n for items in lst[-2::-1]:\n if abs(tree.recurse_height(items.left) - tree.recurse_height(items.right)) > 1:\n return items.item\n\n #\n # Note that you can get the height of a node by calling tree.recurse_height().\n # For example, the height of the root is tree.recurse_height(tree.root)\n #", "def add(self, item):\n if item <= self.item:\n if self.left is None:\n self.left = branch(item)\n else:\n self.left.add(item)\n else:\n if self.right is None:\n self.right = branch(item)\n else:\n self.right.add(item)", "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def add(self, i: int, v: int) -> None:\n while i < self.size:\n self.tree[i] += v\n i += self._lsb(i)", "def add_item(self, payload, coord):\n\n self.root.add(payload, coord, self.maxiter)", "def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 1) // 2", "def push(self, item):\n\t\tself.top+=1;\n\t\tself.arr.insert(self.top, item);", "def push(self, item):\n # Reassign the link to top of the stack and update the\n self.top = [item] + self.top", "def push(self, item):\n self.heap.append(self.m * item)\n self._sift_up()", "def _add(self, item):\n if isinstance(item, Node):\n if item in self:\n return #already added\n elif item.name in self:\n if item.parent:\n #maintain consistency as we're replacing an existing item\n item.parent._remove(item)\n self._children[item.name] = item\n item._parent = self\n else:\n raise ValueError(\"Expected argument to be of type Node or one of \"\n \"its descendents\")", "def push(stack, item):\n new_node = Node(item, stack.top)\n stack.top = new_node\n stack.size = stack.size + 1", "def push(self, item):\n heapq.heappush(self.heap, item)", "def push(self, key, val):\n # create new node and add to data\n new_ele = Node(key, val)\n 
self._data.append(new_ele)\n # percolate number into correct place\n self.percolate_up(len(self)-1)", "def push(self, item):\n array = self.array\n compare = self.compare\n array.append(item)\n self.pos[item] = len(array) - 1\n high = len(array) - 1\n while high > 0:\n low = (high-1)/2\n if compare(array[low], array[high]) <= 0:\n break\n self.pos[array[high]] = low\n self.pos[array[low]] = high\n array[low], array[high] = array[high], array[low]\n high = low", "def push(self, item):\r\n self.stack.insert(0, item)", "def push(self, item):\n self._items.append(item)\n # This operation increments the number of items\n # in the stack, we need to update the count variable\n self._update_count()", "def insert(self, item):\n index = self.insert_at_next_index(item)\n self.items[index] = item\n while index > 1:\n parent_index = index / 2 # Truncate, e.g. 4 and 5 have parent 2.\n if self.is_heap_order(self.items[parent_index], self.items[index]):\n # The item does not need to bubble up anymore. Done.\n return\n else:\n # Swap items at index and parent_index\n temp = self.items[index]\n self.items[index] = self.items[parent_index]\n self.items[parent_index] = temp\n index = parent_index\n # The item bubbled all the way to the root. Done.\n return", "def push(self, node):\n try:\n self._load(True)\n\n # Stow the new node at our head and increment it\n self.db[self.head] = node\n self.head = self.head + 1\n if self.head >= self.size:\n self.head -= self.size\n self.db['head'] = self.head\n\n # If we haven't just also pushed out an old item,\n # increment the count of items in our db.\n if self.count < self.size:\n self.count += 1\n self.db['count'] = self.count\n except KeyError:\n # HACK\n self.clear()", "def push(self, item):\n self.stack.append(item)", "def push(self, item):\n self.stack.append(item)", "def push(self, item):\n pass", "def addItem(self, item):\n self.items.append(item)\n self.totalWeight += item", "def push(self, item):\n\t\tself.items.append(item)", "def push(self,item):\n self.items.append(item)", "def push(self, val):\n self.high_low.append(val)\n try:\n self.compare_parent(self.high_low.index(self.high_low[-1]))\n except (ValueError, IndexError):\n pass", "def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i]\n while i <= self.size:\n self.BITree[i] += v\n\n # Update index to next set bit in binary representation\n i += i & (-i)", "def push(self, Item):\n self.data_container.insert(0, Item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item: tuple):\n self.__heap.append(item)\n self.__sift_up(self.__len__() - 1)", "def add(self, val):\n if val < self.val:\n if self.left:\n self.left.add(val)\n else:\n self.left = Tree1(val)\n elif val > self.val:\n if self.right:\n self.right.add(val)\n else:\n self.right = Tree1(val)", "def push(self, key, val):\n self._data.append(Node(key, val))\n self.percolate_up(len(self._data) - 1)", "def updateTree(self, i, val, cur):\n start, end = cur.start, cur.end\n if start == end == i:\n cur.val = val\n return\n mid = start+(end-start)/2\n if i <= mid:\n cur.val -= cur.left.val\n self.updateTree(i, val, cur.left)\n cur.val += cur.left.val\n else:\n cur.val -= cur.right.val\n self.updateTree(i, val, cur.right)\n cur.val += cur.right.val", "def push(self, 
x):\n assert x not in self.rank\n i = len(self.heap)\n self.heap.append(x) # add a new leaf\n self.rank[x] = i\n self.up(i) # maintain heap order", "def push(self, x):\n assert x not in self.rank\n i = len(self.heap)\n self.heap.append(x) # add a new leaf\n self.rank[x] = i\n self.up(i) # maintain heap order", "def __setitem__(self, k, v):\n #if tree is empty\n if self.is_empty():\n # inherited from LinkedBinaryTree class\n # _Item(k, v) is inheritated from MapBase class\n leaf = self._add_root(self._Item(k,v)) \n else:\n p = self._subtree_search(self.root(), k)\n #if k is present in current tree\n if p.key() == k:\n #it's not p.value()!!\n p.element()._value = v\n self._rebalance_access(p)\n return\n #didn't find k in current tree; create a new object of Item\n # and add to either left or right of the last node searched\n else:\n item = self._Item(k, v)\n if k > p.key():\n leaf = self._add_right(p, item)\n else:\n leaf = self._add_left(p, item)\n self._rebalance_insert(leaf)", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total", "def push(self, item):\n self.stack.append(item)\n\n if not self.max or item >= self.max[-1]: # add if empty or if greater\n self.max.append(item)", "def add_sorted(self, val):\n if self.root is None:\n self.root = TreeNode(val)\n else:\n self._add_sorted(val, self.root)", "def push(self, item):\n\n self._stack.append(item)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def insert(self, item):\n self.pool.append(item)\n if len(self.pool) == self.min_tree_size:\n self.trees.append(_ExtendedVPTree(self.pool, self.dist_fn))\n self.pool = []\n while len(self.trees) > 1 and self.trees[-1].size == self.trees[-2].size:\n a = self.trees.pop()\n b = self.trees.pop()\n self.trees.append(_ExtendedVPTree(a.points + b.points, self.dist_fn))", "def push(self, item):\n self._data.append(item)", "def insert(self, item):\n # Handle the case where the tree is empty\n if self.is_empty():\n # if self.root is None:\n # TODO: Create a new root node\n self.root = ...\n # TODO: Increase the tree size\n self.size ...\n return\n # Find the parent node of where the given item should be inserted\n parent = self._find_parent_node(item)\n # TODO: Check if the given item should be inserted left of the parent node\n if ...:\n # TODO: Create a new node and set the parent's left child\n parent.left = ...\n # TODO: Check if the given item should be inserted right of the parent node\n elif ...:\n # TODO: Create a new node and set the parent's right child\n parent.right = ...\n # TODO: Increase the tree size\n self.size ...", "def push(self, item: Any) -> None:\n if self.hit is True and self._stack:\n logger.warning(\n \"Previous value of argument %r is overwritten with %r.\",\n self.namestr(),\n item,\n )\n self._stack = []\n\n if self.hit or not self._stack:\n self._stack.append([])\n self._stack[-1].append(item)\n self.hit = 
False", "def push(self, item) -> None:\n self.items.append(item)", "def push(self, new_item):\n self.items.append(new_item)", "def lpush(self, item):\n\n self.r.lpush(self.joblist, item)", "def set(self, node_index, value):\n if value < 0.0:\n raise ValueError(\n 'Sum tree values should be nonnegative. Got {}'.format(value))\n self.highest_set = max(node_index, self.highest_set)\n node_index = node_index + self.low_idx\n self.max_recorded_priority = max(value, self.max_recorded_priority)\n\n delta_value = value - self.nodes[node_index]\n\n # Now traverse back the tree, adjusting all sums along the way.\n for _ in reversed(range(self.depth)):\n # Note: Adding a delta leads to some tolerable numerical inaccuracies.\n self.nodes[node_index] += delta_value\n node_index = (node_index - 1) // 2\n\n self.nodes[node_index] += delta_value\n assert node_index == 0, ('Sum tree traversal failed, final node index '\n 'is not 0.')", "def push(self, item):\n self._pushed.append(item)", "def push(self, item):\n self._items.append(item)", "def push(self, item):\n if self.top == None:\n self.top = Node(item)\n else:\n new_node = Node(item)\n new_node.next = self.top\n self.top = new_node", "def __setitem__(self, idx: int, val: float) -> None:\n assert 0 <= idx < self.capacity, f\"idx={idx} capacity={self.capacity}\"\n\n # Index of the leaf to insert into (always insert in \"second half\"\n # of the tree, the first half is reserved for already calculated\n # reduction-values).\n idx += self.capacity\n self.value[idx] = val\n\n # Recalculate all affected reduction values (in \"first half\" of tree).\n idx = idx >> 1 # Divide by 2 (faster than division).\n while idx >= 1:\n update_idx = 2 * idx # calculate only once\n # Update the reduction value at the correct \"first half\" idx.\n self.value[idx] = self.operation(\n self.value[update_idx], self.value[update_idx + 1]\n )\n idx = idx >> 1 # Divide by 2 (faster than division).", "def push(self, item) -> None:\n self._items.append(item)", "def add_leaf(self, leaf):\n cur_tree_size = self.tree_size\n leaf_hash = self.__hasher.hash_leaf(leaf)\n with self.__db.write_batch() as wb:\n wb.put(self.__leaves_db_prefix + encode_int(cur_tree_size), leaf_hash)\n wb.put(self.__index_db_prefix + leaf_hash, encode_int(cur_tree_size))\n wb.put(self.__stats_db_prefix + 'tree_size', str(cur_tree_size + 1))\n return cur_tree_size", "def update(self, idx, value):\n idx = self.__capacity - 1 + idx\n self.__tree[idx] = value\n self.__update(idx)", "def fn(node, x):\n if not node: return x\n x = fn(node.right, x) # sum of right subtree\n x += node.val \n node.val = x\n return fn(node.left, x)", "def _put(self, k, v, currNode):\n if k < currNode.key:\n if currNode.hasLeftChild():\n self._put(k, v, currNode.leftChild)\n else:\n currNode.leftChild = TreeNode(k, v, parent=currNode)\n\n elif k > currNode.key:\n if currNode.hasRightChild():\n self._put(k, v, currNode.rightChild)\n else:\n currNode.rightChild = TreeNode(k, v, parent=currNode)\n\n else:\n currNode.payload = v\n self.size -= 1", "def heap_update(self):\n print 'SumTree pre-update:', self.replay.tree[0].sum\n last_ixs = self.replay.last_ixs(True)\n while True:\n if len(last_ixs) == 0:\n break\n if len(last_ixs) < 10000:\n ixs = last_ixs\n last_ixs = []\n else:\n ixs = last_ixs[:10000]\n last_ixs = last_ixs[10000:]\n batch = [self.replay.tree[ix].pointer for ix in ixs]\n delta = self.get_delta(batch)\n self.get_p_weights(delta, batch, ixs)\n print 'SumTree post-update:', self.replay.tree[0].sum\n print 'SumTree updated'", "def 
add(self, node):\n cost = self.costfn(node)\n heapq.heappush(self.heap, (cost, node))\n self.states[node.state] = node", "def push(self, item):\n node = Node(item)\n node.next = self._top\n self._top = node", "def add_item(self, new_value):\n\n # Allocate more memory if necessary\n # This keeps add_item to O(1), generally.\n # Otherwise, have to duplicate ndarray every time\n # last_item is an index, heap_size is a limit (index + 1)\n if self.last_item >= self.heap_size - 1:\n # Allocate double the memory\n new_heap_list = self.heap.tolist() + [0] * self.heap_size\n self.heap = np.array(new_heap_list, dtype=np.int)\n self.heap_size *= 2\n\n # Add item index and value\n # to already allocated memory\n self.last_item = self.last_item + 1\n self.heap[self.last_item] = new_value\n\n # Update heap level\n self.level = np.floor(np.log(self.last_item + 1) / np.log(2))", "def append(self, item):\n\t\theapq.heappush(self.heap, (self.f(item), item))", "def _itemAdded(self, item):\n group = self.item()\n if group is None:\n return\n\n row = group.getItems().index(item)\n self.addRow(nodeFromItem(item), row + self._CHILDREN_ROW_OFFSET)", "def add_item(self) -> None:\n item = self.get_selected_item(self.tree_cache)\n if item is None:\n return\n\n text, ok = QInputDialog.getText(self, \"Appending new data\", \"Data:\")\n if ok:\n parent_id = item.data().get_id()\n data = Data(text, parent_id)\n data_node = DataNode(instance=data)\n self.data_cache.append(data_node)\n self._data_controller.update_node_hierarchy(self.data_cache, remove_from_list=True)\n self.sync_tree_cache()", "def push(self, item: Any) -> None:\r\n node: Node = Node(item)\r\n if self.is_empty():\r\n self.top = node\r\n else:\r\n # each node points to the item \"lower\" in the stack\r\n node.next = self.top\r\n self.top = node", "def add(self, value: object) -> None:\n #binary search tree == empty\n if self.root is None:\n self.root = TreeNode(value)\n return\n\n #variables loop/traverse bt\n child = self.root\n parent = None\n\n # place node via while loop\n while child is not None:\n parent = child\n if value < child.value:\n child = child.left\n else:\n child = child.right\n\n #new_node/ child\n if value < parent.value:\n parent.left = TreeNode(value)\n else:\n parent.right = TreeNode(value)", "def push(self, item: Any) -> None:\n self._items.append(item)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value) # - leaf_value because the MCTS tree is a max-min tree\n self.update(leaf_value)", "def push(self, x: int) -> None:\r\n self.items.append(x)", "def update(self, leaf_value,visits_count=1):\n # Count visit.\n self._n_visits += visits_count\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits\n\n if self.is_root():\n self.last_leafvalue = leaf_value", "def push(self, item):\n # 0(1)\n # Your code here\n self.stack.push(item)\n # update max if necessary\n # if self.max is None or self.max < item:\n # self.max = item\n # get the current max, compate it to item, and if current max < item, new max - item\n current_max = self.get_max()\n if current_max is None or current_max < item:\n current_max = item\n # push the max onto max_stack\n self.max_stack.push(current_max)", "def append(self, item):\r\n self.stack.append(item)", "def push(self, item: Any) -> None:\n self._data.append(item)", "def insert(self, item):\n insert_location = self.__find(item)\n if 
insert_location is None: #No root\n self.root = Node(item, None)\n elif item < insert_location.item:\n insert_location.left_child = Node(item, insert_location)\n else: # it should be that item >= insert_location.item\n insert_location.right_child = Node(item, insert_location)", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.", "def insert(self, item):\n self._heap.append(item)\n self._perc_up(len(self._heap) - 1)", "def push(self, val):\n self.head = Node(val, self.head)", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits", "def push(self, transition, priority):\n priority = priority * 10000\n priority = self._clip_p(priority)\n priority = int(priority)\n # if we reached the capacity, overwrite the oldest item\n if (self.size == self.capacity):\n self.queue[self.to_write%self.capacity] = transition\n self.sum_tree.update(self.to_write%self.capacity,priority)\n else:\n self.queue.append(transition)\n self.sum_tree.push(priority)\n self.to_write = self.to_write + 1", "def _add(self, root, element, currentDepth):\n # When adding an element from the actual node, all elements less important\n # than the actual node are ALWAYS in the right branch, but the most importants\n # are on the left branch\n if root.data < element:\n if root.left == None:\n root.left = Node(element)\n if currentDepth > self.depth:\n self.depth = currentDepth\n return root.left\n else:\n # print \"Going to left branch at depth\", currentDepth\n return self._add(root.left, element, currentDepth + 1)\n else:\n if root.right == None:\n # print \"Adding new right leave\", element\n root.right = Node(element)\n if currentDepth > self.depth:\n self.depth = currentDepth\n return root.right\n else:\n # print \"Going to right branch at depth\", currentDepth\n return self._add(root.right, element, currentDepth + 1)", "def add(self, item):\n\n assert self.size < self.max_size, \"Cannot add item! The MaxHeap is full!\"\n self.items[self.size] = item\n self.size += 1\n self._shift_up(self.size - 1)", "def push(self, item: Any) -> None:\n # TODO: Implement this method\n ...", "def push(self, item):\n\n self._list.append(item)", "def add_to_group(self,item):\n self.items.append(item)\n self.n += 1", "def insert(self, root: TreeNode, item: int):\n if not root:\n return TreeNode(item)\n if item < root.value:\n root.left = self.insert(root.left, item)\n else:\n root.right = self.insert(root.right, item)\n return root", "def add(perm, i):\n for j in self.increasing_children(i):\n add(perm, j)\n perm.append(i)", "def append(self, tree):\n self.insert(len(self), tree)", "def push(self, val):\n self._linkedlist.push(val)\n self._update_attr()", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits. wtf ??? this line is rigth but kind of wired !\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def insert(self, key, val=None):\n self.root = self._insert(self.root, key, val) # Returns root of resulting tree after insertion - update it\n self.n += 1", "def add(self, value):\n self.stack_list.appen(value)" ]
[ "0.73173857", "0.66109264", "0.6454965", "0.6436707", "0.6278545", "0.6239465", "0.6177632", "0.6154286", "0.6113809", "0.6101891", "0.6088831", "0.60733277", "0.60294855", "0.59839755", "0.59787667", "0.597623", "0.59745806", "0.5972004", "0.596576", "0.5932941", "0.5916967", "0.5916967", "0.5915042", "0.5883667", "0.5880913", "0.58808804", "0.5878976", "0.58663034", "0.5865895", "0.58426327", "0.58426327", "0.58426327", "0.58426327", "0.58426327", "0.58385766", "0.5836522", "0.5817681", "0.5814446", "0.5800742", "0.5800742", "0.57996905", "0.5795664", "0.5793052", "0.578989", "0.5786823", "0.5782181", "0.5782181", "0.5782181", "0.5780385", "0.5770113", "0.57690537", "0.57608896", "0.5759866", "0.5753177", "0.5728355", "0.57206404", "0.5720069", "0.5719188", "0.5715028", "0.56904495", "0.56904227", "0.56880784", "0.56860834", "0.5672787", "0.5661186", "0.5661145", "0.56590784", "0.5655534", "0.5653835", "0.5649326", "0.5644685", "0.5641469", "0.56379235", "0.5634097", "0.56018466", "0.5598487", "0.55969244", "0.5593482", "0.5591443", "0.5587726", "0.5587223", "0.55848664", "0.55837584", "0.5581649", "0.55771327", "0.5568893", "0.55674225", "0.5564968", "0.55619866", "0.55619335", "0.55526024", "0.55525035", "0.5548278", "0.55426836", "0.55386907", "0.55380327", "0.5530402", "0.5526463", "0.5522382", "0.551994" ]
0.6273207
5
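The push document above writes a new value into the first free leaf (flat-array slot capacity - 1 + size) and repairs every partial sum on the path back to the root; the record that follows does the same for an existing leaf. Below is a minimal self-contained sketch of that structure, combining the push and update documents with the parent-propagation and prefix-sum get logic visible among the snippets; the class name, the power-of-two capacity assumption, and the helper names are mine.

class SumTree:
    """Minimal fixed-capacity sum tree (a sketch, assuming power-of-two capacity).

    Leaves occupy slots [capacity - 1, 2 * capacity - 1) of a flat array;
    every internal node stores the sum of its two children.
    """

    def __init__(self, capacity):
        assert capacity > 0 and capacity & (capacity - 1) == 0
        self.capacity = capacity
        self.size = 0
        self.tree = [0.0] * (2 * capacity - 1)

    def _propagate(self, idx):
        # Recompute every partial sum on the path from a leaf to the root.
        parent = (idx - 1) // 2
        while parent >= 0:
            left, right = 2 * parent + 1, 2 * parent + 2
            self.tree[parent] = self.tree[left] + self.tree[right]
            parent = (parent - 1) // 2

    def push(self, value):
        # Write into the first free leaf and repair the sums above it.
        idx = self.capacity - 1 + self.size
        self.tree[idx] = value
        self._propagate(idx)
        self.size += 1

    def update(self, idx, value):
        # Overwrite an existing leaf (idx in [0, capacity)) and repair the sums.
        idx = self.capacity - 1 + idx
        self.tree[idx] = value
        self._propagate(idx)

    def get(self, prefix_sum):
        # Descend from the root, going left while the left subtree sum covers
        # the request; return (leaf index, leaf value).
        idx = 0
        while idx < self.capacity - 1:
            left = 2 * idx + 1
            if self.tree[left] >= prefix_sum:
                idx = left
            else:
                prefix_sum -= self.tree[left]
                idx = left + 1
        return idx - self.capacity + 1, self.tree[idx]

tree = SumTree(4)
for p in (1.0, 2.0, 3.0):
    tree.push(p)
assert tree.tree[0] == 6.0            # root holds the total
tree.update(1, 5.0)                   # leaf 1: 2.0 -> 5.0
assert tree.tree[0] == 9.0
assert tree.get(1.5) == (1, 5.0)      # leaf 0 covers sums up to 1.0, leaf 1 the next 5.0

The prefix-sum lookup is what makes this layout useful for weighted sampling, as in prioritized replay buffers: drawing a uniform number in [0, total) and descending the sums selects each leaf with probability proportional to its stored value.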
Updates the value of a leaf node and all the sums above it. Idx is expected in the [0, capacity) range.
def update(self, idx, value):
    idx = self.__capacity - 1 + idx
    self.__tree[idx] = value
    self.__update(idx)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def __setitem__(self, idx: int, val: float) -> None:\n assert 0 <= idx < self.capacity, f\"idx={idx} capacity={self.capacity}\"\n\n # Index of the leaf to insert into (always insert in \"second half\"\n # of the tree, the first half is reserved for already calculated\n # reduction-values).\n idx += self.capacity\n self.value[idx] = val\n\n # Recalculate all affected reduction values (in \"first half\" of tree).\n idx = idx >> 1 # Divide by 2 (faster than division).\n while idx >= 1:\n update_idx = 2 * idx # calculate only once\n # Update the reduction value at the correct \"first half\" idx.\n self.value[idx] = self.operation(\n self.value[update_idx], self.value[update_idx + 1]\n )\n idx = idx >> 1 # Divide by 2 (faster than division).", "def set(self, node_index, value):\n if value < 0.0:\n raise ValueError(\n 'Sum tree values should be nonnegative. Got {}'.format(value))\n self.highest_set = max(node_index, self.highest_set)\n node_index = node_index + self.low_idx\n self.max_recorded_priority = max(value, self.max_recorded_priority)\n\n delta_value = value - self.nodes[node_index]\n\n # Now traverse back the tree, adjusting all sums along the way.\n for _ in reversed(range(self.depth)):\n # Note: Adding a delta leads to some tolerable numerical inaccuracies.\n self.nodes[node_index] += delta_value\n node_index = (node_index - 1) // 2\n\n self.nodes[node_index] += delta_value\n assert node_index == 0, ('Sum tree traversal failed, final node index '\n 'is not 0.')", "def __update(self, idx):\n parent = (idx - 1) // 2\n while parent >= 0:\n left, right = 2 * parent + 1, 2 * parent + 2\n self.__tree[parent] = self.__tree[left] + self.__tree[right]\n parent = (parent - 1) // 2", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value) # - leaf_value because the MCTS tree is a max-min tree\n self.update(leaf_value)", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits. wtf ??? 
this line is rigth but kind of wired !\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def update(self, leaf_value,visits_count=1):\n # Count visit.\n self._n_visits += visits_count\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits\n\n if self.is_root():\n self.last_leafvalue = leaf_value", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def _update_node(node, value):\n node.N += 1\n node.W += value\n node.Q = node.W / node.N", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.", "def fn(node, x):\n if not node: return x\n x = fn(node.right, x) # sum of right subtree\n x += node.val \n node.val = x\n return fn(node.left, x)", "def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i]\n while i <= self.size:\n self.BITree[i] += v\n\n # Update index to next set bit in binary representation\n i += i & (-i)", "def updateTree(self, i, val, cur):\n start, end = cur.start, cur.end\n if start == end == i:\n cur.val = val\n return\n mid = start+(end-start)/2\n if i <= mid:\n cur.val -= cur.left.val\n self.updateTree(i, val, cur.left)\n cur.val += cur.left.val\n else:\n cur.val -= cur.right.val\n self.updateTree(i, val, cur.right)\n cur.val += cur.right.val", "def include_final_offset(node, offset):\n for leaf in node.leaves:\n leaf.value = leaf.value * offset", "def __init__(self, capacity):\n assert isinstance(capacity, int)\n if capacity <= 0:\n raise ValueError(\n 'Sum tree capacity should be positive. 
Got: {}'.format(capacity))\n\n self.nodes = []\n self.depth = int(np.ceil(np.log2(capacity)))\n self.low_idx = (2**self.depth) - 1 # pri_idx + low_idx -> tree_idx\n self.high_idx = capacity + self.low_idx\n self.nodes = np.zeros(2**(self.depth + 1) - 1) # Double precision.\n self.capacity = capacity\n\n self.highest_set = 0\n\n self.max_recorded_priority = 1.0", "def heap_update(self):\n print 'SumTree pre-update:', self.replay.tree[0].sum\n last_ixs = self.replay.last_ixs(True)\n while True:\n if len(last_ixs) == 0:\n break\n if len(last_ixs) < 10000:\n ixs = last_ixs\n last_ixs = []\n else:\n ixs = last_ixs[:10000]\n last_ixs = last_ixs[10000:]\n batch = [self.replay.tree[ix].pointer for ix in ixs]\n delta = self.get_delta(batch)\n self.get_p_weights(delta, batch, ixs)\n print 'SumTree post-update:', self.replay.tree[0].sum\n print 'SumTree updated'", "def backup_nodes(self, value: float, backup_until=None):\n current = self.current\n parent = current.parent\n sum_from_leaf = value\n\n while parent is not backup_until:\n parent.visits += 1\n sum_from_leaf += current.score - parent.score\n\n # average rewards\n parent.reward += (sum_from_leaf - parent.reward) / parent.visits\n current, parent = parent, parent.parent", "def value(d,o):\n # return memoized value if possible\n if (d,o) in v:\n return v[(d,o)]\n\n thisitem = int(t[d][o])\n # the total of a subtree that starts at the leaf, is just the value of the leaf\n if d == maxdepth:\n val = thisitem\n else:\n val = thisitem + max(value(d+1,o),value(d+1,o+1))\n\n v[(d,o)]=val\n return val", "def get(self, subtree_sum):\n idx = 0\n while True:\n # if idx is a leaf node return the idx and the value\n if idx >= self.__capacity - 1:\n return (idx - self.__capacity + 1, self.__tree[idx])\n\n # else continue down\n left = 2 * idx + 1\n right = 2 * idx + 2\n left_sum = self.__tree[left]\n if left_sum >= subtree_sum:\n idx = left\n else:\n idx = right\n subtree_sum -= left_sum", "def include_final_offset(node, offset):\n for leaf in node.leaves:\n leaf.value = leaf.value + offset", "def include_final_offset(node, offset):\n if offset != 0.0:\n for leaf in node.leaves:\n leaf.value = leaf.value + offset", "def refresh(self):\n node, ans = self.list_head.next.next, 0\n # first update key_nodes in even positions\n while node:\n ans += 1\n node = node.next.next\n # then update tree_nodes's current_btree_node in odd positions\n node = self.list_head.next\n while node:\n node.current_btree_node = self\n if node.next:\n node = node.next.next\n else:\n break\n self.size = ans", "def backup(self, value):\n current = self\n while current.parent is not None:\n value *= -1\n current.number_visits += 1\n current.total_value += value\n current = current.parent", "def increment(self):\n if self.is_empty():\n return 0\n else:\n self.get_root().value += 1\n if self.get_left():\n self.get_left().increment()\n if self.get_right():\n self.get_right().increment()", "def capacity_enlarge(self, k):\n count = 0\n idx = self.capacity - 1\n while count < k:\n left = self.tree[idx]\n right = priorityNode(0, None)\n insert_pos = self.tree.shape[0]\n self.tree = np.insert(self.tree, insert_pos, [left,right])\n idx += 1\n count += 1\n\n self.last_capacity = self.capacity # mark down the last capacity for adding operation\n self.capacity += k # Update the value of capacity", "def _update_value_at(self, index, value):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n node.value = value", "def update(self, x, factor=0):\n if x 
is self.nil:\n return\n\n if x.balance > 1 or x.balance < -1:\n self.rebalance(x)\n return\n if x is self.root:\n return\n\n if x is x.parent.left:\n x.parent.balance += factor\n elif x is x.parent.right:\n x.parent.balance -= factor\n\n if x.parent.balance != 0:\n self.update(x.parent, factor)", "def update_tree(root, executed_acts, total_rew):\n root.value = max(total_rew, root.value)\n root.visits += 1\n new_nodes = 0\n\n node = root\n for step, act in enumerate(executed_acts):\n if act not in node.children:\n node.children[act] = Node()\n new_nodes += 1\n node = node.children[act]\n node.value = max(total_rew, node.value)\n node.visits += 1\n\n return new_nodes", "def add(self, i: int, v: int) -> None:\n while i < self.size:\n self.tree[i] += v\n i += self._lsb(i)", "def heap_update(heap, index, node, cost):\n if node in index and cost < index[node]:\n heap.remove([index[node], node])\n heap.append([cost, node])\n heapq.heapify(heap)\n index[node] = cost\n elif node not in index:\n heapq.heappush(heap, [cost, node])\n index[node] = cost", "def _put(self, k, v, currNode):\n if k < currNode.key:\n if currNode.hasLeftChild():\n self._put(k, v, currNode.leftChild)\n else:\n currNode.leftChild = TreeNode(k, v, parent=currNode)\n\n elif k > currNode.key:\n if currNode.hasRightChild():\n self._put(k, v, currNode.rightChild)\n else:\n currNode.rightChild = TreeNode(k, v, parent=currNode)\n\n else:\n currNode.payload = v\n self.size -= 1", "def _sink(self, val, cur_node):\n if val > cur_node.data:\n if not cur_node.right:\n cur_node.right = Node(val, cur_node)\n self.size_number += 1\n if cur_node.balance_number == 0:\n cur_node.balance_number = 1\n else:\n count = self._sink(val, cur_node.right)\n if cur_node.balance_number <= count:\n cur_node.balance_number += 1\n elif val < cur_node.data:\n if not cur_node.left:\n cur_node.left = Node(val, cur_node)\n self.size_number += 1\n if cur_node.balance_number == 0:\n cur_node.balance_number = 1\n else:\n count = self._sink(val, cur_node.left)\n if cur_node.balance_number <= count:\n cur_node.balance_number += 1\n return cur_node.balance_number", "def update(self, probs: torch.Tensor):\n tree, capacity = self._create_tree(probs, self.tree)\n self.tree = tree\n self.capacity = capacity", "def update_nodes(self):\n\n L = self.level\n P = L.prob\n\n # only if the level has been touched before\n assert L.status.unlocked\n\n # get number of collocation nodes for easier access\n M = self.coll.num_nodes\n\n # gather all terms which are known already (e.g. 
from the previous iteration)\n # this corresponds to u0 + QF(u^k) - QdF(u^k) + tau\n\n # get QF(u^k)\n integral = self.integrate()\n for m in range(M):\n # get -QdF(u^k)_m\n for j in range(1, M + 1):\n integral[m] -= L.dt * self.QI[m + 1, j] * L.f[j]\n\n # add initial value\n integral[m] += L.u[0]\n # add tau if associated\n if L.tau[m] is not None:\n integral[m] += L.tau[m]\n\n # do the sweep\n for m in range(0, M):\n # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)\n rhs = P.dtype_u(integral[m])\n for j in range(1, m + 1):\n rhs += L.dt * self.QI[m + 1, j] * L.f[j]\n\n # implicit solve with prefactor stemming from the diagonal of Qd\n L.u[m + 1] = P.solve_system(\n rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]\n )\n # update function values\n L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])\n\n # indicate presence of new values at this level\n L.status.updated = True\n\n return None", "def update(self, tree_path, value):\n\t\traise NotImplementedError", "def __setitem__(self, k, v):\n #if tree is empty\n if self.is_empty():\n # inherited from LinkedBinaryTree class\n # _Item(k, v) is inheritated from MapBase class\n leaf = self._add_root(self._Item(k,v)) \n else:\n p = self._subtree_search(self.root(), k)\n #if k is present in current tree\n if p.key() == k:\n #it's not p.value()!!\n p.element()._value = v\n self._rebalance_access(p)\n return\n #didn't find k in current tree; create a new object of Item\n # and add to either left or right of the last node searched\n else:\n item = self._Item(k, v)\n if k > p.key():\n leaf = self._add_right(p, item)\n else:\n leaf = self._add_left(p, item)\n self._rebalance_insert(leaf)", "def __getitem__(self, index):\n node = self.head\n index += 1\n for level in reversed(range(self.max_levels)):\n while node.width[level] <= index:\n index -= node.width[level]\n node = node.next[level]\n return node.value", "def __balance(self) -> \"Node\":\n current = self\n\n while True:\n current.__update_height()\n\n if current.balance_factor == 2: # right subtree is higher\n middle = current.right\n if middle.balance_factor < 0: # left subtree of middle node is higher\n middle.__rotate_right()\n current = current.__rotate_left()\n\n elif current.balance_factor == -2: # left subtree is higher\n middle = current.left\n if middle.balance_factor > 0: # right subtree of middle node is higher\n middle.__rotate_left()\n current = current.__rotate_right()\n\n else:\n current.__update_size()\n current.__update_sum()\n\n if current.parent is None:\n return current # return root\n\n current = current.parent # go up if not root", "def push(self, value):\n idx = self.__capacity - 1 + self.__size\n self.__tree[idx] = value\n self.__update(idx)\n self.__size += 1", "def __update(self, node: _AVLTreeNode) -> None:\n\n left_height = self.__get_node_height(node.left)\n right_height = self.__get_node_height(node.right)\n\n node.height = max(left_height, right_height) + 1\n node.balance_factor = right_height - left_height", "def update(self, value, index):\n\n length = self.get_length()\n if type(index) is int:\n if index > length:\n # The index value is out of range and prompts and exits\n print(\"Index is out of range.\")\n return\n else:\n this_node = Node(data=value)\n if index == 0:\n this_node.next = self.head.next\n this_node.prev =None\n self.head = this_node\n else:\n cur = self.head\n while index - 1:\n cur = cur.next\n index -= 1\n this_node.next = cur.next.next\n 
this_node.prev = cur.next.prev\n cur.next = this_node\n return\n else:\n print(\"Index value is not int.\")\n return", "def _rebalance(self, val=None):\n node = self.search(val)\n if node is None:\n node = self.root\n if self.root is None:\n return\n try:\n self._rebalance(node.left.data)\n except AttributeError:\n pass\n try:\n self._rebalance(node.right.data)\n except AttributeError:\n pass\n if self.balance(node) > 1:\n if self.balance(node.right) == -1: # should this be > 0?\n self.rotate(node.right.left.data)\n self.rotate(node.right.data)\n elif self.balance(node) < -1:\n if self.balance(node.left) == 1:\n self.rotate(node.left.right.data)\n self.rotate(node.left.data)\n # result = [x for x in self.breadth_first_traversal()]\n # for num in result:\n # print(num)\n # print()\n # print()", "def __setitem__(self, vertex, cost):\n pos = self._index_map.get(vertex)\n if pos is not None:\n entry = self._heap[pos]\n entry.cost = cost\n else:\n pos = len(self._heap)\n entry = _Node(vertex, cost)\n self._heap.append(entry)\n self._siftup(pos, entry)", "def update(self, index: int, x: int):\n index += self.n2\n self.tree[index] = self.binary(self.tree[index], x)\n while index > 1:\n # (index ^ 1) はiと1の排他的論理和(XOR)\n x = self.binary(x, self.tree[index ^ 1])\n index >>= 1 # 右ビットシフトで親ノードのインデックスへ移動\n self.tree[index] = self.binary(self.tree[index], x)", "def _finalize_leaf(self, node):\n node.value = -self.shrinkage * node.sum_gradients / (\n node.sum_hessians + self.splitter.l2_regularization)\n self.finalized_leaves.append(node)", "def fn(node, val):\n if not node: return 0\n val = 10*val + node.val\n if not node.left and not node.right: return val \n return fn(node.left, val) + fn(node.right, val)", "def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n vals.append(ans)\n return ans", "def root(self, value: List[int]):\n max_len = max((len(node) for node in self._nodes))\n self._root = value + (max_len - len(value)) * [0]", "def update_value(self, reward):\n\t\tval = self.value\n\t\tval = val + ((reward - val)/self.visited)\n\t\tself.value = val", "def backup_edges(self, value: float, backup_until=None):\n current = self.current\n parent = current.parent\n sum_from_leaf = value\n\n while parent is not backup_until:\n edge = parent.edges[current.index]\n edge.visits += 1\n sum_from_leaf += current.score - parent.score\n edge.value += (sum_from_leaf - edge.value) / edge.visits\n current, parent = parent, parent.parent", "def set_val(self, k, a):\n k += self.n - 1\n self.dat[k] = a\n while k > 0:\n k = (k - 1) // 2 # parent\n self.dat[k] = self.op(self.dat[k * 2 + 1], self.dat[k * 2 + 2])", "def _fix_up_to_root(self, idx):\n combine_fn = self._combine_fn\n while idx >= 1:\n # self.data[idx] = combine_fn(self.data[self._left(idx)], self.data[self._right(idx)])\n self.data[idx] = combine_fn(self.data[2 * idx], self.data[2 * idx + 1])\n # idx = self._parent(idx)\n idx = idx >> 1", "def balance(self):\n if self.val is None:\n return 0\n return self.left.depth() - self.right.depth()", "def cell_setLeaf(self, curr):\r\n curr.n_count = 0\r\n return", "def leafScore(self) :\n return 0", "def apply(self, f):\n if self.is_empty():\n return 0\n else:\n self.get_root().value = f(self.get_root().value)\n if self.get_left():\n self.get_left().apply(f)\n if self.get_right():\n self.get_right().apply(f)", "def increment_node_value(self, val: Union[int, float], i: int, j: int,\n key: str = 'target') -> None:\n if key not in self._dist:\n raise KeyError('key parameter must be a 
valid distribution: ' +\n '[\\'initial\\', \\'current\\', \\'target\\']')\n\n elif not self._status['target'] or not self._status['initial']:\n raise ValueError(\n 'No initial or target distribution has been defined.')\n\n mat_i, mat_j = to_matrix(self._param['n_v'], np.array([i, j]))\n self._dist[key][mat_i, mat_j] += val * self._param['size_fraction']", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total", "def __setitem__(self, key, value):\n if self._root:\n self._setItemHelper(key, value, self._root)\n else:\n self._root = _Node(key, value)\n self._size += 1", "def set_leaf_node(self, leaf_value):\n\n if not self.empty:\n try:\n node_key = self.node_key\n except AttributeError:\n node_key = '_'\n raise ValueError(\n 'Cannot modify a non-empty node. ' + \\\n 'If you meant to change type of node {}, '.format(node_key) + \\\n 'delete it first and then add an empty node with ' + \\\n 'the same key.')\n\n # check if leaf_value is a list-like object\n try:\n _ = iter(leaf_value)\n is_list = True\n except TypeError:\n is_list = False\n\n try:\n if is_list:\n leaf_value = [float(i) for i in leaf_value]\n else:\n leaf_value = float(leaf_value)\n except TypeError:\n raise TreeliteError('leaf_value parameter should be either a ' + \\\n 'single float or a list of floats')\n\n try:\n if is_list:\n _check_call(_LIB.TreeliteTreeBuilderSetLeafVectorNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n c_array(ctypes.c_double, leaf_value),\n ctypes.c_size_t(len(leaf_value))))\n else:\n _check_call(_LIB.TreeliteTreeBuilderSetLeafNode(\n self.tree.handle,\n ctypes.c_int(self.node_key),\n ctypes.c_double(leaf_value)))\n self.empty = False\n except AttributeError:\n raise TreeliteError('This node has never been inserted into a tree; '\\\n + 'a node must be inserted before it can be a leaf node')", "def update_priority(self, indexes, values):\n values = values * 10000\n values = self._clip_p(values)\n values = int(values)\n self.sum_tree.update(indexes, values)", "def getSum(root, level, h):\n if root == None:\n return\n \n h[level] = root.data\n \n getSum(root.left, level+1, h)\n getSum(root.right, level+1, h)", "def add_leaf(self, leaf):\n cur_tree_size = self.tree_size\n leaf_hash = self.__hasher.hash_leaf(leaf)\n with self.__db.write_batch() as wb:\n wb.put(self.__leaves_db_prefix + encode_int(cur_tree_size), leaf_hash)\n wb.put(self.__index_db_prefix + leaf_hash, encode_int(cur_tree_size))\n wb.put(self.__stats_db_prefix + 'tree_size', str(cur_tree_size + 1))\n return cur_tree_size", "def update_balances(self, recursive=True):\n if self.node:\n if recursive:\n if self.node.left:\n self.node.left.update_balances()\n if self.node.right:\n self.node.right.update_balances()\n\n self.balance = self.node.left.height - self.node.right.height\n else:\n self.balance = 0", "def mutate(self, node, _):\n new_node = ast.Num(n=node.n + 1)\n return new_node", "def grow(self):\n self.capacity = self.capacity * 2\n self.rehash()", "def backUp(self, value):\n return value / len(self.children)", "def update(self, old, new):\n i = self.rank[old] # change value at index i\n del self.rank[old]\n self.heap[i] = new\n self.rank[new] = i\n if old < new: # maintain heap order\n self.down(i)\n else:\n self.up(i)", "def update(self, old, new):\n i = self.rank[old] # change value at index i\n del self.rank[old]\n self.heap[i] = new\n self.rank[new] = i\n if old < new: # maintain heap order\n 
self.down(i)\n else:\n self.up(i)", "def find_sum(root, desired_sum, level=0, buffer_list=None, result=[]):\n if not buffer_list:\n buffer_list = []\n\n if not root:\n return result\n\n buffer_list.append(root.key)\n temp = desired_sum\n\n for i in range(level, -1, -1):\n temp -= buffer_list[i]\n\n if temp == 0:\n result.append(buffer_list[i:level + 1])\n\n find_sum(root.left, desired_sum, level + 1, buffer_list[:], result)\n find_sum(root.right, desired_sum, level + 1, buffer_list[:], result)\n\n return result", "def update_subtree(self, old_subroot: 'GraphNode', new_subroot: 'GraphNode'):\n self.operator.update_subtree(old_subroot, new_subroot)", "def __init__(self, capacity, tuple, alpha=0.6, beta=0.4):\n self.tree = SumTree(capacity)\n self.capacity = capacity\n self.alpha = alpha\n self.beta = beta\n self.tuple = tuple", "def __init__(self, size: int, values: List[int]=None) -> int:\n self.tree = [el for el in values] if values else ([0] * (size + 1))\n self.size = (len(values)) if values else (size + 1)\n\n if values:\n for i in range(1, self.size):\n parent = i + self._lsb(i)\n if parent < self.size:\n self.tree[parent] += self.tree[i]", "def resize(self, new_capacity):\n # Your code here\n self.capacity = new_capacity\n\n # make new array to store the current self.hash_table\n # update self.hash_table to be array of size new_capacity\n # for each item in our copy array\n # self.put(item) in our newly size self.hash_table\n # if item.next is not None\n # make sure to self.put(item.next) to get all chained nodes\n\n old_storage = self.hash_table\n self.hash_table = [None] * new_capacity\n\n for i, el in enumerate(old_storage):\n if el is not None:\n self.put(el.key, el.value)\n\n curr_node = el\n\n if curr_node is not None:\n # add all chained nodes\n while curr_node.next is not None:\n curr_node = curr_node.next\n if curr_node is not None:\n self.put(curr_node.key, curr_node.value)", "def __call__(self, table, hashes, count):\n if 0 > count:\n raise ValueError(\n 'Conservative updating does not support negative counts')\n\n current_values = [table.get(i, hashes[i]) for i in range(table.depth)]\n new_current_min = min(current_values) + count\n [table.set(i, hashes[i], new_current_min)\n for i, value in izip(range(table.depth), current_values)\n if value < new_current_min]\n\n return new_current_min", "def update_value(x, y, x_prev, y_prev, matrix_dict):\n to_add = matrix_dict[(x_prev, y_prev)]['accumulated_value']\n if to_add is None:\n to_add = matrix_dict[(x_prev, y_prev)]['value']\n values_sum = matrix_dict[(x, y)]['value'] + to_add\n\n # if no accumulated value is present in the cell yet, update it\n if matrix_dict[(x, y)]['accumulated_value'] is None:\n matrix_dict[(x, y)]['accumulated_value'] = values_sum\n matrix_dict[(x, y)]['parent'] = (x_prev, y_prev)\n return True\n # else, update only if the new accumulated value would be lower\n else:\n if values_sum < matrix_dict[(x, y)]['accumulated_value']:\n matrix_dict[(x, y)]['accumulated_value'] = values_sum\n matrix_dict[(x, y)]['parent'] = (x_prev, y_prev)\n return True\n return False", "def __setitem__(self, i: int, o: 'Tree') -> None:\n ...", "def change_leaf_value(self, general_node, hasElement):\n raise NotImplementedError", "def update_node_count(self, node, add_to_count):\r\n current_score = 0\r\n count_string = self.parser.getAttribute(node, 'gravityNodes')\r\n if count_string:\r\n current_score = int(count_string)\r\n\r\n new_score = current_score + add_to_count\r\n self.parser.setAttribute(node, \"gravityNodes\", 
str(new_score))", "def increment_node_index(self):\n self.node_index += 1", "def update_attr(self):\n\n # Retrieve all current values\n all_values = nx.get_node_attributes(self.G, 'value')\n\n new_values = {}\n\n # Loop over all nodes\n for i in range(self.n_v):\n\n # Obtain list of neighbors\n neighbors = list(nx.all_neighbors(self.G, i))\n\n # Compute part dependent on own node\n val_i = all_values[i]\n new_value = (1 - self.eps) * (1 - self.a * val_i * val_i)\n\n # Compute part dependent on neighbor nodes\n neighbors_value = 0\n for neighbor in neighbors:\n val_n = all_values[neighbor]\n neighbors_value += (1 - self.a * val_n * val_n)\n\n # Catch nodes without neighbors\n try:\n new_value += neighbors_value * (self.eps/len(neighbors))\n except ZeroDivisionError:\n pass\n\n # Save new value\n new_values[i] = {'value': new_value}\n\n nx.set_node_attributes(self.G, new_values)", "def add(self, idx):\n # add the cost\n self.g += self.graph[self.visited[-1], idx]\n # add the to the visited place and remove from the unvisited places\n self.visited.append(idx)\n self.not_visited.remove(idx)", "def update_node_count(self, node, add_to_count):\n current_score = 0\n count_string = self.parser.getAttribute(node, 'gravityNodes')\n if count_string:\n current_score = int(count_string)\n\n new_score = current_score + add_to_count\n self.parser.setAttribute(node, \"gravityNodes\", str(new_score))", "def __setitem__(self, index, value):\n if isinstance(index, slice):\n del self[index]\n offset = 0\n if len(self) == 0:\n for x in value:\n self.append(x)\n else:\n for x in xrange(*index.indices(len(self))):\n self.__insert(x + offset, value)\n offset += value.length\n if not index.step:\n break\n return\n\n self.__verify_index(index)\n\n if index < 0:\n index += self.length\n\n index, prev_node, cur_node = self.__find_node_index(index)\n cur_node.data_list[index] = value", "def sum(self) -> int:\n return self.root.sum", "def __update_size_tree(self, node, delete=False):\r\n if not delete:\r\n node.size_tree += 1\r\n while node.parent:\r\n node = node.parent\r\n node.size_tree += 1\r\n else:\r\n node.size_tree -= 1\r\n while node.parent:\r\n node = node.parent\r\n node.size_tree -= 1", "def backpropagate(self, search_path, value):\n\n for node in search_path:\n node.n_visits += 1\n node.n_a[node.action_taken] += 1 \n # Incremental mean calculation\n node.q_a[node.action_taken] = (node.q_a[node.action_taken] * \n (node.n_visits - 1) + value) / \\\n node.n_visits", "def _put(self, key, value, current_node):\n pass", "def update(self, idx: int, new_priority: T.Union[int, float]):\n old_priority, item = self.__heap[idx]\n self.__heap[idx] = (new_priority, item)\n\n if new_priority < old_priority:\n self.__sift_up(idx)\n else:\n self.__sift_down(idx)", "def helper(node: TreeNode, cur: int, target: int):\n if not node:\n return\n \n cur += node.val\n \n if cur == target:\n self.output += 1\n \n helper(node.left, cur, target)\n helper(node.right, cur, target)", "def mutate(self):\n num_leafs_before = self.num_leafs()\n non_leafs = [v for v, d in self.out_degree() if d > 0]\n box = non_leafs[np.random.choice(len(non_leafs))]\n children = list(self[box])\n for child in children:\n self.remove_subtree(child)\n num_leafs_after = self.num_leafs()\n num_removed = num_leafs_before - num_leafs_after\n self.generate(num_removed)", "def update_weight(self,ctr,new_weight):\n self.sum1 -= self.data_set[ctr].weight\n self.data_set[ctr].weight = new_weight\n self.sum1 += new_weight", "def _insert_in_tree(self, k: str, 
current_node: str) -> int:\n dist_current_node = self.distance_function(\n self.hash_dict[k], self.dict_all[current_node].node_value\n )\n condition_insert_current_node_child = (\n not self.dict_all[current_node].children\n ) or (\n dist_current_node not in list(self.dict_all[current_node].children.values())\n )\n if condition_insert_current_node_child:\n self.dict_all[current_node].children[k] = dist_current_node\n self.dict_all[k] = BkTreeNode(\n k, self.hash_dict[k], parent_name=current_node\n )\n else:\n for i, val in self.dict_all[current_node].children.items():\n if val == dist_current_node:\n node_to_add_to = i\n break\n self._insert_in_tree(k, node_to_add_to)\n return 0", "def _expand_node(expand_n, base_cost, randomizer):\n\n for next_n, props in nb[expand_n].items():\n randomizer -= 1\n total_cost = props['weight'] + base_cost\n e_cost = (total_cost, props['weight'], randomizer)\n\n # Check for tree membership as this signifies a loop back to the tree\n if next_n not in scanned or e_cost < scanned[next_n] and not tree.has_node(next_n):\n heappush(queue, (e_cost[0], e_cost[1], e_cost[2], next_n))\n scanned[next_n] = e_cost\n p[next_n] = expand_n", "def increment_location(self,G):\n d = self.loc.index(-1) -1 # Depth of current location\n if d < self.k - 2: # Then we can still go down\n d += 1\n self.loc[d] = 0\n else:\n self.loc[d] += 1\n G[d+1] = [] \n while self.loc[d] > self.branches[d]-1: # If end of branches at this depth, \n d -= 1 # go up until we can go down again\n G[d+1] = [] \n self.loc[d+1] = -1\n if d < 0:\n break # All done with the tree\n self.loc[d] += 1\n self.nLoc += 1", "def increment(self, index, value):\n self._inrange(index)\n if value==0:\n return\n found,ii = self._find_index(index)\n if found:\n self.value[ii] += value\n if self.value[ii] == 0:\n del self.index[ii]\n del self.value[ii]\n else:\n self.index.insert(ii, index)\n self.value.insert(ii, value)" ]
[ "0.74209356", "0.6767374", "0.6753369", "0.6640915", "0.62751794", "0.6238642", "0.61967045", "0.61868465", "0.618199", "0.6152516", "0.6152516", "0.6152516", "0.6084542", "0.5986752", "0.5953413", "0.5944968", "0.58795923", "0.581223", "0.5790687", "0.57693315", "0.57603663", "0.570496", "0.5703129", "0.56980103", "0.5690032", "0.5679459", "0.56651527", "0.56528497", "0.5648007", "0.5642546", "0.56211865", "0.5588307", "0.55878246", "0.5579639", "0.55734175", "0.557131", "0.55479646", "0.552831", "0.5526042", "0.54972494", "0.5484235", "0.54670936", "0.54351836", "0.5433254", "0.5414504", "0.53958505", "0.53945297", "0.5375592", "0.5375562", "0.5367276", "0.5347399", "0.5345283", "0.5342374", "0.53370607", "0.5329013", "0.5322122", "0.5301846", "0.52906847", "0.5281991", "0.5271573", "0.52702284", "0.52396977", "0.52375036", "0.5221729", "0.5181949", "0.5180166", "0.5169777", "0.5165395", "0.5164056", "0.5153843", "0.51518786", "0.5151529", "0.5151529", "0.51489997", "0.5147822", "0.51462257", "0.5138787", "0.5134016", "0.5129637", "0.5114742", "0.51113164", "0.5107387", "0.5093964", "0.5092068", "0.50815666", "0.5079008", "0.507369", "0.5071966", "0.50687027", "0.5060109", "0.5050517", "0.50415224", "0.50378525", "0.5036109", "0.50354445", "0.50329727", "0.50329006", "0.5031856", "0.50266874", "0.5023344" ]
0.74235
0
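A minimal standalone sketch of the index arithmetic used by several of the sum-tree snippets above, where a capacity-c tree is padded to a full binary tree of depth ceil(log2(c)) and leaf i lives at low_idx + i with low_idx = 2**depth - 1. Names and structure here are illustrative assumptions, not taken verbatim from any one snippet:

import numpy as np

def build_sum_tree(priorities):
    # Pad the leaf level to a full binary tree: depth = ceil(log2(capacity)).
    capacity = len(priorities)
    depth = int(np.ceil(np.log2(capacity)))
    low_idx = (2 ** depth) - 1                 # leaf i sits at low_idx + i
    nodes = np.zeros(2 ** (depth + 1) - 1)
    nodes[low_idx:low_idx + capacity] = priorities
    for idx in range(low_idx - 1, -1, -1):     # each parent = sum of its two children
        nodes[idx] = nodes[2 * idx + 1] + nodes[2 * idx + 2]
    return nodes, low_idx

nodes, low_idx = build_sum_tree([1.0, 2.0, 3.0, 4.0])
assert nodes[0] == 10.0                        # the root accumulates the total priority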
Returns the leaf in a given interval.
def get(self, subtree_sum):
    idx = 0
    while True:
        # if idx is a leaf node return the idx and the value
        if idx >= self.__capacity - 1:
            return (idx - self.__capacity + 1, self.__tree[idx])

        # else continue down
        left = 2 * idx + 1
        right = 2 * idx + 2
        left_sum = self.__tree[left]
        if left_sum >= subtree_sum:
            idx = left
        else:
            idx = right
            subtree_sum -= left_sum
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def interval(self):\n return Interval(self._ll_tree.get_left(), self._ll_tree.get_right())", "def get_leaves(self):\n raise NotImplementedError()", "def get_leaf(self, leaf_index):\n return self.__leaves_db.get(encode_int(leaf_index))", "def insert(self, interval):\n if self.root == None:\n self.root = Node(interval)\n return self.root\n\n (start, end) = interval\n node = self.root\n while True:\n if node.key <= start:\n path = 'right'\n else:\n path = 'left'\n\n # Maintain the high invariant, each node contains the leftmost\n # value in it's subtree.\n if node.high < end:\n node.high = end\n\n # Add a new node leaf.\n if getattr(node, path, None) is None:\n setattr(node, path, Node(interval))\n getattr(node, path).parent = node\n break\n else:\n node = getattr(node, path)\n\n return getattr(node, path)", "def _leaf_start(self):\n return (self._bounds_tree.shape[0] + 1) // 2 - 1", "def find_leaf(self, _key):\n cur_node = self.root\n while type(cur_node) is not leaf:\n\n flag = True\n for i, key in enumerate(cur_node.keys):\n if key > _key:\n cur_node = cur_node.pt[i]\n flag = False\n break\n \n # the value passed in is greater than all the keys in this node\n if flag:\n cur_node = cur_node.pt[-1]\n \n return cur_node", "def get_leaf(self, descr):\n matches = [x for x in self.leaves if x.descr == descr]\n if matches == []:\n raise RuntimeError(f\"Did not find any leaves matching '{descr}'\")\n if len(matches) > 1:\n raise RuntimeError(f\"Found multiple matching leaves: {matches}\")\n return matches[0]", "def leaf(self, value, depth, available):\n method_name = 'leaf_' + value.__class__.__name__\n method = getattr(self, method_name, self.generic_leaf)\n return method(value, depth, available)", "def FindLeafNode(self, node, index):\n if node.start > index or node.end() <= index:\n if self.debug:\n print node.ToPrettyString();\n print index;\n raise ValueError(\"Node don't contain index\");\n if node.start == index and node.level == 0: return node;\n if not node.children:\n raise ValueError(\"Didn't find the index\");\n for child in node.children:\n if child.start <= index and child.end() > index:\n return self.FindLeafNode(child, index);\n if self.debug:\n print node.ToPrettyString();\n print index;\n print \"node.start=%d\" % node.start;\n print \"node.end=%d\" % node.end();\n raise ValueError(\"Shouldn't reach the end\");", "def _extract_leaf(leaf):\n try:\n return re.match(r'leaf-(\\d+)', leaf).group(1)\n except:\n return None", "def get_leaf_nodes(self):\n pass", "def findInterval(intervals,interval):\n low,ind = algorithms.binsearch(intervals,interval.start-1,lambda a,b: cmp(a.start,b))\n return (low,ind)", "def leaf_node_gen(self):\n return (v for v in self.node_gen if v.is_leaf)", "def test_find_root_in_interval(self):\n\n print(\"\\nFalse Position Method: Root in Interval\")\n\n # input\n xi = 0 # initial value\n xf = 1 # final value\n et = 1e-6 # relative error threshold\n\n # function\n root, ic, msg = FalsePosition.find_root_in_interval(self.fn, xi, xf, et)\n\n # output\n if root != None:\n print(\"\\tInterval: [{xi}, {xf}]\\n\\t{msg}: {x}\\n\\tFunction Value: {fx}\\n\\tIterations: {ic}\".format(xi=xi, xf=xf, msg=msg, x=root, fx=self.fn(root), ic=ic))\n else:\n print(\"\\t{msg}.\".format(msg=msg))", "def get_leaves(self, start=0, stop=None):\n if stop is None:\n stop = self.tree_size\n return [l for l in self.__leaves_db.iterator(start=encode_int(start), stop=encode_int(stop), include_key=False)]", "def _get_leaves(self, root_id, bbox, mip):\n root_id = int(root_id)\n url = 
posixpath.join(self.meta.server_url, \"segment\", str(root_id), \"leaves\")\n bbox = Bbox.create(bbox, context=self.meta.bounds(mip), bounded=self.bounded)\n\n response = requests.post(url, json=[ root_id ], params={\n 'bounds': bbox.to_filename(),\n })\n response.raise_for_status()\n\n return np.frombuffer(response.content, dtype=np.uint64)", "def leaf_nodes(self):\n return self.nodes.filter(lft=models.F('rght') - 1)", "def _get_leaf(leaf, d, pattern):\n xleaf = d.rsplit('/', 1)[-1].strip()\n check_pattern = re.match('\\*(\\.[a-zA-Z0-9]+)$', pattern)\n if check_pattern:\n xten = check_pattern.groups()[0]\n if xleaf[-len(xten):] == xten:\n xleaf = xleaf[:-len(xten)].strip()\n if xleaf.find(ROOT_LEAF_PREFIX) == 0:\n return leaf\n elif leaf.strip():\n return '{0}.{1}'.format(leaf, xleaf)\n else:\n return xleaf", "def bisection(leaf, args):\n #retrieve the lower/upper bound of given Most Promising Region\n lb = leaf.lb\n ub = leaf.ub\n #find the dimension number of decision variabless\n dimX = len(lb)\n #determine the dimension that should be partitioned\n dimID = (leaf.level + 1) % dimX \n #determine the partition threshold\n thr = (lb[dimID]+ub[dimID])/2.0\n #create new lower/upper middle bound [lb,umb], [lmd,ub]\n lmb,umb = [np.array([]) for i in range(2)]\n for i in range(dimX):\n lmb = np.append(lmb,[lb[i],thr][i==dimID])\n umb = np.append(umb,[ub[i],thr][i==dimID])\n subRegions = [[lb,umb],[lmb,ub]]\n return {'parent':leaf,'thr':thr,'subRegions':subRegions}", "def _intersect(self, interval):\n first = self.intervals.bisect_left(interval)\n last = first\n while first > 0 and \\\n self.intervals[first - 1].upper > interval.lower:\n first -= 1\n while last < len(self.intervals) and \\\n self.intervals[last].lower < interval.upper:\n last += 1\n return first, last", "def letter(leaf):\n return root(branches(leaf)[0])", "def FindLeaf(self, node):\n result = []\n # If current node is not leaf node, just iter to find all the node with MBR\n if self.level != 1:\n for leaf in self.leaves:\n if contain(leaf.MBR, node.MBR):\n result.append(leaf.FindLeaf(node))\n for x in result:\n if x != None:\n return x\n # If current node is leaf, just iter this node to check whether index is same, and return\n else:\n for leaf in self.leaves:\n if leaf.index == node.index:\n return self", "def getInterval(self) -> float:\n\t\treturn self[self._bcni]", "def _intersect_continuous(self, interval):\n first = self.intervals.bisect_left(interval)\n last = first\n while first > 0 and \\\n self.intervals[first - 1].upper >= interval.lower:\n first -= 1\n while last < len(self.intervals) and \\\n self.intervals[last].lower <= interval.upper:\n last += 1\n return first, last", "def eat_leaves(self, branch):\n\n if self.hop_interval < 1:\n return\n\n curr_leaf = branch.root\n while curr_leaf:\n if curr_leaf.val % self.hop_interval == 0:\n curr_leaf.val = -1\n curr_leaf = curr_leaf.next", "def binary_trees(self):\n for ip in self.lower_contained_intervals():\n yield ip.upper_binary_tree()", "def get_leaf(model):\n sub_model = get_sub_model(model)\n if sub_model is None:\n return model\n\n return get_leaf(sub_model)", "def tree_to_leaf(self,\n x_row):\n node = self.tree[0]\n while True:\n if node.is_leaf:\n return node\n val = x_row[node.label]\n if np.isnan(val):\n node = self.tree[node.id_null]\n elif val <= node.cutoff:\n node = self.tree[node.id_lower]\n elif val >= node.cutoff:\n node = self.tree[node.id_higher]\n else:\n raise NameError", "def la(x) :\r\n return Feature(x, \"leaf_area\")", "def 
boundary_leaves(root):\n\n def bottom_nodes(node):\n if not node:\n return\n bottom_nodes(node.left)\n if not node.left and not node.right:\n print(node.data, end=\" \")\n bottom_nodes(node.right)\n\n def left_nodes(node):\n if node:\n if node.left:\n print(node.data, end=\" \")\n left_nodes(node.left)\n elif node.right:\n print(node.data, end=\" \")\n left_nodes(node.right)\n\n def right_nodes(node):\n if node:\n if node.right:\n right_nodes(node.right)\n print(node.data, end=\" \")\n elif node.left:\n right_nodes(node.left)\n print(node.data, end=\" \")\n print(root.data, end=\" \")\n left_nodes(root.left)\n bottom_nodes(root)\n right_nodes(root.right)", "def ChooseLeaf(self, node):\n\n if self.level == node.level + 1:\n # if current node level is higher than the node we want to insert, we find the good point.\n return self\n else:\n # Or iter its child nodes, to find the node with min area.\n increment = [(i, space_increase(self.leaves[i].MBR, node.MBR)) for i in range(len(self.leaves))]\n res = min(increment, key=lambda x: x[1])\n return self.leaves[res[0]].ChooseLeaf(node)", "def select_leaf(self):\n current = self\n best_child = None\n selected_nodes_R = 0\n while current.isExpanded:\n maxUCT = - float('inf')\n for child in current.children.values():\n UCT = child.compute_uct()\n if UCT > maxUCT:\n maxUCT = UCT\n best_child = child\n\n current = best_child\n selected_nodes_R += current.score\n return current, selected_nodes_R", "def get_leaves(tree):\n tree = Tree(tree, format=1)\n midpoint = tree.get_midpoint_outgroup()\n tree.set_outgroup(midpoint)\n tree_style = TreeStyle()\n tree_style.show_leaf_name = False\n nodes = tree.search_nodes(name=\"#1\")[0]\n leaves = sorted(['_'.join(leaf.name.split('_')[:2]) for leaf in nodes.iter_leaves()])\n leaves = tuple(leaves)\n return leaves", "def simple_root(self, i):", "def getLeaves(ob, pre=\"\"):\n return ob._getLeaves(pre)", "def age_to_interval(i):\n for idx, a in enumerate(intervals):\n if i >= a[0] and i < a[1]:\n return idx\n return idx", "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def get_root(g,r,n): # g: graph; r: range; n: node address\n if 'head' in g.nodes[n]:\n if g.nodes[n]['head'] not in r:\n return n\n else:\n return get_root(g,r,g.nodes[n]['head'])\n return n", "def leaves(node, res):\n leaf = True\n if node.lesser:\n leaf = False\n leaves(node.lesser, res)\n if node.greater:\n leaf = False\n leaves(node.greater, res)\n if leaf:\n res.append(node.indices)", "def the_function(interval):\n if math.ceil(interval.upper) % 2:\n return interval * type(interval).closed(\n fractions.Fraction(3, 2),\n fractions.Fraction(3, 2)\n )\n else:\n return interval * type(interval).closed(\n fractions.Fraction(1, 2),\n fractions.Fraction(1, 2)\n )", "def get_leaves( self ):\n branches = self.get_branches()\n leaves = []\n\n for branch in branches:\n if branch.is_leaf:\n leaves.append( branch )\n\n return leaves", "def r_to_depth(x, interval):\n return x * interval / 3600.0", "def get_span(tree):\n if tree[2][0]=='X':\n return tree[2][2]\n elif type(tree[2][0])==list:\n return tree[2][0][2]\n else:\n print 'Error in get_span'\n return None", "def get_leaves(self):\n\n leaves = []\n curr_leaf = self.root\n while curr_leaf:\n if curr_leaf.val != -1:\n leaves.append(curr_leaf.val)\n curr_leaf = curr_leaf.next\n\n return leaves", "def get_leaves(tree):\n if tree.is_leaf:\n return [tree.indices]\n else:\n return get_leaves(tree.left_child) + 
get_leaves(tree.right_child)", "def test_is_leaf(self):\n self.assertEqual(True, comparator.is_leaf(None))\n self.assertEqual(True, comparator.is_leaf(True))\n self.assertEqual(True, comparator.is_leaf(False))\n self.assertEqual(True, comparator.is_leaf(int()))\n self.assertEqual(True, comparator.is_leaf(str()))\n self.assertEqual(False, comparator.is_leaf(list()))\n self.assertEqual(False, comparator.is_leaf(dict()))", "def make_leaves(self):\n\n curr_leaf = self.root\n i = 1\n while i < self.length:\n next_leaf = Leaf(curr_leaf.val+1)\n curr_leaf.next = next_leaf\n curr_leaf = next_leaf\n i += 1", "def get_leaf_nodes(self, selector=None):\n\n if selector is None:\n if self.is_root:\n selector = \"forest\"\n else:\n selector = \"tree\"\n\n uids = self[selector, \"uid\"]\n desc_uids = self[selector, \"desc_uid\"]\n lids = np.where(~np.in1d(uids, desc_uids))[0]\n for lid in lids:\n yield self.get_node(selector, lid)", "def get_interval(interval):\n interval_list = interval.split(\"-\")\n if len(interval_list) == 1:\n return (int(interval_list[0]), int(interval_list[0])+1)\n else:\n return (int(interval_list[0]), int(interval_list[1])+1)", "def span(self):\n return self.interval.span", "def get_leaf_node(self, current_word):\n node = self.wordlist.find(current_word)\n\n if node is None:\n # current word is not in the Trie\n return None\n elif node.value != TRIE_BRANCH:\n # current word is already a leaf\n return current_word\n \n # descend down a random branch down the trie\n # until we hit a leaf\n while node.children:\n next_letter = random.choice(list(node.children.keys()))\n current_word += next_letter\n node = node.children.get(next_letter)\n \n return current_word", "def get_leafs(self) -> list:\n return self._leafs[:]", "def test_get_leaf_cells(mock_amg):\n\n # for no splitting, this should be the same as the cell list\n assert mock_amg.get_all_leaf_cells() == mock_amg.cells\n\n # split the middle cell\n mock_amg.cells[4].split()\n\n # expected output\n exp_list = []\n exp_list.extend(mock_amg.cells[0:4])\n exp_list.extend(mock_amg.cells[-4:])\n exp_list.extend(mock_amg.cells[5:-4])\n\n assert exp_list == mock_amg.get_all_leaf_cells()", "def _height1(self): # works, but O(n^2) worst-case time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def leafScore(self) :\n return 0", "def get_list_leaf(self, orderbook, virtual_market_segment):\n \n leaf = None\n for gmp in orderbook.GroupMaps():#check if there is a leaf on this orderbook\n if gmp.Group().Name() == virtual_market_segment.Name():\n leaf = gmp \n break \n \n if not leaf:\n try: \n leaf = acm.FInstrGroupMap() \n leaf.Instrument(orderbook.Instrument())\n leaf.Group(virtual_market_segment)\n leaf.OrderBook(orderbook)\n #leaf.Commit()\n logger.LOG(\"Leaf %s *Successfully* created for order book <%s>\"%(virtual_market_segment.Name(), orderbook.Name()))\n except Exception as e:\n logger.ELOG('**Error** while creating a leaf for the order book %s on market segment %s : %s'%(orderbook.Name(), virtual_market_segment.Name(), e))\n else: \n logger.DLOG(\"Sending first match of an FInstrGroupMap for MarketSegment '%s' (FInstrGroupMap) for the order book '%s'.\"%(virtual_market_segment.Name(), orderbook.Name())) \n \n return leaf", "def label(tree):\n return tree[0]", "def __repr__(self):\n s = \"{}, {}\".format(self.left_endpoint, self.right_endpoint)\n if self.left_closed:\n left_bracket = '['\n else:\n left_bracket = '('\n\n if self.right_closed:\n right_bracket = ']'\n else:\n right_bracket = ')'\n 
interval_string = left_bracket + s + right_bracket\n return 'TreeNode({})'.format(interval_string)", "def branches(tree):\n\n return tree[1:]", "def _height1(self): #works but n^2 time\n return max(self.depth(p) for p in self.positions() if self.is_leaf(p))", "def find(self, value):\n index = self.intervals.bisect_left(value)\n if index < len(self.intervals) and self.intervals[index].lower == value:\n return self.intervals[index]\n if index > 0 and self.intervals[index - 1].contains(value):\n return self.intervals[index - 1]\n return None", "def son_is_leaf(self, place):\n raise NotImplementedError", "def binary_search_find_root(number, min, max, accuracy, n):\n if max <= min: # Can't find\n return -1\n\n mid_val = (max + min)/2 # Returns a float.\n if round(mid_val ** n, accuracy) == number:\n return mid_val\n elif mid_val ** n > number: # Need to make mid_val**2 less so it matches number.\n return binary_search_find_root(number, min, mid_val, accuracy, n) # Look at values between min and mid, discard > mid point.\n elif mid_val ** n < number:\n return binary_search_find_root(number, mid_val, max, accuracy, n) # Discard values lower than mid to make number bigger.", "def get_cluster_bounds(i, cluster_bounds):\n con1 = np.where(i >= cluster_bounds)[0]\n j = con1[len(con1) -1]+1\n\n # for j in range(1, len(cluster_bounds)):\n # if(i < cluster_bounds[j] and i >= cluster_bounds[j-1]):\n # break\n return np.array([cluster_bounds[j-1], cluster_bounds[j]], dtype=np.int64)", "def IsLeaf(self):\n return not self.subtests", "def get_uneaten_leaves(branch_length, caterpillars):\n\n if type(branch_length) != int:\n return None\n\n if branch_length <= 0:\n return 0\n\n if not caterpillars:\n return branch_length\n\n # Break up caterpillars\n prime_cats = get_prime_caterpillars(caterpillars)\n\n branch_break = prime_cats[-1] # Requires list is sorted\n multi = branch_length // branch_break\n remainder = branch_length % branch_break\n # Get \"whole-branch\" leaves\n leaf_c = get_remaining_leaves_from_branch(branch_break, prime_cats) * multi\n # Get partial-branch leaves, if any\n if remainder:\n leaf_c += get_remaining_leaves_from_branch(remainder, prime_cats)\n\n return leaf_c", "def test_instantiate_leaf_node(self):\n try:\n LeafNode('my_label')\n except Exception:\n message = \"LeafNode instantiation failed\"\n self.fail(message)", "def __arb__(self):\n if self.tree.total < 1:\n return None\n if self.tree.total % 2 == 0:\n return self.first()\n else:\n return self.last()", "def search(self, val):\n if type(val) not in [int, float]:\n raise TypeError('This tree accepts numbers only.')\n current_node = self._root\n while current_node:\n if val == current_node._data:\n return current_node\n if val > current_node._data:\n current_node = current_node._rkid\n else:\n current_node = current_node._lkid\n return", "def branches(tree):\n return tree[1:]", "def _interval(cls,best,lo,hi):\n return ugali.utils.stats.interval(best,lo,hi)", "def find_start_index():\n def recursive_find_index(lower_bound, upper_bound):\n if upper_bound - lower_bound <= 1:\n if intervals[upper_bound][0] <= start_dt:\n return upper_bound\n return lower_bound\n index = (upper_bound + lower_bound) // 2\n if intervals[index][0] <= start_dt:\n return recursive_find_index(index, upper_bound)\n else:\n return recursive_find_index(lower_bound, index)\n\n if start_dt <= intervals[0][0] - tolerance:\n return -1\n if end_dt >= intervals[-1][1] + tolerance:\n return -1\n return recursive_find_index(0, len(intervals) - 1)", "def 
get_lca_in_bst(root, node_0, node_1):\n res = root\n s = node_0 if node_0.data < node_1.data else node_1\n b = node_1 if node_0.data < node_1.data else node_0\n while (res.data < s.data) or (res.data > b.data):\n while res.data < s.data:\n res = res.right\n while res.data > b.data:\n res = res.left\n return res", "def generic_leaf(self, value, depth, available):\n return repr(value), False", "def get_leaf_index(self, leaf_hash):\n raw_index = self.__index_db.get(leaf_hash)\n if raw_index:\n return decode_int(raw_index)\n else:\n return -1", "def sInterval(self, val):\n\n # NB: the support attribute has been added by precomputing __levels__\n if val in self.support:\n return self.support[val]\n else:\n return ()", "def leaves(self):\r\n return Node.s_leaves(self)", "def is_leaf(self, pos):\n if pos >= (self.size//2) and pos <= self.size: \n return True\n return False", "def is_leaf(self):\n # TODO: Check if both left child and right child have no value\n return ... and ...", "def create_leaves(self, parent_node, leaf_values):\n # TODO: find generalization!!\n import numpy as np\n # creating the leaf object\n parent_node.child_nodes[0] = self.leaf_type(0.0, 0, diagram_type=self.__class__)\n\n # creating the offsets\n # deciding on mult or add rule\n # additive_coefficient = np.mean(leaf_values)\n # new_offsets = np.array([leaf_values[i]-additive_coefficient for i in range(self.base)])\n # max_difference = np.max(np.abs(new_offsets))\n # mult_coefficient = max_difference if max_difference != 0.0 else 1.0\n # for i in range(self.base):\n # node.child_nodes[i] = node.child_nodes[0]\n # node.offsets[i] = np.array([((new_offsets[i])/mult_coefficient), mult_coefficient], dtype='float64')\n # return node, [additive_coefficient, mult_coefficient]\n if leaf_values[0] == 0 or (leaf_values[1]-leaf_values[0] < leaf_values[1]/leaf_values[0]):\n parent_node.offsets[0] = np.array([0, 1], dtype='float64')\n for i in range(1, self.base, 1):\n parent_node.child_nodes[i] = parent_node.child_nodes[0]\n parent_node.offsets[i] = np.array([(leaf_values[i]-leaf_values[0]), 1], dtype='float64')\n return parent_node, [leaf_values[0], 1]\n else:\n parent_node.offsets[0] = np.array([1, 1], dtype='float64')\n for i in range(1, self.base, 1):\n parent_node.child_nodes[i] = parent_node.child_nodes[0]\n parent_node.offsets[i] = np.array([leaf_values[i]/leaf_values[0], (leaf_values[i]/leaf_values[0])],\n dtype='float64')\n return parent_node, [0, leaf_values[0]]", "def span(self):\n return self.right - self.left", "def labelRoot(lut, label):\n result = lut[label]\n if lut[result] != result:\n result = labelRoot(lut, result)\n lut[label] = result\n return result", "def a_long_simple_root(self):\n if self.dynkin_diagram().rank() == 1:\n return self.simple_roots()[self.index_set()[0]]\n longest=self.dynkin_diagram().outgoing_edges()[0]\n for j in self.dynkin_diagram().outgoing_edges():\n if j[2]>longest[2]:\n longest=j\n return self.simple_roots()[longest[0]]", "def count_leaf(self):\n if self.is_empty():\n return 0\n elif self.is_leaf():\n return 1\n else:\n if self.get_left():\n if self.get_right():\n return 0 + self.get_left().count_leaf() + self.get_right().count_leaf()\n else:\n return 0 + self.get_left().count_leaf()\n else:\n return 0 + self.get_right().count_leaf()", "def _get_leaf_node_path_suffix(p):\n return _LeafNodePath(_as_root_node_tensor(p.middle[0]), p.middle[1:], p.tail)", "def full_leaf_nums(self) -> Sequence[str | int]:\n if not self._separate_leaves:\n return [\n self.leaf_num,\n ]\n else:\n return [\n 
f\"{LEFT_MLC_PREFIX}{self.leaf_num}\",\n f\"{RIGHT_MLC_PREFIX}{self.leaf_num}\",\n ]", "def create_interval_hsb(confidence, n_samples, data_point):\n if n_samples == 0:\n return Interval(0, 1)\n delta = margin_hsb(confidence, n_samples, data_point)\n return Interval(float(max(data_point - delta, 0)), float(min(data_point + delta, 1)))", "def findspan(self, u):\n #if u >= self.kv[-self.p-1]:\n # return self.kv.size - self.p - 2 # last interval\n #else:\n # return self.kv.searchsorted(u, side='right') - 1\n return pyx_findspan(self.kv, self.p, u)", "def bucket_boundaries(self, bucket):\n\n if bucket < 0 or bucket >= self.total_buckets:\n raise IndexError('bucket %d out of range' % bucket)\n if bucket == self.total_buckets - 1:\n return (self._lower_bounds[bucket], float('Inf'))\n return (self._lower_bounds[bucket], self._lower_bounds[bucket + 1])", "def _is_leaf(self, index):\r\n return 2*index+1 > self._size - 1", "def isLeaf(self, pos):\n if (self.size // 2) <= pos <= self.size:\n return True\n return False", "def isLeaf(self, pos):\n if (self.size // 2) <= pos <= self.size:\n return True\n return False", "def get_branch( self, target_id ):\n if ( target_id == self.branch_id ):\n return self\n\n if ( self.is_leaf ):\n return None\n\n if ( target_id < self.children[1].branch_id ):\n return self.children[0].get_branch( target_id )\n else:\n return self.children[1].get_branch( target_id )", "def get_leaf_nodes(self):\n leaf_nodes = []\n for i in range(8):\n node = self.children[i]\n if node:\n if node.is_leaf():\n leaf_nodes.append(node)\n else:\n leaf_nodes.extend(node.get_leaf_nodes())\n return leaf_nodes", "def interval(self):\n raise NotImplementedError()", "def CheckLeafType(leaf):\n if leaf not in ['upper', 'lower', 'both']:\n raise ValueError()\n return leaf", "def _recursive_minheight(self, start, end, l, parent):\n\t\t#base case - 0, 1 elements left in the list\n\t\tif (end < start):\n\t\t\treturn None\n\t\tif (end == start):\n\t\t\treturn BTNode(value=l[end], parent = parent)\n\n\t\t#split list in half and returns\n\t\tmid = start + (end-start)/2\n\t\tmidnode = BTNode(value=l[mid], parent=parent)\n\t\tmidnode.setlchild(self._recursive_minheight(start, mid-1, l, midnode))\n\t\tmidnode.setrchild(self._recursive_minheight(mid+1, end, l, midnode))\n\t\treturn midnode", "def _append_subset(self, node, interval):\n if interval.contains(node.interval):\n node.subset.append(interval)\n return None\n if node.left is not None:\n self._append_subset(node.left, interval)\n if node.right is not None:\n self._append_subset(node.right, interval)", "def next_leaf(node):\n return len(node[1][0][1]) == 0", "def _branch(self):\n printer = Printer(None)\n ci_manager = CIManager(printer)\n return ci_manager.get_branch()", "def break_at_integer(interval):\n unproven = []\n lower_ceil = math.ceil(interval.lower) if interval.lower < math.ceil(interval.lower) else interval.lower + 1\n while lower_ceil != math.ceil(interval.upper):\n unproven.append(\n FractionInterval.open_closed(interval.lower, lower_ceil)\n )\n interval = FractionInterval.open_closed(lower_ceil, interval.upper)\n lower_ceil += 1\n if interval:\n unproven.append(interval)\n\n return unproven" ]
[ "0.6567261", "0.6479266", "0.64509547", "0.64048004", "0.6272358", "0.59726596", "0.58964354", "0.58870226", "0.5835878", "0.58030605", "0.578581", "0.5713284", "0.5709979", "0.5675937", "0.56747264", "0.56653345", "0.5579153", "0.5558996", "0.54881215", "0.5477241", "0.5458274", "0.5421156", "0.53981185", "0.5395875", "0.53915757", "0.5369129", "0.5361364", "0.53556687", "0.5351066", "0.5332556", "0.5320417", "0.53001314", "0.5296139", "0.5284007", "0.5270216", "0.5253743", "0.5253413", "0.5236985", "0.5227393", "0.5226646", "0.5197341", "0.519488", "0.5191205", "0.51846963", "0.5161898", "0.515361", "0.5149833", "0.5142969", "0.5142117", "0.5138282", "0.5130803", "0.51304096", "0.5122884", "0.5116176", "0.5113952", "0.511119", "0.51097363", "0.5093629", "0.5092686", "0.5091299", "0.50822544", "0.5079636", "0.50734967", "0.50714564", "0.5065721", "0.50604004", "0.5057602", "0.50567746", "0.50549054", "0.50516075", "0.5043675", "0.5035195", "0.5014826", "0.50130934", "0.5002547", "0.49991593", "0.49900603", "0.49858344", "0.4982645", "0.49778828", "0.49726844", "0.4968795", "0.49676248", "0.49603185", "0.49554223", "0.49521154", "0.4947466", "0.49427873", "0.49376178", "0.4918044", "0.49180153", "0.49180153", "0.4915035", "0.49088228", "0.4902903", "0.4897743", "0.4890345", "0.48885038", "0.4886544", "0.48771924", "0.48743686" ]
0.0
-1
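Usage sketch for the descent routine retrieved in the record above: the host class below is a hypothetical minimal reconstruction (only push and get; the flat-array layout with leaves starting at capacity - 1 is inferred from the snippet, and the plain attribute names are assumptions):

class MiniSumTree:
    def __init__(self, capacity):
        self.capacity = capacity
        self.tree = [0.0] * (2 * capacity - 1)   # leaves occupy the last `capacity` slots

    def push(self, i, value):
        idx = self.capacity - 1 + i              # leaf position in the flat array
        self.tree[idx] = value
        while idx > 0:                           # propagate the new sum up to the root
            idx = (idx - 1) // 2
            self.tree[idx] = self.tree[2 * idx + 1] + self.tree[2 * idx + 2]

    def get(self, subtree_sum):                  # mirrors the descent in the record
        idx = 0
        while idx < self.capacity - 1:           # stop once idx reaches a leaf
            left = 2 * idx + 1
            if self.tree[left] >= subtree_sum:
                idx = left
            else:
                subtree_sum -= self.tree[left]
                idx = 2 * idx + 2
        return idx - self.capacity + 1, self.tree[idx]

t = MiniSumTree(4)
for i, p in enumerate([1.0, 2.0, 3.0, 4.0]):
    t.push(i, p)
assert t.get(0.5) == (0, 1.0)                    # mass in [0, 1] lands on leaf 0
assert t.get(9.5) == (3, 4.0)                    # mass in (6, 10] lands on leaf 3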
Return the sum of all elements in the tree.
def get_sum(self):
    return self.__tree[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sum(self) -> int:\n return self.root.sum", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total", "def sum_of_tree(root_elem):\r\n\tif root_elem is None:\r\n\t\treturn 0\r\n\treturn root_elem.value + sum_of_tree(root_elem.left) + sum_of_tree(root_elem.right)", "def sum(self):\n import numpy as np\n\n # covering zero-matrices\n if self.child_nodes == {}:\n return self.null_value\n\n def sum_rec(node, offset):\n # making sure the node exists\n if not node:\n return 0\n # checking whether the node is a leaf\n elif node.is_leaf():\n return np.sum(node.dtype.to_mat(node, offset))\n else:\n tmp_result = 0\n # the recursive call\n # checking for the kind of diagram. MTxxx?\n if self.offsets == {}:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node, 0, 0))\n # or edge-value dd?\n else:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node,\n node.offsets[edge_name],\n offset))\n\n return tmp_result\n\n return sum_rec(self, None)", "def total(tree):\n if tree is None:\n return 0\n return total(tree.left) + total(tree.right) + tree.cargo", "def sum(self):\n return self.aggregate(np.sum)", "def calculate_sum(self):\n\n left_sum = self.left.calculate_sum() if self.left else 0\n right_sum = self.right.calculate_sum() if self.right else 0\n return self.data + left_sum + right_sum", "def sum(self):\n return self._reduce_for_stat_function(F.sum, only_numeric=True)", "def sum(self):\n return sum(self.values)", "def total(self):\n return sum(self.meta) + sum(child.total() for child in self.children)", "def sum_elements(arr):\n return sum(arr)", "def sum (self):\n return self.values.sum ()", "def sum (self):\n return self.values.sum ()", "def compute_node_sums(nodes):\n for node in nodes:\n node.children_summed = 0 # Dynamically add a meta field to Node to improve runtime when computing sums.\n\n leaf_nodes = []\n for node in nodes:\n if len(node.children) == 0:\n leaf_nodes.append(node)\n to_process = leaf_nodes\n while to_process:\n node = to_process.pop()\n # if leaf_node or all child notes computed their sum.\n if len(node.children) == 0 or len(node.children) == node.children_summed:\n node.sum = node.value\n if len(node.children) > 0:\n node.sum = node.sum + sum([child.sum for child in list(node.children.values())])\n if node.parent:\n node.parent.children_summed += 1\n if len(\n node.parent.children) == node.parent.children_summed: # all children have computed their sums\n to_process.append(node.parent)\n\n for node in nodes:\n del node.children_summed", "def sum(self) -> float:\n return sum(self.values)", "def __binary_tree_node_sum(self, root, depth=0, node_type=None):\n if root == None:\n return self.node_sum\n multiplication_factor = 1\n if node_type == \"left\" and depth % 2 == 0:\n multiplication_factor = 2\n self.node_sum += depth * multiplication_factor\n self.__binary_tree_node_sum(root.left, depth=depth+1, node_type=\"left\")\n self.__binary_tree_node_sum(root.right, depth= depth+1, node_type=\"right\")\n return self.node_sum", "def _sum(self):\n s = 0\n for element, value in self.items():\n s += value\n return s", "def sum(self):\n return self.vsum", "def sum(self):\n return sum(self.items())", "def sum_node_list(node_list):\r\n from operator import add\r\n from functools import reduce\r\n return reduce(add, node_list)", "def get_sum(self, node: 
Optional[TreeNode]) -> int:\n if not node:\n return 0\n l_sub_sum, r_sub_sum = self.get_sum(node.left), self.get_sum(node.right)\n self.ans += abs(l_sub_sum - r_sub_sum)\n\n return node.val + l_sub_sum + r_sub_sum", "def sum(self):\n total = 0\n for el in self.__list:\n if type(el) is int or type(el) is float:\n total += el\n elif not el:\n continue\n else:\n total += len(el)\n return total", "def sum_node_list(node_list):\n from operator import add\n from functools import reduce\n return reduce(add, node_list)", "def total_nodes(self)->int:\n\t\tqueue=[]\n\t\tsum=0\n\t\tqueue.append(self)\n\t\twhile(len(queue)>0):\n\t\t\tnode=queue.pop(0)\n\t\t\tsum+=1\n\t\t\tif(node.right!=None):\n\t\t\t\tqueue.append(node.right)\n\t\t\tif(node.left!=None):\n\t\t\t\tqueue.append(node.left)\n\t\treturn sum", "def BinaryTreeNodeDepthSum(self, root):\n return self.__binary_tree_node_sum(root)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def sum(self, start=0, end=None):\n return super(SumSegmentTree, self).reduce(start, end)", "def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n vals.append(ans)\n return ans", "def sum_of_nodes(t):\n return label(t) + sum([sum_of_nodes(b) for b in branches(t)])", "def sum(self):\n return self._summarize(lambda c: c.sum)", "def sum(self):\n return sum(self._values.values())", "def fn(node, x):\n if not node: return x\n x = fn(node.right, x) # sum of right subtree\n x += node.val \n node.val = x\n return fn(node.left, x)", "def getValue(self):\n result = 0.0\n for e in self.children:\n result += e.getValue()\n return result", "def op_sum(self, args):\n sum = 0\n stack_levels = len(self.stack)\n if args != None:\n stack_levels = int(args[0])\n self.require_stack(stack_levels)\n for i in range(0, stack_levels):\n sum += self.stack.pop()\n self.stack.append(sum)", "def summation(self):\n return sum(self.read_ints())", "def sum(self):\n # skipna == True\n # only_numerical == True\n # skipna == True\n return self._lift(\"sum\")", "def get_sum(self, i):\n s = 0\n\n # index in BITree is 1 more than index in arr[]\n i += 1\n\n # Traverse to leaves of BITree[i]:\n while i > 0:\n s += self.BITree[i]\n\n # Move index to parent node (next set bit in binary representation)\n i -= i & (-i)\n\n return s", "def minimum_path_sum(self, root) -> int:\n\n def minimum_path_sum_aux(root, path=None):\n path.append(root.value)\n new_path = path[:]\n\n # Stop condition\n if root.is_leaf():\n return\n else:\n if root.left is not None:\n minimum_path_sum_aux(root.left, path=path)\n elif root.right is not None:\n minimum_path_sum_aux(root.right, path=path)\n\n if root.right is not None and root.left is not None:\n paths.append(new_path)\n minimum_path_sum_aux(root.right, path=new_path)\n\n paths = [[]]\n\n minimum_path_sum_aux(root, path=paths[0])\n return min([sum(path) for path in paths])", "def value(self):\n if self.children == tuple():\n return sum(self.meta)\n total = 0\n for meta in self.meta:\n if 0 < meta <= len(self.children):\n total += self.children[meta-1].value()\n return total", "def summed(L):\r\n result = 0\r\n for e in L:\r\n result = result + e # or result += e\r\n return result", "def sum(matrix):\n\n return 
float(sum([sum(row) for row in matrix]))", "def getSum(root, level, h):\n if root == None:\n return\n \n h[level] = root.data\n \n getSum(root.left, level+1, h)\n getSum(root.right, level+1, h)", "def get_nested_sum():\n l_int = [1,2,[], 3,[4,[], 5,[6]],[7],[8,9], 10,[[],11]]\n print 'Sum:', nested_sum(l_int) \n return", "def n(self):\n return sum(list(self.nodes.values()))", "def sum_node_list(node_list):\n node_list = [n for n in node_list if n is not None]\n if node_list == []:\n return None\n\n from operator import add\n from functools import reduce\n return reduce(add, node_list)", "def sum(self):\n if self.isscalar():\n s = self.defval\n else:\n if self.defval:\n msg = \"Sum of a tensor wish defval != 0 not implemented.\"\n raise NotImplementedError(msg)\n s = 0\n for v in self.sects.values():\n s += np.sum(v)\n return s", "def total(self):\n return sum(self.d.values())", "def sum(self, values):\n return self.aggregate(values, \"sum\")", "def sum_node_depths(node, current_sum, level):\n # Base case\n if node is None:\n return current_sum\n\n current_sum += level\n current_sum = sum_node_depths(node.left, current_sum, level + 1)\n current_sum = sum_node_depths(node.right, current_sum, level + 1)\n\n return current_sum", "def sum_values(values):\n return (sum(values))", "def fn(node, val):\n if not node: return 0\n val = 10*val + node.val\n if not node.left and not node.right: return val \n return fn(node.left, val) + fn(node.right, val)", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def sum(self) -> FrameLike:\n return super().sum()", "def total(self):\n total = sum(self.d.values())\n return total", "def sum(self):\n return sum(self.times)", "def sum(self):\n return sum(self.times)", "def sum(self):\n return sum(self.times)", "def sum(self, axis=None, keepdims=False):\n return F.Sum.apply(self, axis, keepdims)", "def xpathSumFunction(self, nargs):\n libxml2mod.xmlXPathSumFunction(self._o, nargs)", "def sum_of(self, names):\n vals = self.get_values(names)\n if vals is None:\n return None\n return sum(vals)", "def depthSum(self, nestedList: List[NestedInteger]) -> int:\n final_sum = 0\n def dfs(nlist,depth):\n nonlocal final_sum\n #no base case\n \n #logic\n for ele in nlist:\n if ele.isInteger():\n #add the value to the sum\n final_sum += ele.getInteger() * depth\n else:\n dfs(ele.getList(),depth+1)\n dfs(nestedList,1)\n return final_sum", "def sum_plus(t, init):\n total = init\n for x in t:\n total += x\n return total", "def sum(values):\n total = 0\n for i in values:\n total += i\n return total", "def sum(self):\n return np.dot(self.data.T, self.weights)", "def all_sum(structure, name=None):\n num_replicas = get_num_replicas()\n\n if num_replicas <= 1:\n return structure\n\n tf_replicator = get_tf_replicator()\n if tf_replicator:\n return tf_replicator.all_sum(structure)\n\n elif tf.distribute.has_strategy() and tf.distribute.get_replica_context():\n return tf.distribute.get_replica_context().all_reduce(\n tf.distribute.ReduceOp.SUM, structure)\n\n elif is_tpu_replicated():\n def tpu_all_sum(tensor):\n return tpu_ops.cross_replica_sum(tensor, name=name)\n\n return nest.map_structure(tpu_all_sum, structure)\n\n return structure", "def sum_of_numbers(numbers):\r\n return sum(numbers)", "def sum(self) -> \"Stream[float]\":\n return self.agg(np.sum).astype(\"float\")", "def sum_values(self):\n raise NotImplementedError", "def max_path_sum(root_elem):\r\n\tif root_elem is 
None:\r\n\t\treturn 0\r\n\tif root_elem.left is None and root_elem.right is None:\r\n\t\treturn root_elem.value\r\n\tmax_child = max(max_path_sum(root_elem.left),max_path_sum(root_elem.left))\r\n\treturn root_elem.value + max_child", "def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())", "def sum(self, start=0, end=None):\n return super().reduce(start, end)", "def recursive_sum(lst):\n\n if lst == []:\n return 0\n\n else:\n\n return lst[0] + recursive_sum(lst[1:])", "def sum(self):\n\n return time_stat(self, stat=\"sum\")", "def sum(lst):\n total = 0\n for i in lst:\n total += i\n return total", "def sum_sum(t, init):\n return sum(t, init)", "def fsum(iterable):\n return 0.0", "def _get_sum(self):\r\n try:\r\n return self._sum\r\n except AttributeError:\r\n self._sum = self.no_nan.sum()\r\n # The following 2 lines are needede as in Python 3.3 with NumPy\r\n # 1.7.1, numpy.ndarray and numpy.memmap aren't hashable.\r\n if type(self._sum) is numpy.memmap:\r\n self._sum = numpy.asarray(self._sum).item()\r\n if self.has_nan and self.no_nan.mask.all():\r\n # In this case the sum is not properly computed by numpy.\r\n self._sum = 0\r\n if numpy.isinf(self._sum) or numpy.isnan(self._sum):\r\n # NaN may happen when there are both -inf and +inf values.\r\n if self.has_nan:\r\n # Filter both NaN and Inf values.\r\n mask = self.no_nan.mask + numpy.isinf(self[1])\r\n else:\r\n # Filter only Inf values.\r\n mask = numpy.isinf(self[1])\r\n if mask.all():\r\n self._sum = 0\r\n else:\r\n self._sum = numpy.ma.masked_array(self[1], mask).sum()\r\n # At this point there should be no more NaN.\r\n assert not numpy.isnan(self._sum)\r\n return self._sum", "def __call__(self):\n return self._left() + self._right()", "def som(getallenlijst):\r\n total = sum(getallenlijst)\r\n return total", "def reduce(self) -> float:\n # Note: Reduction over segments not supported/needed for now.\n return self._tree[1]", "def containNode(self, root, sum):\n if (root is None):\n return 0\n \n res = 0\n if (root.val == sum):\n res += 1\n \n res += self.containNode(root.left, sum - root.val)\n res += self.containNode(root.right, sum - root.val)\n \n return res", "def sum(self, rows: List[Row], column: NumberColumn) -> Number:\n cell_values = [row.values[column.name] for row in rows if row.values[column.name] is not None]\n if not cell_values:\n return 0.0 # type: ignore\n return sum(cell_values) # type: ignore", "def ll_sum(x):\n xlist = []\n for i in x:\n for num in i:\n xlist.append(num)\n return sum(xlist)", "def evaluate_expression_tree(root:Node) -> float:\n if root is None:\n return 0\n if root._left is None and root._right is None:\n return float(root._data)\n left_sum = evaluate_expression_tree(root._left)\n right_sum = evaluate_expression_tree(root._right)\n if root._data == '+':\n return left_sum + right_sum\n elif root._data == '-':\n return left_sum - right_sum\n elif root._data == '*':\n return left_sum * right_sum\n elif root._data == '/':\n return left_sum / right_sum\n elif root._data == '^':\n return left_sum ** right_sum\n else:\n raise ArithmeticError(root._data)", "def calcularTotal(self):\n subtotales=[]\n for row in range(0,self.tableNC.rowCount()):\n subtotales.append(float(self.tableNC.item(row,2).text()))\n return sum(subtotales)", "def calculate_sum_of_all_attributes(self):\n\n sum = 0\n\n for key, val in self.__dict__.items():\n\n if isinstance(val, (int, float)):\n sum += val\n\n return sum", "def leaf_count(T):\n if T.is_leaf:\n return 1\n else:\n# s = 0\n# for child in 
T:\n# s += leaf_count(child)\n# return s\n return reduce(add, map(leaf_count, T))", "def nodalSum(val,elems,work,avg):\n nodes = unique1d(elems)\n for i in nodes:\n wi = where(elems==i)\n vi = val[wi]\n if avg:\n vi = vi.sum(axis=0)/vi.shape[0]\n else:\n vi = vi.sum(axis=0)\n val[wi] = vi", "def convert_sum(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n else:\n node = onnx.helper.make_node(\n 'ReduceSum',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n return [node]", "def get_sum(lst):\n _sum=0\n for i in lst:\n _sum+=i\n return _sum", "def path_sum(self, node):\n if self.is_leaf(node):\n return node.val, node.val\n if node is None:\n return 0, -2147483648\n\n left, sub1 = self.path_sum(node.left)\n right, sub2 = self.path_sum(node.right)\n left = left if left > 0 else 0\n right = right if right > 0 else 0\n\n if left > right:\n maximum_path = node.val + left\n else:\n maximum_path = node.val + right\n\n sub_result = max(max(sub1, sub2), node.val + left + right)\n return maximum_path, sub_result", "def find_sum(root, desired_sum, level=0, buffer_list=None, result=[]):\n if not buffer_list:\n buffer_list = []\n\n if not root:\n return result\n\n buffer_list.append(root.key)\n temp = desired_sum\n\n for i in range(level, -1, -1):\n temp -= buffer_list[i]\n\n if temp == 0:\n result.append(buffer_list[i:level + 1])\n\n find_sum(root.left, desired_sum, level + 1, buffer_list[:], result)\n find_sum(root.right, desired_sum, level + 1, buffer_list[:], result)\n\n return result", "def getSum2(root, level=0, maxLevel=None, sum=None):\n if root == None:\n return 0\n \n if maxLevel == None:\n maxLevel = [-1]\n sum = [0]\n \n if maxLevel[0] < level:\n sum[0] += root.data\n maxLevel[0] = level\n \n getSum2(root.right, level+1, maxLevel, sum) \n getSum2(root.left , level+1, maxLevel, sum)\n\n if level == 0:\n return sum[0]", "def rowsums (self):\n return self.values.sum (axis=0)", "def fn(node):\n nonlocal ans\n if not node: return 0 \n sm = fn(node.left) + fn(node.right)\n if sm == node.val: ans += 1\n return sm + node.val" ]
[ "0.8006212", "0.7781466", "0.77158576", "0.7709356", "0.7609589", "0.71323264", "0.7114631", "0.70373267", "0.7006873", "0.7004098", "0.68861806", "0.68853563", "0.68853563", "0.68846256", "0.688014", "0.6859513", "0.68298334", "0.6807194", "0.68041164", "0.6771166", "0.6747318", "0.6746709", "0.6745385", "0.674047", "0.67248", "0.6716253", "0.6716253", "0.6716253", "0.6716253", "0.6716253", "0.67139715", "0.6660269", "0.66546464", "0.66519785", "0.6631596", "0.6498494", "0.6471673", "0.64713854", "0.64601815", "0.64583343", "0.6393222", "0.6383669", "0.63628626", "0.63513064", "0.635065", "0.63255364", "0.6315623", "0.6311165", "0.62922996", "0.61794335", "0.61621016", "0.61385083", "0.60749555", "0.60712117", "0.60694355", "0.60694355", "0.60694355", "0.60694355", "0.60683906", "0.6059509", "0.6059509", "0.6059509", "0.60392666", "0.6014046", "0.6013197", "0.6005319", "0.60048205", "0.5957826", "0.5929891", "0.5928412", "0.5918795", "0.5917535", "0.5909767", "0.5903486", "0.590257", "0.5890982", "0.58821267", "0.58674216", "0.58617985", "0.5858568", "0.58578336", "0.58473325", "0.58402896", "0.58371246", "0.58368725", "0.58366877", "0.581317", "0.5797478", "0.578911", "0.5784757", "0.57831204", "0.57831055", "0.57805973", "0.5774807", "0.5752447", "0.574191", "0.57349855", "0.5734586", "0.5726722", "0.5724537" ]
0.755793
5
Receives the idx of a leaf node and updates the sums on all the nodes above it based on its current value.
def __update(self, idx):
    parent = (idx - 1) // 2
    while parent >= 0:
        left, right = 2 * parent + 1, 2 * parent + 2
        self.__tree[parent] = self.__tree[left] + self.__tree[right]
        parent = (parent - 1) // 2
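A minimal, self-contained sketch of the array-backed sum tree this method would sit in. The class name, constructor, and the `update`/`total` helpers are assumptions added for illustration; the leaf layout (leaves stored in the second half of the array at offset `capacity - 1`) mirrors the companion `update(self, idx, value)` snippet that appears among the negatives below.

# Sketch only: wraps the __update method above in an assumed SumTree class.
class SumTree:
    def __init__(self, capacity):
        self.__capacity = capacity
        # Complete binary tree in an array: internal nodes first, leaves last.
        self.__tree = [0.0] * (2 * capacity - 1)

    def update(self, leaf, value):
        idx = self.__capacity - 1 + leaf  # absolute index of the leaf node
        self.__tree[idx] = value
        self.__update(idx)                # re-aggregate every ancestor sum

    def __update(self, idx):
        parent = (idx - 1) // 2
        while parent >= 0:
            left, right = 2 * parent + 1, 2 * parent + 2
            self.__tree[parent] = self.__tree[left] + self.__tree[right]
            parent = (parent - 1) // 2

    def total(self):
        return self.__tree[0]  # the root holds the sum of all leaves


tree = SumTree(4)
tree.update(0, 3.0)
tree.update(2, 5.0)
assert tree.total() == 8.0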
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(\n self, index: Union[int, np.ndarray], value: Union[float, np.ndarray]\n ):\n\n tree_index = self.capacity + index\n self._tree[tree_index] = value\n\n # Propagate up the tree.\n parent = tree_index // 2\n while np.any(parent > 0):\n left = self._tree[2 * parent] # Children/sibling.\n right = self._tree[2 * parent + 1]\n # Note: Due to possible floating point error in the sum-tree case,\n # it's safer to recompute the parent nodes directly rather than to\n # accumulate an \"update\" up the tree which could be faster.\n self._tree[parent] = self.operation(left, right)\n parent = parent // 2", "def set(self, node_index, value):\n if value < 0.0:\n raise ValueError(\n 'Sum tree values should be nonnegative. Got {}'.format(value))\n self.highest_set = max(node_index, self.highest_set)\n node_index = node_index + self.low_idx\n self.max_recorded_priority = max(value, self.max_recorded_priority)\n\n delta_value = value - self.nodes[node_index]\n\n # Now traverse back the tree, adjusting all sums along the way.\n for _ in reversed(range(self.depth)):\n # Note: Adding a delta leads to some tolerable numerical inaccuracies.\n self.nodes[node_index] += delta_value\n node_index = (node_index - 1) // 2\n\n self.nodes[node_index] += delta_value\n assert node_index == 0, ('Sum tree traversal failed, final node index '\n 'is not 0.')", "def update(self, idx, value):\n idx = self.__capacity - 1 + idx\n self.__tree[idx] = value\n self.__update(idx)", "def fn(node, x):\n if not node: return x\n x = fn(node.right, x) # sum of right subtree\n x += node.val \n node.val = x\n return fn(node.left, x)", "def _fix_up_to_root(self, idx):\n combine_fn = self._combine_fn\n while idx >= 1:\n # self.data[idx] = combine_fn(self.data[self._left(idx)], self.data[self._right(idx)])\n self.data[idx] = combine_fn(self.data[2 * idx], self.data[2 * idx + 1])\n # idx = self._parent(idx)\n idx = idx >> 1", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits. wtf ??? 
this line is rigth but kind of wired !\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def __setitem__(self, idx: int, val: float) -> None:\n assert 0 <= idx < self.capacity, f\"idx={idx} capacity={self.capacity}\"\n\n # Index of the leaf to insert into (always insert in \"second half\"\n # of the tree, the first half is reserved for already calculated\n # reduction-values).\n idx += self.capacity\n self.value[idx] = val\n\n # Recalculate all affected reduction values (in \"first half\" of tree).\n idx = idx >> 1 # Divide by 2 (faster than division).\n while idx >= 1:\n update_idx = 2 * idx # calculate only once\n # Update the reduction value at the correct \"first half\" idx.\n self.value[idx] = self.operation(\n self.value[update_idx], self.value[update_idx + 1]\n )\n idx = idx >> 1 # Divide by 2 (faster than division).", "def update(self, leaf_value,visits_count=1):\n # Count visit.\n self._n_visits += visits_count\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits\n\n if self.is_root():\n self.last_leafvalue = leaf_value", "def update(self, i, v):\n # index in BTree is 1 more than index in arr[]\n i += 1\n\n # Traverse to ancestors of BITree[i]\n while i <= self.size:\n self.BITree[i] += v\n\n # Update index to next set bit in binary representation\n i += i & (-i)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value)\n self.update(leaf_value)", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0*(leaf_value - self._Q) / self._n_visits", "def updateTree(self, i, val, cur):\n start, end = cur.start, cur.end\n if start == end == i:\n cur.val = val\n return\n mid = start+(end-start)/2\n if i <= mid:\n cur.val -= cur.left.val\n self.updateTree(i, val, cur.left)\n cur.val += cur.left.val\n else:\n cur.val -= cur.right.val\n self.updateTree(i, val, cur.right)\n cur.val += cur.right.val", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.\n self._Q += 1.0 * (leaf_value - self._Q) / self._n_visits", "def update(self, leaf_value):\n # Count visit.\n self._n_visits += 1\n # Update Q, a running average of values for all visits.", "def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n vals.append(ans)\n return ans", "def include_final_offset(node, offset):\n for leaf in node.leaves:\n leaf.value = leaf.value * offset", "def update_recursive(self, leaf_value):\n # If it is not root, this node's parent should be updated first.\n if self._parent:\n self._parent.update_recursive(-leaf_value) # - leaf_value because the MCTS tree is a max-min tree\n self.update(leaf_value)", "def sum_tree(t):\n \"*** YOUR CODE HERE ***\"\n if is_leaf(t):\n return entry(t)\n total = entry(t)\n for subtree in subtrees(t):\n total += sum_tree(subtree)\n return total", "def update_tree(root, executed_acts, total_rew):\n root.value = 
max(total_rew, root.value)\n root.visits += 1\n new_nodes = 0\n\n node = root\n for step, act in enumerate(executed_acts):\n if act not in node.children:\n node.children[act] = Node()\n new_nodes += 1\n node = node.children[act]\n node.value = max(total_rew, node.value)\n node.visits += 1\n\n return new_nodes", "def find_prefixsum_idx(self, prefixsum):\n if isinstance(prefixsum, float):\n prefixsum = np.array([prefixsum])\n assert 0 <= np.min(prefixsum)\n assert np.max(prefixsum) <= self.sum() + 1e-5\n assert isinstance(prefixsum[0], float)\n\n idx = np.ones(len(prefixsum), dtype=int)\n cont = np.ones(len(prefixsum), dtype=bool)\n\n while np.any(cont): # while not all nodes are leafs\n idx[cont] = 2 * idx[cont]\n prefixsum_new = np.where(\n self._value[idx] <= prefixsum, prefixsum - self._value[idx], prefixsum\n )\n # prepare update of prefixsum for all right children\n idx = np.where(\n np.logical_or(self._value[idx] > prefixsum, np.logical_not(cont)),\n idx,\n idx + 1,\n )\n # Select child node for non-leaf nodes\n prefixsum = prefixsum_new\n # update prefixsum\n cont = idx < self._capacity\n # collect leafs\n return idx - self._capacity", "def include_final_offset(node, offset):\n for leaf in node.leaves:\n leaf.value = leaf.value + offset", "def include_final_offset(node, offset):\n if offset != 0.0:\n for leaf in node.leaves:\n leaf.value = leaf.value + offset", "def update(self, index: int, x: int):\n index += self.n2\n self.tree[index] = self.binary(self.tree[index], x)\n while index > 1:\n # (index ^ 1) はiと1の排他的論理和(XOR)\n x = self.binary(x, self.tree[index ^ 1])\n index >>= 1 # 右ビットシフトで親ノードのインデックスへ移動\n self.tree[index] = self.binary(self.tree[index], x)", "def get(self, subtree_sum):\n idx = 0\n while True:\n # if idx is a leaf node return the idx and the value\n if idx >= self.__capacity - 1:\n return (idx - self.__capacity + 1, self.__tree[idx])\n\n # else continue down\n left = 2 * idx + 1\n right = 2 * idx + 2\n left_sum = self.__tree[left]\n if left_sum >= subtree_sum:\n idx = left\n else:\n idx = right\n subtree_sum -= left_sum", "def fn(node, val):\n if not node: return 0\n val = 10*val + node.val\n if not node.left and not node.right: return val \n return fn(node.left, val) + fn(node.right, val)", "def sum(self) -> int:\n return self.root.sum", "def getSum(root, level, h):\n if root == None:\n return\n \n h[level] = root.data\n \n getSum(root.left, level+1, h)\n getSum(root.right, level+1, h)", "def find_sum(root, desired_sum, level=0, buffer_list=None, result=[]):\n if not buffer_list:\n buffer_list = []\n\n if not root:\n return result\n\n buffer_list.append(root.key)\n temp = desired_sum\n\n for i in range(level, -1, -1):\n temp -= buffer_list[i]\n\n if temp == 0:\n result.append(buffer_list[i:level + 1])\n\n find_sum(root.left, desired_sum, level + 1, buffer_list[:], result)\n find_sum(root.right, desired_sum, level + 1, buffer_list[:], result)\n\n return result", "def heap_update(self):\n print 'SumTree pre-update:', self.replay.tree[0].sum\n last_ixs = self.replay.last_ixs(True)\n while True:\n if len(last_ixs) == 0:\n break\n if len(last_ixs) < 10000:\n ixs = last_ixs\n last_ixs = []\n else:\n ixs = last_ixs[:10000]\n last_ixs = last_ixs[10000:]\n batch = [self.replay.tree[ix].pointer for ix in ixs]\n delta = self.get_delta(batch)\n self.get_p_weights(delta, batch, ixs)\n print 'SumTree post-update:', self.replay.tree[0].sum\n print 'SumTree updated'", "def dist_from_root(self, index):\n if index == 0:\n return 0\n return self.dist_from_root(self.parent(index)) + 1", 
"def add(self, idx):\n # add the cost\n self.g += self.graph[self.visited[-1], idx]\n # add the to the visited place and remove from the unvisited places\n self.visited.append(idx)\n self.not_visited.remove(idx)", "def iter_leaf_idx(self):\n def leaf_idx(tree, total):\n total[0] += 1\n for elem in tree[1:]:\n if isinstance(elem, Tree):\n for elem2 in leaf_idx(elem, total):\n yield total[0]\n else:\n yield total[0]\n total[0] += 1\n return leaf_idx(self, [0])", "def index_tree(tree, offset):\n for idx, _ in enumerate(tree.leaves()):\n tree_location = tree.leaf_treeposition(idx)\n non_terminal = tree[tree_location[:-1]]\n # non_terminal.append(idx)\n non_terminal[0] = (non_terminal[0], idx+offset)\n return tree", "def sum_value(self, lv, rv):", "def value(d,o):\n # return memoized value if possible\n if (d,o) in v:\n return v[(d,o)]\n\n thisitem = int(t[d][o])\n # the total of a subtree that starts at the leaf, is just the value of the leaf\n if d == maxdepth:\n val = thisitem\n else:\n val = thisitem + max(value(d+1,o),value(d+1,o+1))\n\n v[(d,o)]=val\n return val", "def nodalSum(val,elems,work,avg):\n nodes = unique1d(elems)\n for i in nodes:\n wi = where(elems==i)\n vi = val[wi]\n if avg:\n vi = vi.sum(axis=0)/vi.shape[0]\n else:\n vi = vi.sum(axis=0)\n val[wi] = vi", "def _update_value_at(self, index, value):\n node = self._get_node_at(index)\n if node is None:\n raise IndexError('List index out of range.')\n node.value = value", "def sumRangeTree2(self, i, j, cur):\n if i > j:\n return 0\n start, end = cur.start, cur.end\n if i == start and j == end:\n return cur.val\n mid = start+(end-start)/2\n return self.sumRangeTree(i, min(j, mid), cur.left) + self.sumRangeTree(max(mid+1, i), j, cur.right)", "def sum_of_nodes(t):\n return label(t) + sum([sum_of_nodes(b) for b in branches(t)])", "def compute_node_sums(nodes):\n for node in nodes:\n node.children_summed = 0 # Dynamically add a meta field to Node to improve runtime when computing sums.\n\n leaf_nodes = []\n for node in nodes:\n if len(node.children) == 0:\n leaf_nodes.append(node)\n to_process = leaf_nodes\n while to_process:\n node = to_process.pop()\n # if leaf_node or all child notes computed their sum.\n if len(node.children) == 0 or len(node.children) == node.children_summed:\n node.sum = node.value\n if len(node.children) > 0:\n node.sum = node.sum + sum([child.sum for child in list(node.children.values())])\n if node.parent:\n node.parent.children_summed += 1\n if len(\n node.parent.children) == node.parent.children_summed: # all children have computed their sums\n to_process.append(node.parent)\n\n for node in nodes:\n del node.children_summed", "def fn(node):\n nonlocal ans\n if not node: return 0 \n sm = fn(node.left) + fn(node.right)\n if sm == node.val: ans += 1\n return sm + node.val", "def helper(node: TreeNode, cur: int, target: int):\n if not node:\n return\n \n cur += node.val\n \n if cur == target:\n self.output += 1\n \n helper(node.left, cur, target)\n helper(node.right, cur, target)", "def update_point(tree, y, x, diff):\n max_y = len(tree)\n max_x = len(tree[0])\n x_orig = x\n\n while y < max_y:\n x = x_orig\n while x < max_x:\n tree[y][x] += diff\n x += (x & -x)\n y += (y & -y)", "def add(self, i: int, v: int) -> None:\n while i < self.size:\n self.tree[i] += v\n i += self._lsb(i)", "def increment_node_index(self):\n self.node_index += 1", "def update(self, idx, add):\n idx += 1\n while idx < len(self.array):\n self.array[idx] += add\n idx += idx & -idx #Adding the last bit", "def get_sum(self, i):\n s = 0\n\n # 
index in BITree is 1 more than index in arr[]\n i += 1\n\n # Traverse to leaves of BITree[i]:\n while i > 0:\n s += self.BITree[i]\n\n # Move index to parent node (next set bit in binary representation)\n i -= i & (-i)\n\n return s", "def sum_node_depths(node, current_sum, level):\n # Base case\n if node is None:\n return current_sum\n\n current_sum += level\n current_sum = sum_node_depths(node.left, current_sum, level + 1)\n current_sum = sum_node_depths(node.right, current_sum, level + 1)\n\n return current_sum", "def apply(self, f):\n if self.is_empty():\n return 0\n else:\n self.get_root().value = f(self.get_root().value)\n if self.get_left():\n self.get_left().apply(f)\n if self.get_right():\n self.get_right().apply(f)", "def fn(node):\n if not node: return 0 \n ans = node.val + fn(node.left) + fn(node.right)\n freq[ans] += 1\n return ans", "def backpropagate(self, search_path, value):\n\n for node in search_path:\n node.n_visits += 1\n node.n_a[node.action_taken] += 1 \n # Incremental mean calculation\n node.q_a[node.action_taken] = (node.q_a[node.action_taken] * \n (node.n_visits - 1) + value) / \\\n node.n_visits", "def _update_node(node, value):\n node.N += 1\n node.W += value\n node.Q = node.W / node.N", "def sum_of_tree(root_elem):\r\n\tif root_elem is None:\r\n\t\treturn 0\r\n\treturn root_elem.value + sum_of_tree(root_elem.left) + sum_of_tree(root_elem.right)", "def __binary_tree_node_sum(self, root, depth=0, node_type=None):\n if root == None:\n return self.node_sum\n multiplication_factor = 1\n if node_type == \"left\" and depth % 2 == 0:\n multiplication_factor = 2\n self.node_sum += depth * multiplication_factor\n self.__binary_tree_node_sum(root.left, depth=depth+1, node_type=\"left\")\n self.__binary_tree_node_sum(root.right, depth= depth+1, node_type=\"right\")\n return self.node_sum", "def setSubtreeDF(self, index, subtree):\n if index == 0:\n try:\n self[:] = subtree\n except TypeError:\n del self[1:]\n self[0] = subtree\n return\n \n total = 0\n for i, child in enumerate(self):\n if total == index:\n self[i] = subtree\n return\n nbr_child = child.size\n if nbr_child + total > index:\n child.setSubtreeDF(index-total, subtree)\n return\n total += nbr_child", "def total(tree):\n if tree is None:\n return 0\n return total(tree.left) + total(tree.right) + tree.cargo", "def getSum2(root, level=0, maxLevel=None, sum=None):\n if root == None:\n return 0\n \n if maxLevel == None:\n maxLevel = [-1]\n sum = [0]\n \n if maxLevel[0] < level:\n sum[0] += root.data\n maxLevel[0] = level\n \n getSum2(root.right, level+1, maxLevel, sum) \n getSum2(root.left , level+1, maxLevel, sum)\n\n if level == 0:\n return sum[0]", "def add(self, idx):\n self.g += graph[self.visited[-1], self.not_visited[idx]]\n self.visited.append(self.not_visited.pop(idx))\n if len(self.not_visited) > 0:\n self.h = minimum_spanning_arborescence(self)\n else:\n self.h = 0", "def draw_leaf_value(self, idx_data_points):\n R_j = self.get_residuals()[idx_data_points]\n draw = self.mean(R_j)\n return draw", "def update_priority(self, indexes, values):\n values = values * 10000\n values = self._clip_p(values)\n values = int(values)\n self.sum_tree.update(indexes, values)", "def calc_weight(base):\n return weights[base] + sum([calc_weight(i) for i in leafs[base]])", "def containNode(self, root, sum):\n if (root is None):\n return 0\n \n res = 0\n if (root.val == sum):\n res += 1\n \n res += self.containNode(root.left, sum - root.val)\n res += self.containNode(root.right, sum - root.val)\n \n return res", "def 
increment(self):\n if self.is_empty():\n return 0\n else:\n self.get_root().value += 1\n if self.get_left():\n self.get_left().increment()\n if self.get_right():\n self.get_right().increment()", "def rangeSumBST(self, root: TreeNode, lo: int, hi: int) -> int:\n\n def visit(node: TreeNode) -> int:\n if not node:\n return 0\n if node.val < lo:\n return visit(node.right)\n elif hi < node.val:\n return visit(node.left)\n else:\n return node.val + visit(node.left) + visit(node.right)\n return visit(root)", "def __getitem__(self, index):\n node = self.head\n index += 1\n for level in reversed(range(self.max_levels)):\n while node.width[level] <= index:\n index -= node.width[level]\n node = node.next[level]\n return node.value", "def _update_accumulation(self, index, grad):\n self.accumulation[index] = self.accumulation[index] + grad**2", "def leafScore(self) :\n return 0", "def __adjust_leaf_indices(self, pattern: Pattern):\n leaf_mapping = {}\n # update the leaves\n for leaf in self.get_leaves():\n current_index = leaf.get_leaf_index()\n correct_index = pattern.get_index_by_event_name(leaf.get_event_name())\n leaf.set_leaf_index(correct_index)\n leaf_mapping[current_index] = correct_index\n # update the event definitions in the internal nodes\n # note that it is enough to only update the root since it contains all the event definition objects\n for event_def in self.__root.get_event_definitions():\n event_def.index = leaf_mapping[event_def.index]", "def update(self, value, index):\n\n length = self.get_length()\n if type(index) is int:\n if index > length:\n # The index value is out of range and prompts and exits\n print(\"Index is out of range.\")\n return\n else:\n this_node = Node(data=value)\n if index == 0:\n this_node.next = self.head.next\n this_node.prev =None\n self.head = this_node\n else:\n cur = self.head\n while index - 1:\n cur = cur.next\n index -= 1\n this_node.next = cur.next.next\n this_node.prev = cur.next.prev\n cur.next = this_node\n return\n else:\n print(\"Index value is not int.\")\n return", "def simple_root(self, i):", "def increment_node_value(self, val: Union[int, float], i: int, j: int,\n key: str = 'target') -> None:\n if key not in self._dist:\n raise KeyError('key parameter must be a valid distribution: ' +\n '[\\'initial\\', \\'current\\', \\'target\\']')\n\n elif not self._status['target'] or not self._status['initial']:\n raise ValueError(\n 'No initial or target distribution has been defined.')\n\n mat_i, mat_j = to_matrix(self._param['n_v'], np.array([i, j]))\n self._dist[key][mat_i, mat_j] += val * self._param['size_fraction']", "def update(self, loss, samples_id) -> None:\n self.sum_tree.bulk_update(torch.abs(loss + 1e-6).pow(self.alpha).cpu(), samples_id)", "def assignBinValue(logBin, sortedIdx, nodes, p):\n numRemained = len(nodes) # number of nodes not yet assigned\n binValue = 0\n start = 0\n while numRemained > 0:\n # number of nodes with the lowest value\n numLowest = int(math.ceil(p * numRemained))\n # assign bin value for nodes in (start, end)\n end = start + numLowest\n for i in range(start, end):\n node = nodes[sortedIdx[i]]\n logBin[node].append(binValue)\n start = end\n numRemained -= numLowest\n binValue += 1", "def nodalSum2(val,elems,tol):\n nodes = unique1d(elems)\n for i in nodes:\n wi = where(elems==i)\n vi = val[wi]\n ai,ni = average_close(vi,tol=tol)\n ai /= ni.reshape(ai.shape[0],-1)\n val[wi] = ai", "def update_root_statistics_and_totals(self):\n\n self.average_radius = 0\n self.total_root_length = 0\n\n total_radius = 0\n\n for root 
in self.root_dict.values():\n\n root.calculate_root_statistics()\n\n self.total_root_length += root.total_length\n\n total_radius += root.total_length * root.average_radius\n\n self.average_radius = total_radius / self.total_root_length", "def backup_nodes(self, value: float, backup_until=None):\n current = self.current\n parent = current.parent\n sum_from_leaf = value\n\n while parent is not backup_until:\n parent.visits += 1\n sum_from_leaf += current.score - parent.score\n\n # average rewards\n parent.reward += (sum_from_leaf - parent.reward) / parent.visits\n current, parent = parent, parent.parent", "def _prefix_sum(self, i: int) -> int:\n pref_sum = 0\n while i > 0:\n pref_sum += self.tree[i]\n i &= ~self._lsb(i) # Equivalent to i -= _lsb(i)\n \n return pref_sum", "def update_value(x, y, x_prev, y_prev, matrix_dict):\n to_add = matrix_dict[(x_prev, y_prev)]['accumulated_value']\n if to_add is None:\n to_add = matrix_dict[(x_prev, y_prev)]['value']\n values_sum = matrix_dict[(x, y)]['value'] + to_add\n\n # if no accumulated value is present in the cell yet, update it\n if matrix_dict[(x, y)]['accumulated_value'] is None:\n matrix_dict[(x, y)]['accumulated_value'] = values_sum\n matrix_dict[(x, y)]['parent'] = (x_prev, y_prev)\n return True\n # else, update only if the new accumulated value would be lower\n else:\n if values_sum < matrix_dict[(x, y)]['accumulated_value']:\n matrix_dict[(x, y)]['accumulated_value'] = values_sum\n matrix_dict[(x, y)]['parent'] = (x_prev, y_prev)\n return True\n return False", "def update_totals(self):\n # Reset counts to 0\n self.total_f = self.total_s = self.total_intra = self.total_mac_regular = self.total_mac_infected = \\\n self.total_mac_activated = self.total_regular_fast = self.total_regular_slow = self.total_infected_fast = \\\n self.total_infected_slow = self.total_activated_fast = self.total_activated_slow = self.total_f_degree = \\\n self.total_s_degree = self.total_activation = 0\n self.total_f_o2 = self.total_s_o2 = 0.0\n\n for node in self.node_list.values():\n # Get values from node\n fast_in_node = node.subpopulations[BACTERIA_FAST]\n slow_in_node = node.subpopulations[BACTERIA_SLOW]\n intra_in_node = node.subpopulations[BACTERIA_INTRACELLULAR]\n reg_mac_in_node = node.subpopulations[MACROPHAGE_REGULAR]\n inf_mac_in_node = node.subpopulations[MACROPHAGE_INFECTED]\n act_mac_in_node = node.subpopulations[MACROPHAGE_ACTIVATED]\n degree = node.degree\n o2_tens = node.oxygen_tension\n # Update relevant totals\n self.total_f += fast_in_node\n self.total_s += slow_in_node\n self.total_intra += intra_in_node\n self.total_mac_regular += reg_mac_in_node\n self.total_mac_infected += inf_mac_in_node\n self.total_mac_activated += act_mac_in_node\n self.total_regular_fast += fast_in_node * reg_mac_in_node\n self.total_regular_slow += slow_in_node * reg_mac_in_node\n self.total_infected_fast += fast_in_node * inf_mac_in_node\n self.total_infected_slow += slow_in_node * inf_mac_in_node\n self.total_activated_fast += fast_in_node * act_mac_in_node\n self.total_activated_slow += slow_in_node * act_mac_in_node\n # TODO - check usage of degree\n self.total_f_degree += fast_in_node * degree\n self.total_s_degree += slow_in_node * degree\n self.total_f_o2 += fast_in_node * (1/o2_tens)\n self.total_s_o2 += slow_in_node * o2_tens\n self.total_activation += reg_mac_in_node * inf_mac_in_node", "def visit_level(t: Tree, level: int, act: Callable[[Tree], None]) -> int:\n if t.value is None:\n pass\n elif level == 0:\n # This is like a leaf -> return 1 when we act on 
a tree.\n act(t)\n return 1\n else:\n # We aren't at the level that we want, so we need to progressively\n # decrease by 1 level each time we go to the next level.\n return sum(visit_level(s, level - 1, act) for s in t.children)\n\n # or...\n # if t.value is None:\n # pass\n # elif level == 0:\n # act(t)\n # return 1\n # else:\n # sum = 0\n # for subtree in t.children:\n # sum += visit_level(subtree, level - 1, act)\n # return sum", "def update(self, idx, x):\n while idx < len(self.bit):\n self.bit[idx] += x\n idx |= idx + 1", "def update(self, x, factor=0):\n if x is self.nil:\n return\n\n if x.balance > 1 or x.balance < -1:\n self.rebalance(x)\n return\n if x is self.root:\n return\n\n if x is x.parent.left:\n x.parent.balance += factor\n elif x is x.parent.right:\n x.parent.balance -= factor\n\n if x.parent.balance != 0:\n self.update(x.parent, factor)", "def sum(self):\n import numpy as np\n\n # covering zero-matrices\n if self.child_nodes == {}:\n return self.null_value\n\n def sum_rec(node, offset):\n # making sure the node exists\n if not node:\n return 0\n # checking whether the node is a leaf\n elif node.is_leaf():\n return np.sum(node.dtype.to_mat(node, offset))\n else:\n tmp_result = 0\n # the recursive call\n # checking for the kind of diagram. MTxxx?\n if self.offsets == {}:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node, 0, 0))\n # or edge-value dd?\n else:\n for edge_name in node.child_nodes:\n tmp_result += sum_rec(node.child_nodes[edge_name], node.dtype.to_mat(node,\n node.offsets[edge_name],\n offset))\n\n return tmp_result\n\n return sum_rec(self, None)", "def __init__(self, nums):\n self.nums,self.n =nums,len(nums)\n \n # self.sums is the sume of self value and ++lowbit values.\n # ++lowbit would be larger sibling or parent++ larger sibling.\n self.sums =[0]*(self.n+1)\n for i in xrange(self.n):\n self.add(i+1,nums[i]) # update self.sums", "def get_sum(self, node: Optional[TreeNode]) -> int:\n if not node:\n return 0\n l_sub_sum, r_sub_sum = self.get_sum(node.left), self.get_sum(node.right)\n self.ans += abs(l_sub_sum - r_sub_sum)\n\n return node.val + l_sub_sum + r_sub_sum", "def set_val(self, k, a):\n k += self.n - 1\n self.dat[k] = a\n while k > 0:\n k = (k - 1) // 2 # parent\n self.dat[k] = self.op(self.dat[k * 2 + 1], self.dat[k * 2 + 2])", "def addAtIndex(self, index: int, val: int) -> None:\n if 0 < index < self.node_count:\n prev_neighbor = self.get_node(index - 1)\n next_neighbor = prev_neighbor.next\n temp_node = MyListNode(val)\n prev_neighbor.next = temp_node\n temp_node.next = next_neighbor\n temp_node.prev = prev_neighbor\n next_neighbor.prev = temp_node\n self.node_count += 1\n elif index == 0:\n self.addAtHead(val)\n #self.node_count += 1\n elif index == self.node_count:\n self.addAtTail(val)\n #self.node_count += 1", "def update(self, tree_path, value):\n\t\traise NotImplementedError", "def update_nodes(self):\n\n L = self.level\n P = L.prob\n\n # only if the level has been touched before\n assert L.status.unlocked\n\n # get number of collocation nodes for easier access\n M = self.coll.num_nodes\n\n # gather all terms which are known already (e.g. 
from the previous iteration)\n # this corresponds to u0 + QF(u^k) - QdF(u^k) + tau\n\n # get QF(u^k)\n integral = self.integrate()\n for m in range(M):\n # get -QdF(u^k)_m\n for j in range(1, M + 1):\n integral[m] -= L.dt * self.QI[m + 1, j] * L.f[j]\n\n # add initial value\n integral[m] += L.u[0]\n # add tau if associated\n if L.tau[m] is not None:\n integral[m] += L.tau[m]\n\n # do the sweep\n for m in range(0, M):\n # build rhs, consisting of the known values from above and new values from previous nodes (at k+1)\n rhs = P.dtype_u(integral[m])\n for j in range(1, m + 1):\n rhs += L.dt * self.QI[m + 1, j] * L.f[j]\n\n # implicit solve with prefactor stemming from the diagonal of Qd\n L.u[m + 1] = P.solve_system(\n rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]\n )\n # update function values\n L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])\n\n # indicate presence of new values at this level\n L.status.updated = True\n\n return None", "def leaf_count(T):\n if T.is_leaf:\n return 1\n else:\n# s = 0\n# for child in T:\n# s += leaf_count(child)\n# return s\n return reduce(add, map(leaf_count, T))", "def minimum_path_sum(self, root) -> int:\n\n def minimum_path_sum_aux(root, path=None):\n path.append(root.value)\n new_path = path[:]\n\n # Stop condition\n if root.is_leaf():\n return\n else:\n if root.left is not None:\n minimum_path_sum_aux(root.left, path=path)\n elif root.right is not None:\n minimum_path_sum_aux(root.right, path=path)\n\n if root.right is not None and root.left is not None:\n paths.append(new_path)\n minimum_path_sum_aux(root.right, path=new_path)\n\n paths = [[]]\n\n minimum_path_sum_aux(root, path=paths[0])\n return min([sum(path) for path in paths])", "def increment(self, index, value):\n self._inrange(index)\n if value==0:\n return\n found,ii = self._find_index(index)\n if found:\n self.value[ii] += value\n if self.value[ii] == 0:\n del self.index[ii]\n del self.value[ii]\n else:\n self.index.insert(ii, index)\n self.value.insert(ii, value)", "def get_sum(self):\n return self.__tree[0]", "def value_at(self, index):\n if index==0:\n return self.head.val\n\n temp_node = self.head\n for _ in range(index):\n temp_node = temp_node.next\n return temp_node.val", "def update_attr(self):\n\n # Retrieve all current values\n all_values = nx.get_node_attributes(self.G, 'value')\n\n new_values = {}\n\n # Loop over all nodes\n for i in range(self.n_v):\n\n # Obtain list of neighbors\n neighbors = list(nx.all_neighbors(self.G, i))\n\n # Compute part dependent on own node\n val_i = all_values[i]\n new_value = (1 - self.eps) * (1 - self.a * val_i * val_i)\n\n # Compute part dependent on neighbor nodes\n neighbors_value = 0\n for neighbor in neighbors:\n val_n = all_values[neighbor]\n neighbors_value += (1 - self.a * val_n * val_n)\n\n # Catch nodes without neighbors\n try:\n new_value += neighbors_value * (self.eps/len(neighbors))\n except ZeroDivisionError:\n pass\n\n # Save new value\n new_values[i] = {'value': new_value}\n\n nx.set_node_attributes(self.G, new_values)", "def addAtIndex(self, index, val):\n if index > 0 and not self.head:\n return\n \n tmp = Node(val)\n if index == 0 and not self.head:\n self.head = tmp\n self.tail = self.head\n return\n if index == 0 and self.head:\n tmp.nxt = self.head\n self.head = tmp \n return\n \n \n cur = self.head\n i = 1\n while i < index and cur:\n cur = cur.nxt\n i+=1\n if i == index:\n if not cur:\n if self.tail:\n self.tail.nxt = tmp\n self.tail = tmp\n else:\n self.head = tmp\n 
self.tail = tmp\n# print(\"KMG 1\")\n else:\n# print(\"inserting after the value %d\" %cur.val)\n tmp.nxt = cur.nxt\n cur.nxt = tmp\n if self.tail == cur:\n self.tail = tmp", "def right_child_idx(idx):\n return (idx + 1) << 1", "def recursive_sum(lst):\n\n if lst == []:\n return 0\n\n else:\n\n return lst[0] + recursive_sum(lst[1:])" ]
[ "0.70218706", "0.6763772", "0.65720445", "0.6464258", "0.634624", "0.62440336", "0.62352777", "0.6203399", "0.6152715", "0.61511225", "0.61511225", "0.61511225", "0.61307853", "0.6130343", "0.6128592", "0.61171615", "0.6116346", "0.60709554", "0.60432565", "0.60112834", "0.59804213", "0.5979022", "0.59768355", "0.5935433", "0.58912987", "0.5850242", "0.58430946", "0.5835884", "0.5799235", "0.5785629", "0.57015866", "0.5688131", "0.5675977", "0.56494683", "0.5640734", "0.56334865", "0.5615012", "0.56042475", "0.5600265", "0.55939347", "0.5572122", "0.5568287", "0.5565791", "0.5544772", "0.5536994", "0.553172", "0.5531517", "0.55221397", "0.55192703", "0.5501605", "0.5497963", "0.54840875", "0.54664385", "0.54661995", "0.54648995", "0.5458413", "0.5452185", "0.54518586", "0.54420483", "0.543496", "0.54288584", "0.5393533", "0.5393509", "0.53871036", "0.5380236", "0.5359151", "0.5351089", "0.53501225", "0.53480506", "0.5329477", "0.532425", "0.53242064", "0.531769", "0.53033525", "0.5271471", "0.5255981", "0.5226566", "0.5226548", "0.5224268", "0.5220988", "0.52171034", "0.5206211", "0.5185057", "0.51845783", "0.51766664", "0.51762086", "0.51732767", "0.5167954", "0.51661", "0.51558673", "0.51549655", "0.5111906", "0.5111246", "0.51078254", "0.510046", "0.50941765", "0.50913054", "0.5086276", "0.5083782", "0.5081434" ]
0.75952435
0
Given an arbitrary number of functions, we create a pipeline where the output is piped between functions. You can also specify a tuple of arguments that should be passed to functions in the pipeline. The first arg is always the output of the previous function.
def PipeLine(*funcs, **kwargs):
    def wrapper(*data):
        if len(funcs) == 1:
            combinedArgs = data + kwargs.get(funcs[-1].__name__, tuple())
            return funcs[-1](combinedArgs)
        else:
            combinedArgs = kwargs.get(funcs[-1].__name__, tuple())
            if combinedArgs != ():
                del kwargs[funcs[-1].__name__]
            return funcs[-1](PipeLine(*funcs[:-1], **kwargs)(*data), *combinedArgs)
    return wrapper
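A hedged usage sketch: `total`, `double`, and `add` are hypothetical functions invented for illustration. Two quirks follow directly from the code above: the first function in the chain receives its positional data packed into a single tuple (the base case calls `funcs[-1](combinedArgs)` rather than unpacking), and because `wrapper` deletes consumed entries from the shared `kwargs` dict, a pipeline built with extra arguments only works for its first call.

def total(args):        # first stage receives the packed data tuple
    return sum(args)

def double(x):
    return 2 * x

def add(x, n):
    return x + n

# Extra args for a stage are keyed by that function's __name__.
pipe = PipeLine(total, double, add, add=(10,))
print(pipe(1, 2, 3))    # sum((1, 2, 3)) -> 6, doubled -> 12, plus 10 -> 22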
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pipe(*functions):\n\n return reduce(compose, functions, identity)", "def make_pipeline(steps):\n def compose2(f, g):\n return lambda x: g(f(x))\n return functools.reduce(compose2, steps)", "def tee_pipe(*funcs: Tuple[Callable[[GT], GS], ...]) -> Callable[[GT], GT]:\n\n piped = compose(*funcs)\n\n def _tee_pipe(arr):\n a, b = itertools.tee(arr)\n piped(a)\n return b\n\n return _tee_pipe", "def pipeline(\n first: Callable[[Any], Any],\n second: Callable[[Any], Any],\n *rest: Callable[[Any], Any]\n) -> Callable[[Any], Any]:\n return compose(*reversed(rest), second, first)", "def pipeline(self, *funcs) -> \"fn\":\n return self._mod.pipeline(self, *funcs)", "def ReducePipeline(*funcs, **kwargs):\n def accum(val, func):\n funcArgs = kwargs.get(func.__name__, tuple())\n if hasattr(val, \"__call__\"):\n return func(val(), *funcArgs)\n else:\n return func(val, *funcArgs)\n\n def wrapper(*data):\n newFuncs = (partial(funcs[0], *data),) + funcs[1:]\n return reduce(accum, newFuncs)\n return wrapper", "def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)", "def m_pipe(val, *fns, **kwargs):\n kw = kwargs\n _val = val\n for fn in fns:\n _val = fn(_val, **kw)\n return _val", "def compose(*functions):\n head, *tail = functions\n return head if not tail else lambda *args, **kwargs: head(compose(*tail)(*args, **kwargs))", "def compose1(*functions: _ComposeArg[_T]) -> _Transform[_T]:\n def composition(arg, **kwargs):\n for f in reversed(functions):\n if isinstance(f, tuple):\n f, kws = f\n arg = f(arg, **{kw: kwargs[kw] for kw in kws})\n else:\n arg = f(arg)\n return arg\n return composition", "def compose(*funcs):\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')", "def compose(*fns):\n return functools.reduce(lambda f,g: lambda x: f(g(x)), fns)", "def compose(*funcs):\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError(\"Composition of empty sequence not supported.\")", "def compose(*funcs):\n if not funcs:\n return identity\n\n def wrapper(*args, **kwargs):\n fst, *rest = funcs\n ret = fst(*args, **kwargs)\n\n for f in rest:\n ret = f(ret)\n\n return ret\n\n return wrapper", "def compose(*funcs):\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')", "def compose(*fs) -> Callable:\n return lambda x: reduce(flip(funcall), reversed(fs), x)", "def compose(*functions):\n return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)", "def chain_funcs(funcs):\n return lambda x: reduce(lambda f1, f2: f2(f1), funcs, x)", "def _pipe_and_accumulate(val, fns):\n for fn in fns:\n val = fn(val)\n yield val", "def compose(*funcs):\n return reduce(lambda f, g: lambda x: f(g(x)), funcs[::-1])", "def compose(*fns):\n\n if len(fns) == 0:\n raise ValueError(\"At least one function must be provided\")\n\n def composite(*args):\n x = fns[-1](*args)\n for fn in reversed(fns[0:-1]):\n x = fn(x)\n return x\n\n return composite", "def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:\n\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\"%s is both the pipe target and a keyword \" \"argument\" % target)\n kwargs[target] = self\n return 
func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)", "def compose(*funcs):\n def _compose(g, f):\n return lambda *args, **kwargs: g(f(*args, **kwargs))\n return reduce(_compose, funcs)", "def pipeline(filters):\n pipe = partial(reduce, lambda acc, f: f(acc), filters)\n bil = bilateral()\n\n def procme(img):\n img = bil(img)\n return pipe(img)\n\n return lambda img: map(procme, [img[:, :, 0], img[:, :, 1], img[:, :, 2]])", "def compose(*funcs: Callable[[T], T]) -> Callable[[T], T]:\n return functools.reduce(lambda g, f: lambda x: f(g(x)), funcs, lambda x: x)", "def compose(*fns):\n import functools\n\n def _apply(x, f):\n if isinstance(x, tuple):\n return f(*x)\n else:\n return f(x)\n\n def comp(*args):\n return functools.reduce(_apply, fns, args)\n\n return comp", "def composition(func_list):\n return reduce(\n lambda (f1, args1), (f2, args2) : (lambda x : f1(f2(x, *args2), *args1)), \n func_list,\n lambda x : x\n )", "def compose(*funcs):\n if not funcs:\n return identity\n else:\n f0 = funcs[0]\n def composed(_):\n # f_1 o f_2 o ... o f_n\n pre_composed = compose(*funcs[1:])\n return f0(pre_composed(_))\n return composed", "def compose_many(*fs):\n return reduce(compose, fs)", "def lift(f: Callable[..., Data]) -> LiftedFunc:\n def inner(*args: Result) -> Result:\n out = []\n for args1 in itertools.product(*args):\n val = f(*args1)\n out.append(val)\n return out\n return inner", "def mult_out(func):\n\n def multiple_output(*args, **kwargs):\n return func(*args, **kwargs)\n\n return multiple_output", "def sequential(*layers: tp.Callable[..., tp.Any]) -> tp.Callable[..., tp.Any]:\n\n def call(inputs, *args, **kwargs):\n\n out = inputs\n for i, layer in enumerate(layers):\n if i == 0:\n out = layer(out, *args, **kwargs)\n else:\n out = layer(out)\n\n if not isinstance(layer, module.Module):\n if hooks.summaries_active():\n name = utils.get_name(layer)\n\n path = module.get_module_path()\n path = path if path is not None else ()\n\n hooks.add_summary(path + (name,), layer, out)\n return out\n\n return call", "def run_pipeline(functionlist, article, **kwargs):\n nerror = 0\n if kwargs.get(\"debug\"):\n print(\"DEBUG: running pipeline on \", article['id'], file=sys.stderr)\n for func in functionlist:\n ret = func(article, **kwargs)\n if ret:\n nerror += 1\n return nerror", "def compose(*funcs: Tuple[Callable[[GT], GS], ...]) -> Callable[[GT], GS]:\n\n def _compose(data):\n result = data\n for i, f in enumerate(funcs):\n\n def _composition_part(r):\n try:\n return f(result)\n except Exception as e:\n raise e\n msg = str(e)\n msg += \"\\nduring composition:\"\n msg += \"\\n ({}) f: {}\".format(i, f)\n msg += \"\\n args: {} {}\".format(result, result.__class__)\n raise type(e)(msg) from e\n\n result = _composition_part(result)\n return result\n\n return _compose", "def delay_pipeline(pipeline, pipe):\n _pipeline = delayed(pipeline[0].curry())(pipe)\n for task in pipeline[1:]:\n _pipeline = delayed(task.curry())(_pipeline)\n\n return _pipeline", "def pipe(self, func, *args, **kwargs):\n return func(self, *args, **kwargs)", "def multichannel(fcn):\n return lambda args: (fcn(*args), )", "def compose(x, funcs, *args, order=\"_order\", **kwargs):\n key = lambda x: getattr(x, order, 0)\n for func in sorted(listify(funcs), key=key):\n x = func(x, *args, **kwargs)\n return x", "def pipe(self, func: Callable, *args, **kwargs) -> Any:\n return func(self, *args, **kwargs)", "def chain_layer_functions(input_layer, functions):\n return reduce(lambda layer, func: func(layer), functions, 
input_layer)", "def chain(*streams):\n return Stream(itertools.chain(*streams))", "def compose(transforms):\n assert isinstance(transforms, list)\n for transform in transforms:\n assert callable(transform), \"list of functions expected\"\n\n def composition(obj):\n \"Composite function\"\n for transform in transforms:\n obj = transform(obj)\n return obj\n return composition", "def _convert_from_function_io_to_pipeline_io(self, function_io: List[entities.FunctionIO]) -> List[PipelineNodeIO]:\n pipeline_io = []\n for single_input in function_io:\n pipeline_io.append(\n PipelineNodeIO(port_id=str(uuid.uuid4()),\n input_type=single_input.type,\n name=single_input.name,\n color=None,\n display_name=single_input.name,\n default_value=single_input.value,\n actions=single_input.actions if single_input.actions is not None else []))\n return pipeline_io", "def compose(\n f: Callable[[Any], Any],\n g: Callable[[Any], Any],\n *functions: Callable[[Any], Any]\n) -> Callable[[Any], Any]:\n fs: Tuple[Callable, ...] = ()\n for h in (f, g) + functions:\n if isinstance(h, Composition):\n fs += h.functions\n else:\n fs += (h,)\n return Composition(fs)", "def __call__(self, *pipeline_factories, exceptions=None, wait=True):\n return self.run(*pipeline_factories, exceptions=exceptions, wait=wait)", "def pipe(*args):\n # Bail out early if there is only one item\n if len(args) == 1:\n return Graph(args)\n\n graph = Graph()\n graph._pipe(args)\n return graph", "def as_args(function):\n return lambda x: function(*x)", "def mcompose(*mfuncs):\n return functools.partial(foldl, bind, tuple(reversed(mfuncs)))", "def map_functions(x, functions):\n res = []\n for func in functions:\n res.append(func(x))\n return res", "def example_function(*args, **kwargs):\n logging.info('example_function executed with args: %r, kwargs: %r',\n args, kwargs)\n return l_combiner(args)", "def pipeline(func):\n @wraps(func)\n def process(img_or_iterable, *args, **kwargs):\n if isinstance(img_or_iterable, (SliceableIterable, FramesSequence)):\n _len = len(img_or_iterable)\n s = SliceableIterable(img_or_iterable, range(_len), _len)\n s._proc_func = lambda image: func(image, *args, **kwargs)\n return s\n else:\n # Fall back on normal behavior of func, interpreting input\n # as a single image.\n return func(img_or_iterable)\n\n if process.__doc__ is None:\n process.__doc__ = ''\n process.__doc__ = (\"This function has been made pims-aware. When passed\\n\"\n \"a pims reader or SliceableIterable, it will return a \\n\"\n \"new SliceableIterable of the results. 
When passed \\n\"\n \"other objects, its behavior is \"\n \"unchanged.\\n\\n\") + process.__doc__\n return process", "def transform(stuff, pipelines=DEFAULT_PIPELINE_NAMES):\n global _pipelines\n for name in pipelines:\n p = load_pipeline(name)\n stuff = p.transform(stuff)\n return stuff", "def compose(inner, *others):\n for i, func in enumerate((inner,)+others):\n assert(callable(func)), \"function #{0} is not callable.\".format(i)\n \n accumulator = inner\n for func in others:\n accumulator = _compose(accumulator, func) \n return accumulator", "def compose1(f, g):\n return lambda x: f(g(x))", "def multimap(funcs, iterable):\n\n for f in funcs:\n iterable = map(f, iterable)\n\n return iterable", "def make_product(*functions: Callable, exponents: Optional[Sequence] = None) -> Callable:\n if exponents is None:\n exponents = np.ones(shape=(len(functions)))\n else:\n assert len(functions) == len(exponents), 'the length of exponents must be the ' \\\n 'same as the number of given functions'\n\n def product_function(*args, **kwargs):\n return np.prod([functions[i](*args, **kwargs)**exponents[i]\n for i in range(len(exponents))], axis=0)\n\n return product_function", "def multi_apply(func, *args, **kwargs):\n\n pfunc = partial(func, **kwargs) if kwargs else func\n map_results = map(pfunc, *args)\n return tuple(map(list, zip(*map_results)))", "def pipe_line(input_stream: collections.Iterable, transformers=[], items_count=-1,\r\n chunk_size=10 ** 5) -> collections.Iterable:\r\n output_stream = input_stream\r\n dw = DataWrap(output_stream, items_count=items_count)\r\n for transformer in transformers:\r\n if hasattr(transformer, 'transform_chunks_stream'):\r\n if not dw.is_stream_chunkified:\r\n dw.chunkify(chunk_size)\r\n transform_function = transformer.transform_chunks_stream\r\n else:\r\n if dw.is_stream_chunkified:\r\n dw.dechunkify()\r\n\r\n if hasattr(transformer, 'transform'):\r\n transform_function = transformer.transform\r\n else:\r\n transform_function = transformer\r\n\r\n dw.data_stream = transform_function(dw.data_stream)\r\n # output_stream_copy, output_stream = tee(output_stream)\r\n # pass_consumer(output_stream_copy)\r\n\r\n dw.dechunkify()\r\n return dw.data_stream", "def compose(self, *funcs) -> \"fn\":\n return self._mod.compose(self, *funcs)", "def pipeline(ctx):\n asyncio.run(pipeline_impl(ctx.obj[\"config\"]))", "def filter_compose(*fns: T.Callable[[T.Any], bool]):\n def composite(x):\n for f in fns:\n if not f(x):\n return False\n return True\n\n return composite", "def map_functions(x, functions):\n res = []\n for func in functions:\n res.append(map(func,x))\n return res", "def _apply_in_order(functions, input_data):\n return reduce(lambda res, func: func(res), functions, input_data)", "def pipeline(inputfile, outputfile, functions, **kwargs):\n if not functions:\n raise ValueError('Must have at least one function')\n\n tmpfiles = []\n try:\n previous = inputfile\n for name, f in functions:\n logging.debug(name)\n vrt = f(previous)\n current = vrt.get_tempfile(suffix='.vrt', prefix='gdal')\n tmpfiles.append(current)\n previous = current.name\n logging.info('Rendering reprojected image')\n return vrt.render(outputfile=outputfile, **kwargs)\n finally:\n for f in tmpfiles:\n f.close()", "def merge_n_reduce(\n function: typing.Callable, arity: int, data: list\n) -> typing.Any:\n while len(data) > 1:\n data_chunk = data[:arity]\n data = data[arity:]\n data.append(function(*data_chunk))\n return data[0]", "def pipe(self, *nodes):\n if len(nodes) < 2:\n raise ValueError(\"Provide 
at least two nodes to connect\")\n\n self._pipe(nodes)\n\n return self", "def runner(func, iterable, arguments, local=False):\n if local:\n return [func(i, *arguments) for i in iterable]\n else:\n if iterable:\n return group(func.s(i, *arguments) for i in iterable)().get()\n else:\n # group()() returns None if group is called with no arguments,\n # leading to an AttributeError with get().\n return []", "def compose(f, g):\n return lambda *args, **kwargs: f(g(*args, **kwargs))", "def test_invoke_anonymous_pipe():\n\n def processor_a(app, items):\n items = list(items)\n assert items == [holocron.Item({\"a\": \"b\"})]\n items[0][\"x\"] = 42\n yield from items\n\n def processor_b(app, items):\n items = list(items)\n assert items == [holocron.Item({\"a\": \"b\", \"x\": 42})]\n items.append(holocron.Item({\"z\": 13}))\n yield from items\n\n def processor_c(app, items):\n items = list(items)\n assert items == [\n holocron.Item({\"a\": \"b\", \"x\": 42}),\n holocron.Item({\"z\": 13}),\n ]\n yield from items\n\n testapp = holocron.Application()\n testapp.add_processor(\"processor_a\", processor_a)\n testapp.add_processor(\"processor_b\", processor_b)\n testapp.add_processor(\"processor_c\", processor_c)\n\n stream = testapp.invoke(\n [\n {\"name\": \"processor_a\"},\n {\"name\": \"processor_b\"},\n {\"name\": \"processor_c\"},\n ],\n [holocron.Item({\"a\": \"b\"})],\n )\n\n assert next(stream) == holocron.Item({\"a\": \"b\", \"x\": 42})\n assert next(stream) == holocron.Item({\"z\": 13})\n\n with pytest.raises(StopIteration):\n next(stream)", "def compute_over_actions(f, *args):\n\n '''\n # show the middle results\n for a in zip(*args):\n print(\"a:\", a)\n r = f(*a)\n print(\"r:\", r)\n '''\n\n return sum(f(*a) for a in zip(*args))", "def map_multi_args(self, func, iterable, chunksize=None):\n assert self._state == RUN\n return self.map_async(one_to_many(func), iterable, chunksize).get()", "def pipe(*args, **kwargs):\n return parser(*args, **kwargs)", "def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))", "def test_generate_pipeline_code():\n pipeline = ['KNeighborsClassifier',\n ['CombineDFs',\n ['GradientBoostingClassifier',\n 'input_matrix',\n 38.0,\n 0.87],\n ['GaussianNB',\n ['ZeroCount',\n 'input_matrix']]],\n 18,\n 33]\n\n expected_code = \"\"\"make_pipeline(\n make_union(\n make_union(VotingClassifier([('branch',\n GradientBoostingClassifier(learning_rate=1.0, max_features=1.0, n_estimators=500)\n )]), FunctionTransformer(lambda X: X)),\n make_union(VotingClassifier([('branch',\n make_pipeline(\n ZeroCount(),\n GaussianNB()\n )\n )]), FunctionTransformer(lambda X: X))\n ),\n KNeighborsClassifier(n_neighbors=5, weights=\"distance\")\n)\"\"\"\n\n assert expected_code == generate_pipeline_code(pipeline)", "def _for_each_generator(self,\n func: Callable[..., Any],\n *args: Iterable[Any]) -> List[Any]:\n return [func(gen, *args_for_func) for gen, args_for_func in zip(\n self._generators, zip(*args))]", "def apply(self, *input_):\n result = None\n for function in reversed(self._functions):\n if result is None:\n result = function(*input_)\n else:\n result = function(result)\n return result", "def trace_pipeline(pipe):\n _patch_multi_exec_execute(pipe)", "def _worker(pipelines: List[Pipeline], source: Queue, sink: Queue):\n pipelines = list(pipelines)\n for i, p in enumerate(pipelines):\n if isinstance(p, ConvertT2S):\n pipelines[i] = ConvertT2S()\n\n def processor(article):\n for p in pipelines:\n article = p(article)\n return article\n\n while True:\n article 
= source.get()\n if article == 'EXIT':\n return\n article = list(processor(article))\n sink.put(article)", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def make_pipeline():\n # exchange = Fundamentals.exchange_id.latest\n # nyse_filter = exchange.eq('NYS')\n symbol_filter = StaticSids([TRADING_SID])\n set_benchmark(TRADING_SID) \n # volume_filter = VolumeFilter(\n # inputs=[USEquityPricing.volume],\n # window_length=1,\n # mask=symbol_filter\n # )\n\n # is_setup = volume_filter & alpha_long_weekly & alpha_long_daily\n weekly_high = WeeklyHigh(\n inputs=[USEquityPricing.high],\n mask=symbol_filter\n )\n weekly_low = WeeklyLow(\n inputs=[USEquityPricing.low],\n mask=symbol_filter\n )\n weekly_classifier = WeeklyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n )\n daily_classifier = DailyClassifier(\n inputs=[\n USEquityPricing.open,\n USEquityPricing.high,\n USEquityPricing.low,\n USEquityPricing.close\n ],\n mask=symbol_filter\n\n )\n\n pipe = Pipeline(\n screen=symbol_filter, # & (daily_classifier > 0),\n columns={\n 'daily_classifier': daily_classifier,\n 'daily_high': USEquityPricing.high.latest,\n 'daily_low': USEquityPricing.low.latest,\n 'weekly_classifier': weekly_classifier,\n 'weekly_high': weekly_high,\n 'weekly_low': weekly_low\n }\n )\n return pipe", "def _make_pipeline(preprocessors, classifier):\n if isinstance(preprocessors, list):\n # support only preprocessing of lenght 2\n return make_pipeline(preprocessors[0], preprocessors[1], classifier)\n if preprocessors is None:\n return make_pipeline(classifier)\n\n return make_pipeline(preprocessors, classifier)", "def construct(args,\n **kwargs):\n kw = parse_args(args)\n kw.update(kwargs)\n return (build_pipeline(**kw),\n kw)", "def lift(func: Callable) -> Callable:\n return lambda f: compose2(func, f)", "def func_caller(*funcs):\n\n def caller():\n \"\"\"\n Call each of `funcs`.\n \"\"\"\n\n for func in funcs:\n func()\n\n return caller", "def chain(*readers):\n\n def reader():\n rs = []\n for r in readers:\n rs.append(r())\n\n for e in itertools.chain(*rs):\n yield e\n\n return reader", "def construct_pipelines(config):\n\n\n def _get_argument_combinations(arguments):\n \"\"\" Utility to function to obtain all permutations of preprocessing arguments. \"\"\"\n arg_names = sorted(arguments)\n combinations = itertools.product(*(arguments[arg] for arg in arg_names))\n combinations = [dict(zip(arg_names, arg_values)) for arg_values in combinations]\n return combinations\n\n options = {}\n for key in config.keys():\n # 1. 
Check if we got also_skip\n if 'also_skip' in config[key] and config[key]['also_skip']:\n config[key].pop('also_skip')\n options[key] = _get_argument_combinations(config[key])\n options[key].append(None)\n else:\n options[key] = _get_argument_combinations(config[key])\n\n return _get_argument_combinations(options)", "def filter_f(fns, ltaper, lowerp, upperp, utaper, eqband, eqltaper, equtaper, npow, bindir):\n # filtercmd = bindir+\"/filter4\"\n filtercmd = bindir + \"/filter4 1>/dev/null\"\n for src, tar, eqtar in fns:\n p = sp.Popen(filtercmd, shell=True, bufsize=0, stdin=sp.PIPE, stdout=None)\n child = p.stdin\n print >> child, ltaper, lowerp, upperp, utaper, npow, src, tar + '_tmp'\n err = child.close()\n ret = p.wait()\n if err or ret != 0:\n raise RuntimeError, '%r failed with exit code %d' % (filtercmd, err)\n p = sp.Popen(filtercmd, shell=True, bufsize=0, stdin=sp.PIPE, stdout=None)\n child = p.stdin\n print >> child, eqltaper, eqband[0], eqband[1], equtaper, npow, tar + '_tmp', eqtar + '_tmp'\n err = child.close()\n ret = p.wait()\n if err or ret != 0:\n raise RuntimeError, '%r failed with exit code %d' % (filtercmd, err)\n return 1", "def compose1(f, g):\n def fn(x):\n return f(g(x));\n return fn;", "def demoChained():\n \n class ChainingSOFP(StreamingOutputFormattingProcess):\n def onStdoutLine(self, tag, line):\n super().onStdoutLine(tag, \"[{}] {}\".format(tag, line.decode()).encode())\n\n def onStderrLine(self, tag, line):\n super().onStderrLine(tag, \"[{}] {}\".format(tag, line.decode()).encode())\n if tag == \"main\" and b\"trigger\" in line:\n cmdChained = \"bash -c 'echo chained stdout && sleep 2 && echo stderr chained 1>&2 && sleep 1 && echo done chained'\"\n self.run(cmdChained, \"chained\")\n\n cmdInitial = \"bash -c 'echo stdout && sleep 1 && echo trigger chained 1>&2 && sleep 1 && echo more output && sleep 1 && echo done && exit 3'\"\n Spec = ChainingSOFP.OutputSpec\n sofp = ChainingSOFP(Spec(\"STDOUT: {}\"), Spec(\"STDERR: {}\", sys.stderr))\n status = sofp.run(cmdInitial, \"main\")\n print(\"Initial finished with status: {}.\".format(status))", "def tuple2func(func1, func2):\n return lambda e: (func1(e), func2(e))", "def two():\n return lambda f: lambda x: f(f(x))", "def apply_composition_function(x, functions):\n data_type = type(x)\n if len(functions) == 1:\n return data_type(map(functions[0], x))\n else:\n return data_type(map(functions[0], apply_composition_function(x, functions[1:])))", "def fission_pipes():\n def _pipes(num):\n return [base.BasePipe(i) for i in range(1, num + 1)]\n yield _pipes\n base.reset()", "def mapcat(f, seqs):\n return concat(map(f, seqs))", "def sync_filter(func, *iterables):\n return tuple(zip(*tuple(i for i in zip(*iterables) if func(*i)))) or ((),) * len(\n iterables\n )", "def compose(*ops):\n if len(ops) == 0:\n return [0, 1, 2, 3, 4, 5, 6, 7]\n if len(ops) == 1:\n return ops[0]\n if len(ops) == 2:\n op1, op2 = ops\n return [op2[op1[v]] for v in range(8)]\n op1 = ops[0]\n rest = ops[1:]\n return compose(op1, compose(*rest))", "def get_pipeline(features, to_matrix=True):\n feature_names = []\n for feature in features:\n feature_names += feature[1].FEATS\n if to_matrix:\n return Pipeline(features + [('transform', ToMatrix(features=feature_names)), ('norm', MinMaxScaler())])\n else:\n return Pipeline(features)", "def compose1(f, g):\n def h(x):\n return f(g(x))\n return h", "def test_pipe_simple():\n\n def transform(array):\n \"\"\"Turns the (n,2) array into a (n,4) array.\"\"\"\n assert array.shape == (10, 2)\n new = 
Array(columns=\"abcd\")\n for x, y in array:\n new.append([x, y, x + y, x * y])\n return new\n\n group = Pipe(Group({\"a\": Numerical(), \"b\": Numerical()}), transform)\n for _ in range(10):\n group.set_a(1e-6 + random())\n group.set_b(1e-6 + random())\n group.push()\n\n array = group.array()\n assert array.shape == (10, 4)\n\n for row in array:\n assert row[0] > 0.0 and row[1] > 0.0\n assert row[2] == row[0] + row[1]\n assert row[3] == row[0] * row[1]", "def map(self, func, *sequences):\n return self.mapper().map(func, *sequences)" ]
[ "0.8024059", "0.7620117", "0.74363697", "0.7264736", "0.71780145", "0.6964314", "0.68261856", "0.67939293", "0.6666394", "0.6643853", "0.66330147", "0.66295445", "0.6628641", "0.6607316", "0.6598455", "0.6594299", "0.6561477", "0.6514276", "0.6458189", "0.64448345", "0.6443687", "0.64284945", "0.6418923", "0.639723", "0.6334634", "0.6291856", "0.61759025", "0.61742944", "0.6152304", "0.60802", "0.6001598", "0.59778386", "0.59643453", "0.5947646", "0.594448", "0.59374225", "0.59183747", "0.5901098", "0.58605206", "0.5834769", "0.58265316", "0.57989573", "0.57564586", "0.5754449", "0.5733305", "0.5705631", "0.56490207", "0.5647777", "0.56175095", "0.55963165", "0.55795556", "0.5574567", "0.5517032", "0.5510109", "0.55061877", "0.5496314", "0.54797673", "0.54703724", "0.54666", "0.5446357", "0.54364145", "0.5431218", "0.543082", "0.54154813", "0.5411291", "0.54024726", "0.5395325", "0.5393743", "0.53933173", "0.5365167", "0.53593147", "0.5350373", "0.53135604", "0.53038365", "0.5298095", "0.52970016", "0.5296998", "0.52954257", "0.5286439", "0.52850556", "0.52847904", "0.52826524", "0.5267868", "0.5267249", "0.52634656", "0.52629036", "0.52584845", "0.52490205", "0.5225086", "0.5219629", "0.52129865", "0.52116907", "0.52077603", "0.5198036", "0.5191138", "0.5185073", "0.5174989", "0.515299", "0.5146749", "0.51436615" ]
0.74768734
2
Given an arbitrary number of functions, we create a pipeline where the output is piped between functions. You can also specify a tuple of arguments that should be passed to each function in the pipeline. The first argument is always the output of the previous function. This version uses the reduce builtin instead of recursion.
def ReducePipeline(*funcs, **kwargs):
    def accum(val, func):
        funcArgs = kwargs.get(func.__name__, tuple())
        if hasattr(val, "__call__"):
            return func(val(), *funcArgs)
        else:
            return func(val, *funcArgs)
    def wrapper(*data):
        newFuncs = (partial(funcs[0], *data),) + funcs[1:]
        return reduce(accum, newFuncs)
    return wrapper
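A minimal usage sketch for the document above (illustrative only, not part of the dataset record). It assumes Python 3, where partial and reduce must be imported from functools, and the helpers double and add are hypothetical, existing only for this example:

from functools import partial, reduce

def double(x):
    return x * 2

def add(x, n):
    return x + n

# Extra per-function arguments are keyed by the function's __name__,
# so add receives 10 as its second argument here.
pipeline = ReducePipeline(double, add, add=(10,))
print(pipeline(3))  # double(3) -> 6, then add(6, 10) -> 16

Note one quirk of the definition: a tuple keyed to the first function's name is never consumed, because funcs[0] is wrapped in partial with the pipeline input only; only the remaining functions look up their extra arguments.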
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pipe(*functions):\n\n return reduce(compose, functions, identity)", "def make_pipeline(steps):\n def compose2(f, g):\n return lambda x: g(f(x))\n return functools.reduce(compose2, steps)", "def compose(*fns):\n return functools.reduce(lambda f,g: lambda x: f(g(x)), fns)", "def compose(*functions):\n return functools.reduce(lambda f, g: lambda x: f(g(x)), functions, lambda x: x)", "def chain_funcs(funcs):\n return lambda x: reduce(lambda f1, f2: f2(f1), funcs, x)", "def compose(*fs) -> Callable:\n return lambda x: reduce(flip(funcall), reversed(fs), x)", "def tee_pipe(*funcs: Tuple[Callable[[GT], GS], ...]) -> Callable[[GT], GT]:\n\n piped = compose(*funcs)\n\n def _tee_pipe(arr):\n a, b = itertools.tee(arr)\n piped(a)\n return b\n\n return _tee_pipe", "def compose(*funcs: Callable[[T], T]) -> Callable[[T], T]:\n return functools.reduce(lambda g, f: lambda x: f(g(x)), funcs, lambda x: x)", "def compose(*funcs):\n def _compose(g, f):\n return lambda *args, **kwargs: g(f(*args, **kwargs))\n return reduce(_compose, funcs)", "def compose(*funcs):\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError(\"Composition of empty sequence not supported.\")", "def compose(*funcs):\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')", "def compose_many(*fs):\n return reduce(compose, fs)", "def compose(*funcs):\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')", "def compose(*funcs):\n return reduce(lambda f, g: lambda x: f(g(x)), funcs[::-1])", "def compose(*fns):\n import functools\n\n def _apply(x, f):\n if isinstance(x, tuple):\n return f(*x)\n else:\n return f(x)\n\n def comp(*args):\n return functools.reduce(_apply, fns, args)\n\n return comp", "def composition(func_list):\n return reduce(\n lambda (f1, args1), (f2, args2) : (lambda x : f1(f2(x, *args2), *args1)), \n func_list,\n lambda x : x\n )", "def pipeline(\n first: Callable[[Any], Any],\n second: Callable[[Any], Any],\n *rest: Callable[[Any], Any]\n) -> Callable[[Any], Any]:\n return compose(*reversed(rest), second, first)", "def compose(*functions):\n head, *tail = functions\n return head if not tail else lambda *args, **kwargs: head(compose(*tail)(*args, **kwargs))", "def pipeline(self, *funcs) -> \"fn\":\n return self._mod.pipeline(self, *funcs)", "def PipeLine(*funcs, **kwargs):\n def wrapper(*data):\n if len(funcs) == 1:\n combinedArgs = data + kwargs.get(funcs[-1].__name__, tuple())\n return funcs[-1](combinedArgs)\n else:\n combinedArgs = kwargs.get(funcs[-1].__name__, tuple())\n if combinedArgs != ():\n del kwargs[funcs[-1].__name__]\n return funcs[-1](PipeLine(*funcs[:-1], **kwargs)(*data), *combinedArgs)\n return wrapper", "def _pipe_and_accumulate(val, fns):\n for fn in fns:\n val = fn(val)\n yield val", "def compose(*funcs):\n if not funcs:\n return identity\n\n def wrapper(*args, **kwargs):\n fst, *rest = funcs\n ret = fst(*args, **kwargs)\n\n for f in rest:\n ret = f(ret)\n\n return ret\n\n return wrapper", "def pipeline(filters):\n pipe = partial(reduce, lambda acc, f: f(acc), filters)\n bil = bilateral()\n\n def procme(img):\n img = bil(img)\n return pipe(img)\n\n return lambda img: map(procme, [img[:, :, 0], img[:, :, 1], img[:, :, 2]])", "def 
compose(*fns):\n\n if len(fns) == 0:\n raise ValueError(\"At least one function must be provided\")\n\n def composite(*args):\n x = fns[-1](*args)\n for fn in reversed(fns[0:-1]):\n x = fn(x)\n return x\n\n return composite", "def compose1(*functions: _ComposeArg[_T]) -> _Transform[_T]:\n def composition(arg, **kwargs):\n for f in reversed(functions):\n if isinstance(f, tuple):\n f, kws = f\n arg = f(arg, **{kw: kwargs[kw] for kw in kws})\n else:\n arg = f(arg)\n return arg\n return composition", "def m_pipe(val, *fns, **kwargs):\n kw = kwargs\n _val = val\n for fn in fns:\n _val = fn(_val, **kw)\n return _val", "def compose(*funcs):\n if not funcs:\n return identity\n else:\n f0 = funcs[0]\n def composed(_):\n # f_1 o f_2 o ... o f_n\n pre_composed = compose(*funcs[1:])\n return f0(pre_composed(_))\n return composed", "def reducer(functions, init_value):\n return reduce(lambda res, func: func(res), functions, init_value)", "def merge_n_reduce(\n function: typing.Callable, arity: int, data: list\n) -> typing.Any:\n while len(data) > 1:\n data_chunk = data[:arity]\n data = data[arity:]\n data.append(function(*data_chunk))\n return data[0]", "def chain_layer_functions(input_layer, functions):\n return reduce(lambda layer, func: func(layer), functions, input_layer)", "def compose(inner, *others):\n for i, func in enumerate((inner,)+others):\n assert(callable(func)), \"function #{0} is not callable.\".format(i)\n \n accumulator = inner\n for func in others:\n accumulator = _compose(accumulator, func) \n return accumulator", "def compose(*funcs: Tuple[Callable[[GT], GS], ...]) -> Callable[[GT], GS]:\n\n def _compose(data):\n result = data\n for i, f in enumerate(funcs):\n\n def _composition_part(r):\n try:\n return f(result)\n except Exception as e:\n raise e\n msg = str(e)\n msg += \"\\nduring composition:\"\n msg += \"\\n ({}) f: {}\".format(i, f)\n msg += \"\\n args: {} {}\".format(result, result.__class__)\n raise type(e)(msg) from e\n\n result = _composition_part(result)\n return result\n\n return _compose", "def lift(f: Callable[..., Data]) -> LiftedFunc:\n def inner(*args: Result) -> Result:\n out = []\n for args1 in itertools.product(*args):\n val = f(*args1)\n out.append(val)\n return out\n return inner", "def mcompose(*mfuncs):\n return functools.partial(foldl, bind, tuple(reversed(mfuncs)))", "def compose(\n f: Callable[[Any], Any],\n g: Callable[[Any], Any],\n *functions: Callable[[Any], Any]\n) -> Callable[[Any], Any]:\n fs: Tuple[Callable, ...] 
= ()\n for h in (f, g) + functions:\n if isinstance(h, Composition):\n fs += h.functions\n else:\n fs += (h,)\n return Composition(fs)", "def compose(x, funcs, *args, order=\"_order\", **kwargs):\n key = lambda x: getattr(x, order, 0)\n for func in sorted(listify(funcs), key=key):\n x = func(x, *args, **kwargs)\n return x", "def _apply_in_order(functions, input_data):\n return reduce(lambda res, func: func(res), functions, input_data)", "def multimap(funcs, iterable):\n\n for f in funcs:\n iterable = map(f, iterable)\n\n return iterable", "def chain(*streams):\n return Stream(itertools.chain(*streams))", "def compose(*ops):\n if len(ops) == 0:\n return [0, 1, 2, 3, 4, 5, 6, 7]\n if len(ops) == 1:\n return ops[0]\n if len(ops) == 2:\n op1, op2 = ops\n return [op2[op1[v]] for v in range(8)]\n op1 = ops[0]\n rest = ops[1:]\n return compose(op1, compose(*rest))", "def make_product(*functions: Callable, exponents: Optional[Sequence] = None) -> Callable:\n if exponents is None:\n exponents = np.ones(shape=(len(functions)))\n else:\n assert len(functions) == len(exponents), 'the length of exponents must be the ' \\\n 'same as the number of given functions'\n\n def product_function(*args, **kwargs):\n return np.prod([functions[i](*args, **kwargs)**exponents[i]\n for i in range(len(exponents))], axis=0)\n\n return product_function", "def compose1(f, g):\n return lambda x: f(g(x))", "def pipeline(args) :\n from pipeliner import create_pipeline\n create_pipeline(args)", "def pipe(self, func: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:\n\n if isinstance(func, tuple):\n func, target = func\n if target in kwargs:\n raise ValueError(\"%s is both the pipe target and a keyword \" \"argument\" % target)\n kwargs[target] = self\n return func(*args, **kwargs)\n else:\n return func(self, *args, **kwargs)", "def rule_sequence(sequence, rules):\n\n def the_func(acc, _next):\n return _next(acc)\n\n return functools.reduce(the_func, rules, sequence)", "def compose(transforms):\n assert isinstance(transforms, list)\n for transform in transforms:\n assert callable(transform), \"list of functions expected\"\n\n def composition(obj):\n \"Composite function\"\n for transform in transforms:\n obj = transform(obj)\n return obj\n return composition", "def filter_compose(*fns: T.Callable[[T.Any], bool]):\n def composite(x):\n for f in fns:\n if not f(x):\n return False\n return True\n\n return composite", "def compose(self, *funcs) -> \"fn\":\n return self._mod.compose(self, *funcs)", "def compose(f, g):\n return lambda *args, **kwargs: f(g(*args, **kwargs))", "def do_reduce(iterable, fn, initial=None):\n if initial is not None:\n return reduce(GlobalFns(fn), iterable, initial)\n else:\n return reduce(GlobalFns(fn), iterable)", "def mapcat(f):\n return compose(map(f), cat)", "def map_functions(x, functions):\n res = []\n for func in functions:\n res.append(func(x))\n return res", "def multi_apply(func, *args, **kwargs):\n\n pfunc = partial(func, **kwargs) if kwargs else func\n map_results = map(pfunc, *args)\n return tuple(map(list, zip(*map_results)))", "def sum(*args):\n return reduce(lambda x, y: x + y, args)", "def map_functions(x, functions):\n res = []\n for func in functions:\n res.append(map(func,x))\n return res", "def multichannel(fcn):\n return lambda args: (fcn(*args), )", "def product(x):\n return functools.reduce(lambda x, y: x * y, x)", "def foldl(fn,\r\n sequences,\r\n outputs_info,\r\n non_sequences=None,\r\n mode=None,\r\n name=None):\r\n return reduce(fn=fn,\r\n sequences=sequences,\r\n 
outputs_info=outputs_info,\r\n non_sequences=non_sequences,\r\n go_backwards=False,\r\n mode=mode,\r\n name=name)", "def chain(*readers):\n\n def reader():\n rs = []\n for r in readers:\n rs.append(r())\n\n for e in itertools.chain(*rs):\n yield e\n\n return reader", "def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))", "def mapcat(f, seqs):\n return concat(map(f, seqs))", "def mult_out(func):\n\n def multiple_output(*args, **kwargs):\n return func(*args, **kwargs)\n\n return multiple_output", "def sequential(*layers: tp.Callable[..., tp.Any]) -> tp.Callable[..., tp.Any]:\n\n def call(inputs, *args, **kwargs):\n\n out = inputs\n for i, layer in enumerate(layers):\n if i == 0:\n out = layer(out, *args, **kwargs)\n else:\n out = layer(out)\n\n if not isinstance(layer, module.Module):\n if hooks.summaries_active():\n name = utils.get_name(layer)\n\n path = module.get_module_path()\n path = path if path is not None else ()\n\n hooks.add_summary(path + (name,), layer, out)\n return out\n\n return call", "def apply_composition_function(x, functions):\n data_type = type(x)\n if len(functions) == 1:\n return data_type(map(functions[0], x))\n else:\n return data_type(map(functions[0], apply_composition_function(x, functions[1:])))", "def compose1(f, g):\n def fn(x):\n return f(g(x));\n return fn;", "def make_linear_combination(*functions: Callable, weights: Optional[Sequence] = None) -> Callable:\n if weights is None:\n weights = np.ones(shape=(len(functions)))\n else:\n assert len(functions) == len(weights), 'the length of weights must be the ' \\\n 'same as the number of given functions'\n\n def linear_combination(*args, **kwargs):\n return sum((weights[i]*functions[i](*args, **kwargs) for i in range(len(weights))))\n\n return linear_combination", "def compute_over_actions(f, *args):\n\n '''\n # show the middle results\n for a in zip(*args):\n print(\"a:\", a)\n r = f(*a)\n print(\"r:\", r)\n '''\n\n return sum(f(*a) for a in zip(*args))", "def fmult(items):\n return functools.reduce(lambda x, y: x*y, items)", "def foldr(fn,\r\n sequences,\r\n outputs_info,\r\n non_sequences=None,\r\n mode=None,\r\n name=None):\r\n return reduce(fn=fn,\r\n sequences=sequences,\r\n outputs_info=outputs_info,\r\n non_sequences=non_sequences,\r\n go_backwards=True,\r\n mode=mode,\r\n name=name)", "def delay_pipeline(pipeline, pipe):\n _pipeline = delayed(pipeline[0].curry())(pipe)\n for task in pipeline[1:]:\n _pipeline = delayed(task.curry())(_pipeline)\n\n return _pipeline", "def run_pipeline(functionlist, article, **kwargs):\n nerror = 0\n if kwargs.get(\"debug\"):\n print(\"DEBUG: running pipeline on \", article['id'], file=sys.stderr)\n for func in functionlist:\n ret = func(article, **kwargs)\n if ret:\n nerror += 1\n return nerror", "def map_readers(func, *readers):\n\n def reader():\n rs = []\n for r in readers:\n rs.append(r())\n for e in itertools.imap(func, *rs):\n yield e\n\n return reader", "def pipe_line(input_stream: collections.Iterable, transformers=[], items_count=-1,\r\n chunk_size=10 ** 5) -> collections.Iterable:\r\n output_stream = input_stream\r\n dw = DataWrap(output_stream, items_count=items_count)\r\n for transformer in transformers:\r\n if hasattr(transformer, 'transform_chunks_stream'):\r\n if not dw.is_stream_chunkified:\r\n dw.chunkify(chunk_size)\r\n transform_function = transformer.transform_chunks_stream\r\n else:\r\n if dw.is_stream_chunkified:\r\n dw.dechunkify()\r\n\r\n if hasattr(transformer, 'transform'):\r\n transform_function = 
transformer.transform\r\n else:\r\n transform_function = transformer\r\n\r\n dw.data_stream = transform_function(dw.data_stream)\r\n # output_stream_copy, output_stream = tee(output_stream)\r\n # pass_consumer(output_stream_copy)\r\n\r\n dw.dechunkify()\r\n return dw.data_stream", "def lift(func: Callable) -> Callable:\n return lambda f: compose2(func, f)", "def reduce(fn,\r\n sequences,\r\n outputs_info,\r\n non_sequences=None,\r\n go_backwards=False,\r\n mode=None,\r\n name=None):\r\n rval = scan(fn=fn,\r\n sequences=sequences,\r\n outputs_info=outputs_info,\r\n non_sequences=non_sequences,\r\n go_backwards=go_backwards,\r\n truncate_gradient=-1,\r\n mode=mode,\r\n name=name)\r\n if isinstance(rval[0], (list, tuple)):\r\n return [x[-1] for x in rval[0]], rval[1]\r\n else:\r\n return rval[0][-1], rval[1]", "def sync_filter(func, *iterables):\n return tuple(zip(*tuple(i for i in zip(*iterables) if func(*i)))) or ((),) * len(\n iterables\n )", "def flatmap(func, *iterable) -> Iterator:\n return map(func, chain(*chain(*iterable)))", "def compose1(f, g):\n def h(x):\n return f(g(x))\n return h", "def mapcat(iteratee, *seqs):\n return concat(*map(iteratee, *seqs))", "def accumulate(*converters):\n #Validation\n if len(converters) == 0:\n converters = (iter,)\n for i, func in enumerate(converters): #tuple of callable\n assert(callable(func)), \"converter #{0} is not callable.\".format(i)\n #Compose all converter functions\n convert = compose(*converters)\n \n @functools.wraps(convert)\n def outer(func): #pylint: disable=C0111\n @functools.wraps(func)\n def inner(*args, **kwargs): #pylint: disable=C0111\n return convert(func(*args, **kwargs))\n return inner\n return outer", "def iter_reduce_ufunc(ufunc, arrs, out=None, default=None):\n # Get first item in iterator\n try:\n initial = next(arrs)\n except StopIteration:\n return default\n # Populate the outvariable if specified otherwise make a copy of the first\n # item to be the output memory\n if out is not None:\n out[:] = initial\n else:\n out = initial.copy()\n # Iterate and reduce\n for arr in arrs:\n ufunc(out, arr, out=out)\n return out", "def merge_reduce(function: typing.Callable, data: list) -> typing.Any:\n queue = deque(range(len(data)))\n while len(queue):\n x_value = queue.popleft()\n if len(queue):\n y_value = queue.popleft()\n data[x_value] = function(data[x_value], data[y_value])\n queue.append(x_value)\n else:\n return data[x_value]", "def flatmap2(func, *iterable) -> Iterator:\n return map(func, chain(*chain(*chain(*iterable))))", "def apply(self, *input_):\n result = None\n for function in reversed(self._functions):\n if result is None:\n result = function(*input_)\n else:\n result = function(result)\n return result", "def map_multi_args(self, func, iterable, chunksize=None):\n assert self._state == RUN\n return self.map_async(one_to_many(func), iterable, chunksize).get()", "def product(*nums):\n\treturn reduce((lambda x, y: x * y), nums)", "def transform(stuff, pipelines=DEFAULT_PIPELINE_NAMES):\n global _pipelines\n for name in pipelines:\n p = load_pipeline(name)\n stuff = p.transform(stuff)\n return stuff", "def two():\n return lambda f: lambda x: f(f(x))", "def map_all(f: Callable[[GT], GS], *args, **kwargs) -> Callable[[GT], GS]:\n\n def _map_all(arr: GT) -> GS:\n return f(arr, *args, **kwargs)\n\n return _map_all", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def custom_sum(*args):\n return functools.reduce(lambda x, y: x + y, args)", "def reduce(self, function):\n return 
reduce(function, self.data)", "def _convert_from_function_io_to_pipeline_io(self, function_io: List[entities.FunctionIO]) -> List[PipelineNodeIO]:\n pipeline_io = []\n for single_input in function_io:\n pipeline_io.append(\n PipelineNodeIO(port_id=str(uuid.uuid4()),\n input_type=single_input.type,\n name=single_input.name,\n color=None,\n display_name=single_input.name,\n default_value=single_input.value,\n actions=single_input.actions if single_input.actions is not None else []))\n return pipeline_io", "def apply_list_of_funcs(call_queue, partition):\n for func, f_args, f_kwargs in call_queue:\n partition = func(partition, *f_args, **f_kwargs)\n return partition, get_ip()", "def runner(func, iterable, arguments, local=False):\n if local:\n return [func(i, *arguments) for i in iterable]\n else:\n if iterable:\n return group(func.s(i, *arguments) for i in iterable)().get()\n else:\n # group()() returns None if group is called with no arguments,\n # leading to an AttributeError with get().\n return []", "def pipe(self, func: Callable, *args, **kwargs) -> Any:\n return func(self, *args, **kwargs)", "def prod(x):\n return functools.reduce(lambda a, b: a * b, x, 1)", "def whole(parsers):\n if len(parsers) == 0:\n return finished >> (lambda x: [])\n if len(parsers) == 1:\n return parsers[0] + finished >> (lambda x: x[:-1])\n return reduce(add, parsers) + skip(finished)", "def pipeline(func):\n @wraps(func)\n def process(img_or_iterable, *args, **kwargs):\n if isinstance(img_or_iterable, (SliceableIterable, FramesSequence)):\n _len = len(img_or_iterable)\n s = SliceableIterable(img_or_iterable, range(_len), _len)\n s._proc_func = lambda image: func(image, *args, **kwargs)\n return s\n else:\n # Fall back on normal behavior of func, interpreting input\n # as a single image.\n return func(img_or_iterable)\n\n if process.__doc__ is None:\n process.__doc__ = ''\n process.__doc__ = (\"This function has been made pims-aware. When passed\\n\"\n \"a pims reader or SliceableIterable, it will return a \\n\"\n \"new SliceableIterable of the results. When passed \\n\"\n \"other objects, its behavior is \"\n \"unchanged.\\n\\n\") + process.__doc__\n return process", "def map(iterable, function):\n for x in iterable:\n yield function(x)" ]
[ "0.85493785", "0.79058933", "0.7589019", "0.75349104", "0.7516276", "0.7404753", "0.7371548", "0.7317679", "0.7314664", "0.7298046", "0.7296629", "0.7294338", "0.7245193", "0.719107", "0.71188146", "0.7108474", "0.710505", "0.7088272", "0.69122714", "0.6900706", "0.68673223", "0.67737937", "0.6764257", "0.6689317", "0.66435003", "0.65857357", "0.65198576", "0.647352", "0.6399837", "0.6360639", "0.6188054", "0.6176181", "0.59685344", "0.59662646", "0.5960752", "0.5938441", "0.59323907", "0.58830994", "0.58518785", "0.5848057", "0.5835738", "0.5831895", "0.5707948", "0.56775796", "0.56682444", "0.5665338", "0.5652715", "0.5639123", "0.5635526", "0.56265384", "0.5609076", "0.56076217", "0.5589532", "0.55610543", "0.55174804", "0.55152345", "0.5490874", "0.54883933", "0.54826146", "0.54702175", "0.54651594", "0.5463791", "0.5459587", "0.5454206", "0.54326755", "0.5397781", "0.53921014", "0.5372343", "0.5329361", "0.5326422", "0.52966446", "0.52929217", "0.52863777", "0.52761185", "0.5259461", "0.5258635", "0.5249826", "0.5245798", "0.52404976", "0.523056", "0.5179425", "0.5177902", "0.51763105", "0.51654303", "0.5153623", "0.51470673", "0.51416194", "0.5127691", "0.5126656", "0.51265407", "0.51265407", "0.5126291", "0.51262254", "0.5120517", "0.5111836", "0.51012826", "0.5101168", "0.5093018", "0.5092229", "0.5086251" ]
0.8107064
1
Calculates new observation vector for GridWorld.
def calculate_next_vector(vector, displacement, max_length):
    # recover vector to goal
    alpha = vector[0] * pi
    d = tan(alpha)
    col = max_length * vector[1] / sqrt(d ** 2 + 1)
    row = d * col
    if abs(alpha) > pi / 2:
        col = -col
    new_row = row - displacement[0]
    new_col = col - displacement[1]
    # get vector back again
    new_vec = np.zeros(2)
    new_vec[0] = atan(new_row / (new_col + 1e-12))
    if new_col < 0 < new_row:
        new_vec[0] += pi
    elif (new_col < 0) and (new_row <= 0):
        new_vec[0] -= pi
    new_vec[0] /= pi
    new_vec[1] = sqrt(new_col ** 2 + new_row ** 2) / max_length
    return new_vec
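A small worked example (illustrative only, not part of the dataset record), assuming the calculate_next_vector definition above is in scope together with the imports it relies on:

from math import pi, tan, atan, sqrt
import numpy as np

# Goal one grid cell to the right of the agent: angle 0 (stored as angle/pi)
# and distance 1 out of a maximum of 10 (stored as 1/10).
vec = np.array([0.0, 0.1])

# Move one column toward the goal; displacement is (row, col) = (0, 1),
# matching how the function subtracts displacement[0] from the row and
# displacement[1] from the column. The encoded distance drops to ~0.
print(calculate_next_vector(vec, (0.0, 1.0), 10.0))  # -> [0. 0.]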
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_loc(self) -> None:\n self.state[:, :, Boids.Attr.LOC] += self.state[:, :, Boids.Attr.VEL]\n # wrap-around the simulated environment\n self.state[:, :, Boids.Attr.LOC] %= np.expand_dims(self.env_bounds, axis=1)", "def compute_observation(self):\n robotPos, robotOrn = p.getBasePositionAndOrientation(self.botId)\n robotEuler = p.getEulerFromQuaternion(robotOrn)\n linear, angular = p.getBaseVelocity(self.botId)\n return (np.array([robotEuler[0],angular[0],self.vt], dtype='float32'))", "def _get_obs(self):\n # return np.concatenate((self.world.state[:6], self.world.state[7:13]))\n return np.concatenate((self.world.state, np.zeros(7)))\n # return self.world.state", "def calculateWorldValues(self):\n # 等值面\n for x in range(self.worldSize):\n for y in range(self.worldSize):\n for z in range(self.worldSize):\n if self.t=='b':\n self.world[x][y][z] = math.cos(x) + math.cos(y) + math.cos(z)\n elif self.t=='n':\n self.world[x][y][z] = math.cos(x)*math.cos(y)*math.cos(z) - math.sin(x)*math.sin(y)*math.sin(z)\n elif self.t=='m':\n self.world[x][y][z] = math.sin(x)*math.cos(y) +math.sin(z)*math.cos(x)+math.sin(y)*math.cos(z)", "def project(self):\n # update positions compared to observer\n pos = self.pos.copy()\n\n # center coordinates around obs coords\n pos[:, 0] -= np.sin(self.theta) * self.V * self.time_elapsed\n pos[:, 2] -= np.cos(self.theta) * self.V * self.time_elapsed\n\n # wrap in a novel box around obs coords\n for i in range(3):\n pos[:, i] = self.bounds[2*i] + np.mod(pos[:, i], self.bounds[2*i + 1]-self.bounds[2*i])\n\n d = (pos**2).sum(axis=1)**.5\n # ind_visible = (pos[:, 2] > 0) * (self.d_min<d) * (d<self.d_max)\n ind_visible = (pos[:, 2] > self.d_min) * (d < self.d_max)\n N_visible = int(np.sum(ind_visible))\n\n # self.state = [X, Y, size]\n self.state = np.ones((N_visible, 7))\n for i in range(2):\n self.state[:, i] = self.mag * pos[ind_visible, i] / pos[ind_visible, 2]\n print(i, self.state[:, i].min(), self.state[:, i].max())\n self.state[:, 2] = self.size / d[ind_visible]\n\n # colors do not change\n self.state[:, 3:] = pos[ind_visible, 3:]\n\n # TODO: larger transparency at larger distance => too fancy :-)\n # self.state[:, 2] = self.size / d[ind_visible]\n\n # for i in range(3):\n # self.state[:, i] *= (self.bounds[2*i+1] - self.bounds[2*i])\n # self.state[:, i] -= self.bounds[2*i]", "def observation(self, obs):\n\n# import pdb;pdb.set_trace()\n return np.moveaxis(obs, 2, 0)", "def update_world(self):\n pass", "def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model", "def __call__(self):\n return self._vector", "def indep(self):\n out = self.new()\n for y,x in self.coords(False):\n out.store(y,x, float(self.xmarg(x)*self.ymarg(y))/float(self.sum()))\n return out", "def update_E(self):\n self.grid.E[self.loc] += (\n self.grid.courant_number\n * self.grid.inverse_permittivity[self.loc]\n * self.phi_E\n )", "def vorticity(self):\n \n ux,_ = np.gradient(self._obj['u'],self._obj['x'],self._obj['y'],axis=(0,1))\n _,vy = np.gradient(self._obj['v'],self._obj['x'],self._obj['y'],axis=(0,1))\n # self._obj['w'] = xr.DataArray(vy - ux, dims=['x', 'y'])\n self._obj['w'] = xr.DataArray(vy - ux, dims=['x', 'y','t'])\n \n if len(self._obj.attrs['units']) == 4:\n vel_units = self._obj.attrs['units'][-1]\n self._obj.attrs['units'].append('1/dt')\n else:\n vel_units = self._obj.attrs['units'][-2]\n self._obj.attrs['units'][-1] = 
('1/dt')\n\n\n return self._obj", "def update_agent_location_vector(self):\n\n for agent in self.agents:\n location = agent.getz()\n # print(location)\n if location[0] == 0:\n vectorized_agent_loc = location[1]\n elif location[0] == 1:\n vectorized_agent_loc = 4 + location[1]\n elif location[0] == 2:\n vectorized_agent_loc = 8 + location[1]\n else: # location[0] == 3\n vectorized_agent_loc = 12 + location[1]\n\n if agent.isBusy == False:\n # remove any location if it shows it as well\n self.agent_locations[0][vectorized_agent_loc] = 0\n continue\n else:\n self.agent_locations[0][vectorized_agent_loc] = 1\n if self.DEBUG:\n print('agent location vector is ', self.agent_locations)", "def __ComputeObservationVector(self, groundPoints):\n\n n = groundPoints.shape[0] # number of points\n\n # Coordinates subtraction\n dX = groundPoints[:, 0] - self.exteriorOrientationParameters[0]\n dY = groundPoints[:, 1] - self.exteriorOrientationParameters[1]\n dZ = groundPoints[:, 2] - self.exteriorOrientationParameters[2]\n dXYZ = np.vstack([dX, dY, dZ])\n rotated_XYZ = np.dot(self.rotationMatrix.T, dXYZ).T\n\n l0 = np.empty(n * 2)\n\n # Computation of the observation vector based on approximate exterior orientation parameters:\n l0[::2] = -self.camera.focalLength * rotated_XYZ[:, 0] / rotated_XYZ[:, 2]\n l0[1::2] = -self.camera.focalLength * rotated_XYZ[:, 1] / rotated_XYZ[:, 2]\n\n return l0", "def return_vec(self) :\r\n y_vec = np.concatenate((self.x_vec,self.v_vec))\r\n return y_vec", "def vector_to(self, location):\n return (self.X - location) * numpy.array([1.0, 1.0, 0.0])", "def data_vector(self) -> np.ndarray:\r\n return np.dot(\r\n self.linear_obj_list[0].mapping_matrix.T, self.w_tilde.dirty_image\r\n )", "def vector(self):\n return np.array([self.lat, self.lng])", "def observation_space():", "def update_grid(self, x):\r\n\r\n # Append boundary rows and columns to matrix\r\n x = self.append_boundary(x) # the boundary is recomputed at each step\r\n y = np.copy(x)\r\n\r\n # For each cell within boundary, compute state according to rules.\r\n chg_0_1 = 0 # the number of cells that changed from state 0 to state 1\r\n chg_1_0 = 0 # the number of cells that changes from state 1 to state 0\r\n chg_none = 0 # the number of cells that did not change\r\n index = np.arange(1, x.shape[0] - 1)\r\n for i in index:\r\n for j in index:\r\n neighborhood = x[i - 1:i + 2:1, j - 1:j + 2:1] # 3x3 sub matrix centered at i, j\r\n y[i, j] = self.update_cell(neighborhood)\r\n change = int(y[i, j] - x[i, j])\r\n if change == -1:\r\n chg_1_0 += 1\r\n if change == 0:\r\n chg_none += 1\r\n if change == 1:\r\n chg_0_1 += 1\r\n\r\n # Compute statistics excluding boundary\r\n total = np.power(x[1:-1:1, 1:-1:1].shape[0] - 1, 2)\r\n start_1 = np.sum(x[1:-1:1, 1:-1:1])\r\n end_1 = np.sum(y[1:-1:1, 1:-1:1])\r\n stats = [total, start_1, end_1, chg_1_0, chg_none, chg_0_1]\r\n\r\n return y[1:-1:1, 1:-1:1], stats # remove the boundary\r", "def update_grid_pos(self):\n self.grid_pos = self.get_tile_of_position(self.tank.body.position)", "def _get_obs(self):\n obs = super()._get_obs()\n gripper_rot = rotations.mat2euler(\n self.sim.data.get_site_xmat('robot0:grip'))\n obs['observation'] = np.concatenate(\n [obs['observation'], gripper_rot.ravel()])\n return obs", "def xyz(self) -> np.ndarray:\n return self._vector[0:3]", "def observation_space(self):\n return Box(low=-np.inf, high=np.inf, shape=(self.num_cars,))\n # pos = Box(low=0., high=self.road_length, shape=(self.num_cars, ))\n # vel = Box(low=0., high=self.GOAL_VELOCITY+10, 
shape=(self.num_cars, ))\n # accel = Box(low=self.min_acceleration, high=self.max_acceleration, shape=(self.num_cars, ))\n # return vel\n # return Product([pos, vel, accel])", "def _make_observation(self) -> Dict[str, np.ndarray]:\n return {\n \"cur_pos\": np.array([self.cur_pos], dtype=int),\n }", "def GravityVector(self):\n if self.Cid() == 0:\n return self.N\n ## TODO: shouldn't be scaled by the ???\n p = self.cid_ref.transform_vector_to_global(self.N)\n return self.scale * p", "def inv_covar(self) -> np.ndarray:\n if self._inv_covar is None:\n self._inv_covar = batched_inv_spd(self.chol_covar)\n return self._inv_covar", "def _get_observations(self):\n food = np.array(self.game.state.data.food.data)\n walls = np.array(self.game.state.data.layout.walls.data)\n map_shape = walls.shape\n capsules = self.game.state.data.capsules\n pacman_pos = self.game.state.data.agentStates[0].configuration.pos\n\n gosts_pos = list(map(lambda agent: agent.configuration.pos,\n self.game.state.data.agentStates[1:]))\n gosts_scared = list(\n map(lambda agent: agent.scaredTimer > 0, self.game.state.data.agentStates[1:]))\n\n \"\"\"\n 0: empty,\n 1: wall,\n 2: food,\n 3: capsules,\n 4: ghost,\n 5: scared ghost,\n 6: pacman\n \"\"\"\n\n view_slices = ((max(pacman_pos[0]-self.view_distance[0], 0), min(pacman_pos[0]+self.view_distance[0]+1, map_shape[0])),\n (max(pacman_pos[1]-self.view_distance[1], 0), min(pacman_pos[1]+self.view_distance[1]+1, map_shape[1])))\n\n def select(l):\n return l[view_slices[0][0]:view_slices[0][1], view_slices[1][0]:view_slices[1][1]]\n\n obs = np.vectorize(lambda v: 1 if v else 0)(select(walls))\n obs = obs + np.vectorize(lambda v: 2 if v else 0)(select(food))\n\n def pos_to_relative_pos(pos):\n if (pos[0] < view_slices[0][0] or view_slices[0][1] <= pos[0]\n or pos[1] < view_slices[1][0] or view_slices[1][1] <= pos[1]):\n return None\n else:\n return pos[0]-view_slices[0][0], pos[1]-view_slices[1][0]\n\n for c_relative_pos in filter(lambda x: x is not None, map(pos_to_relative_pos, capsules)):\n obs[c_relative_pos[0], c_relative_pos[1]] = 3\n\n for i, g_relative_pos in enumerate(map(pos_to_relative_pos, gosts_pos)):\n if (g_relative_pos is not None):\n obs[int(g_relative_pos[0]), int(g_relative_pos[1])\n ] = 5 if gosts_scared[i] else 4\n\n pacman_relative_pos = pos_to_relative_pos(pacman_pos)\n\n obs[pacman_relative_pos[0], pacman_relative_pos[1]] = 6\n\n obs[0, 0] = 2 if np.any(\n food[0:pacman_pos[0]+1, 0:pacman_pos[1]+1]) else 0\n obs[obs.shape[0]-1,\n 0] = 2 if np.any(food[pacman_pos[0]:map_shape[0], 0:pacman_pos[1]+1])else 0\n\n obs[0, obs.shape[1] -\n 1] = 2 if np.any(food[0:pacman_pos[0]+1, pacman_pos[1]:map_shape[0]]) else 0\n obs[obs.shape[0]-1, obs.shape[1]-1] = 2 if np.any(\n food[pacman_pos[0]:map_shape[0], pacman_pos[1]:map_shape[0]]) else 0\n\n # print(np.transpose(obs)[::-1, :])\n\n return obs", "def update(world):\r\n infect = infection(world['SIR'], infection_rate, incubation_rate)\r\n disperse = dispersion(world['SIR'], dispersion_kernel, dispersion_rates)\r\n world['SIR'] += dt*( infect + disperse)\r\n world['t'] += dt", "def _get_observation_np(self) -> np.ndarray: # need this for baselines\n observation = []\n observation.extend(self.rex.GetMotorAngles().tolist())\n observation.extend(self.rex.GetMotorVelocities().tolist())\n observation.extend(self.rex.GetMotorTorques().tolist())\n observation.extend(list(self.rex.GetBaseOrientation()))\n\n # in addition to state, will need ratio, clock_variables, and desired speed\n observation.extend([self.ratio]) # only 1\n 
observation.extend(self.get_clock()) # 4 variables (1 per leg)\n observation.extend(self.speed_des) # [vx_des, vy_des]\n self._observation = observation\n return np.array(self._observation)", "def unit():\n return Vec2d(0, 1)", "def GlobalVector(self):\n return _hypre.HypreParVector_GlobalVector(self)", "def reparameterize(self, mu, logvar):\n\t\tlogvar = torch.exp(logvar/2)\n\t\tif self.cuda_flag:\n\t\t\tepsilon = torch.randn((mu.size())).float().cuda()\n\t\telse:\n\t\t\tepsilon = torch.randn((mu.size())).float()\n\t\tlatent_vector = torch.mul(epsilon, logvar) + mu \n\t\treturn latent_vector", "def index_to_obs(a,b, grid_x, grid_y):\n position = grid_x[a]\n velocity = grid_y[b]\n return position, velocity", "def get_potential(self,t):\n grid_V = self.grid_x * (1 + self.Lambda*np.cos(self.omega*t)) * (self.grid_x >= 0)\n grid_V += 200 * (self.grid_x < 0)\n return grid_V", "def observation_function(self, game_state):\n return game_state.make_observation(self.index)", "def verletIntegration(self):\n for atom in range(0, self.numAtoms):\n \n # Update velocities\n self.atoms[atom].vx += (self.atoms[atom].fx/self.m)*self.dt\n self.atoms[atom].vy += (self.atoms[atom].fy/self.m)*self.dt\n self.atoms[atom].vz += (self.atoms[atom].fz/self.m)*self.dt\n \n \n # Update positions\n newX = self.atoms[atom].x + self.atoms[atom].vx*self.dt\n newY = self.atoms[atom].y + self.atoms[atom].vy*self.dt\n newZ = self.atoms[atom].z + self.atoms[atom].vz*self.dt\n\n # Update current positions (applying PBC)\n if newX < 0:\n self.atoms[atom].x = newX + self.lbox\n elif newX > self.lbox:\n self.atoms[atom].x = newX - self.lbox\n else:\n self.atoms[atom].x = newX\n \n if newY < 0:\n self.atoms[atom].y = newY + self.lbox\n elif newY > self.lbox:\n self.atoms[atom].y = newY - self.lbox\n else:\n self.atoms[atom].y = newY\n \n if newZ < 0:\n self.atoms[atom].z = newZ + self.lbox\n elif newZ > self.lbox:\n self.atoms[atom].z = newZ - self.lbox\n else:\n self.atoms[atom].z = newZ", "def getNewCoordinate(self):\n R = GeometryToolBox.rotation_matrix_from_vectors([1, 0, 0], self.nVec)\n x = self.nVec\n y = np.dot(R, np.array([0, 1, 0]))\n z = np.dot(R, np.array([0, 0, 1]))\n self.XYZCoordinate = np.vstack((x, y, z))\n return self.XYZCoordinate", "def calc_observation(self, t, x, u):\n return", "def calc_observation(self, t, x, u):\n return", "def current_update():\n # Compute the multiplier coefficient:\n ci = dt / (L * dx)\n for k in range(0, nx-1):\n I[k] = I[k] - (ci * (V[k + 1] - V[k]))", "def update_position(self, event):\n\n # Create a copy of the most recent stored twist data to perform calculations\n with self.lock:\n velocity_data = copy.deepcopy(self.twist)\n\n # Time elapsed since last update position call\n if hasattr(event, 'last_real'):\n if event.last_real is None:\n time = rospy.Duration(0.05)\n else:\n time = event.current_real - event.last_real\n \n time = time.to_sec()\n\n # Calculate angle turned in the given time using omega = theta/time\n angle = velocity_data.angular.z*time\n\n # Calculate distance travelled in the given time using linear velocity = arc distance/time\n distance = velocity_data.linear.x*time\n\n # Calculate yaw of the robot\n self.vehicle_yaw += angle\n\n # Calculate vehicle x, y, z position coordinates\n # TODO recalculate the position based on traveling in a circular arc.\n self.pose.position.x += (distance)*cos(self.vehicle_yaw)\n self.pose.position.y += (distance)*sin(self.vehicle_yaw)\n\n # Calculate z position using linear interpolation and create cloud array\n \n # 1. 
Create ranges to be used in interpolation function\n terrain_points_x = np.arange(0, self.gaussian_array.shape[1]*self.resolution, self.resolution)\n terrain_points_y = np.arange(0, self.gaussian_array.shape[0]*self.resolution, self.resolution)\n\n # 2. Create array of points to be converted to point cloud for vizualization\n terrain_mesh_x, terrain_mesh_y = np.meshgrid(terrain_points_x, terrain_points_y)\n terrain_x = terrain_mesh_x.ravel()\n terrain_y = terrain_mesh_y.ravel()\n terrain_z = self.gaussian_array.ravel()\n terrain_grid_points = np.stack((terrain_x, terrain_y, terrain_z), axis=1)\n\n # 3. Create interpolation function based on the ranges and gaussian data\n interp_func = RectBivariateSpline(terrain_points_y, terrain_points_x, self.gaussian_array)\n\n # 4. Find z value for x and y coordinate of vehicle using interpolation function\n # TODO compute z height based on footprint\n self.pose.position.z = interp_func(self.pose.position.y, self.pose.position.x)\n\n # Convert Euler Angles to Quarternion\n V_rotation = tf.transformations.quaternion_from_euler(0.0, 0.0, self.vehicle_yaw)\n\n # Broadcast vehicle frame which is a child of the world frame\n br = tf.TransformBroadcaster()\n br.sendTransform((self.pose.position.x, self.pose.position.y, self.pose.position.z), \n V_rotation, rospy.Time.now(),\"vehicle_frame\", \"map\")\n\n # Construct the homogenous transformation matrix for map to vehicle frame\n V_translation = [self.pose.position.x, self.pose.position.y, self.pose.position.z]\n map_T_V = tf.transformations.quaternion_matrix(V_rotation) \n map_T_V[:3,3] = np.array(V_translation)\n\n # Create footprint of vehicle\n V_footprint_range_x = np.linspace((-self.vehicle_length/2), (self.vehicle_length/2), 30)\n V_footprint_range_y = np.linspace((-self.vehicle_width/2), (self.vehicle_width/2), 15)\n V_footprint_mesh_x, V_footprint_mesh_y = np.meshgrid(V_footprint_range_x, V_footprint_range_y)\n V_footprint_x = V_footprint_mesh_x.ravel()\n V_footprint_y = V_footprint_mesh_y.ravel()\n\n # For every point in the vehicle footprint, calculate the position wrt to the vehicle's frame\n # and its interpolated z value. 
Add this point to a list of points for visualization.\n # TODO Flatten into a single matrix multiply to remove for loop\n V_viz_points = []\n for i in range(V_footprint_x.shape[0]):\n p = Point()\n V_footprint_point = np.array([[V_footprint_x[i]],[V_footprint_y[i]], [0.0], [1.0]])\n V_footprint_point = np.matmul(map_T_V, V_footprint_point)\n V_footprint_point[2, 0] = interp_func(V_footprint_point[1, 0], V_footprint_point[0, 0])\n p.x = V_footprint_point[0, 0]\n p.y = V_footprint_point[1, 0]\n p.z = V_footprint_point[2, 0]\n V_viz_points.append(p)\n\n #####################################################################################\n # Create a copy of the most recent stored JointState data to perform calculations\n with self.joint_lock:\n joint_data = copy.deepcopy(self.joint)\n\n # If the data is empty on first run, fill with 0.0\n if not joint_data.velocity:\n joint_data.velocity = [0.0,0.0]\n \n # Calculate angle based on velocity data and time\n angle = joint_data.velocity[0]*time\n angle2 = joint_data.velocity[1]*time\n\n self.joint1_pitch += angle\n self.joint2_pitch += angle2\n\n # Transformations from vehicle frame to Joint1 and Joint2\n \n # Static rotation about z-axis \n static_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 3.14159)\n translation = [0.0, 0.0, 0.0]\n V_T_SRz = tf.transformations.quaternion_matrix(static_rot)\n V_T_SRz[:3,3] = np.array(translation)\n\n # Dynamic rotation about the y-axis of Joint 1\n rot_SRz_T_J1 = [[cos(self.joint1_pitch), 0.0, sin(self.joint1_pitch)],\n [0.0, 1.0, 0.0],\n [-sin(self.joint1_pitch), 0.0, cos(self.joint1_pitch)]]\n\n trans_SRz_T_J1 = [0.0, 0.0, 0.0, 1.0]\n\n SRz_T_J1 = np.zeros((4,4))\n SRz_T_J1[:3,:3] = rot_SRz_T_J1\n SRz_T_J1[:4,3] = trans_SRz_T_J1\n\n # Translation based on length of Joint 1 arm \n no_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 0.0)\n translation = [self.joint1_length, 0.0, 0.0]\n J1_T_STx = tf.transformations.quaternion_matrix(no_rot)\n J1_T_STx[:3,3] = np.array(translation)\n\n # Dynamic rotation about y-axis of Joint 2\n dynamic_rot2 = tf.transformations.quaternion_from_euler(0.0, self.joint2_pitch, 0.0)\n translation = [0.0, 0.0, 0.0]\n STx_T_J2 = tf.transformations.quaternion_matrix(dynamic_rot2)\n STx_T_J2[:3,3] = np.array(translation)\n\n # matrix multiplication to form the homogenous matrices\n V_T_J1 = np.matmul(V_T_SRz, SRz_T_J1)\n V_T_STx = np.matmul(V_T_J1, J1_T_STx)\n V_T_J2 = np.matmul(V_T_STx, STx_T_J2)\n\n frame_J1 = tf_conversions.fromMatrix(V_T_J1)\n frame_J2 = tf_conversions.fromMatrix(V_T_J2)\n\n # The ripper tip is a point in the J2's frame, this is based on the length of the ripper\n ripper_tip_point_J2 = [self.ripper_length, 0.0, 0.0, 1.0]\n map_T_J2 = np.matmul(map_T_V, V_T_J2)\n ripper_tip_pt_map = np.matmul(map_T_J2, ripper_tip_point_J2)\n ripper_tip_point_viz = Point()\n ripper_tip_point_viz.x = ripper_tip_pt_map[0]\n ripper_tip_point_viz.y = ripper_tip_pt_map[1]\n ripper_tip_point_viz.z = ripper_tip_pt_map[2]\n V_viz_points.append(ripper_tip_point_viz)\n\n # use the ripper's position as an index value to access the gaussian array\n ripper_tip_cell_index_x = int(ripper_tip_pt_map[1]/self.resolution)\n ripper_tip_cell_index_y = int(ripper_tip_pt_map[0]/self.resolution)\n\n # Create a range of index values surrounding index_x and y\n nearby_index_cells_range_x = np.arange((ripper_tip_cell_index_x-1),(ripper_tip_cell_index_x+2), 1)\n nearby_index_cells_range_y = np.arange((ripper_tip_cell_index_y-1),(ripper_tip_cell_index_y+2), 1)\n nearby_index_cells_mesh_x, 
nearby_index_cells_mesh_y = np.meshgrid(nearby_index_cells_range_x,nearby_index_cells_range_y)\n nearby_index_cells_x = nearby_index_cells_mesh_x.ravel()\n nearby_index_cells_y = nearby_index_cells_mesh_y.ravel()\n\n # First check if the index is within the gaussian array, if it is, then check if the tip of\n # the ripper is beneath the soil, if it is, then remove the soil above the tip and disperse\n # it to the surrounding cells, provided those cells are also within the gaussian array\n # TODO Remove use of for loops and excess if statements\n\n if (0 <= ripper_tip_cell_index_x <= (self.gaussian_array.shape[0]-1)) and (0 <= ripper_tip_cell_index_y <= (self.gaussian_array.shape[1]-1)):\n if (self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] > ripper_tip_pt_map[2]):\n diff = self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] - ripper_tip_pt_map[2]\n for i in range(nearby_index_cells_x.shape[0]):\n if (0 <= nearby_index_cells_x[i] <= (self.gaussian_array.shape[0]-1)) and (0 <= nearby_index_cells_y[i] <= (self.gaussian_array.shape[1]-1)):\n self.gaussian_array[nearby_index_cells_x[i]][nearby_index_cells_y[i]] += diff/8\n self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] = ripper_tip_pt_map[2]\n \n\n # Publish all messages\n self.publish_messages(V_translation, V_rotation, terrain_grid_points, V_viz_points, frame_J1, frame_J2)", "def apply(self, vec):\n return self._disp_over_m * vec - self._gp * ( # pylint: disable = E1130\n self._gp @ vec / self.L ** self.ndim\n )", "def alive_vector(self):\n return self._alive_vector", "def vector(self,\n i: int,\n j: int) -> np.ndarray:\n return self[j].coord - self[i].coord", "def get_observations(self):\n joint_states = self.joints_state\n self.force = self.wrench_stamped.wrench.force\n self.torque = self.wrench_stamped.wrench.torque\n self.static_taxel = self.tactile_static.taxels\n# dynamic_taxel= tactile_dynamic\n\n# print(\"[force]\", self.force.x, self.force.y, self.force.z)\n# print(\"[torque]\", self.torque.x, self.torque.y, self.torque.z)\n shp_joint_ang = joint_states.position[0]\n shl_joint_ang = joint_states.position[1]\n elb_joint_ang = joint_states.position[2]\n wr1_joint_ang = joint_states.position[3]\n wr2_joint_ang = joint_states.position[4]\n wr3_joint_ang = joint_states.position[5]\n\n shp_joint_vel = joint_states.velocity[0]\n shl_joint_vel = joint_states.velocity[1]\n elb_joint_vel = joint_states.velocity[2]\n wr1_joint_vel = joint_states.velocity[3]\n wr2_joint_vel = joint_states.velocity[4]\n wr3_joint_vel = joint_states.velocity[5]\n\n q = [shp_joint_ang, shl_joint_ang, elb_joint_ang, wr1_joint_ang, wr2_joint_ang, wr3_joint_ang]\n# print(\"q(observation):\", q)\n eef_x, eef_y, eef_z = self.get_xyz(q)\n self.end_effector = self.get_xyz(q)\n eef_x_ini, eef_y_ini, eef_z_ini = self.get_xyz(self.init_joint_pose2) \n\n delta_image_r, delta_image_l = self.get_image()\n self.cnn_image_r = agent.update_cnn(delta_image_r)\n self.cnn_image_l = agent.update_cnn(delta_image_l)\n self.cnn_image_r_list = self.cnn_image_r.tolist()\n self.cnn_image_l_list = self.cnn_image_l.tolist()\n print(\"r_list\", self.cnn_image_r_list)\n print(\"l_list\", self.cnn_image_l_list)\n\n observation = []\n# rospy.logdebug(\"List of Observations==>\"+str(self.observations))\n for obs_name in self.observations:\n if obs_name == \"shp_joint_ang\":\n observation.append((shp_joint_ang - self.init_joint_pose2[0]) * self.joint_n)\n elif obs_name == \"shl_joint_ang\":\n observation.append((shl_joint_ang - 
self.init_joint_pose2[1]) * self.joint_n)\n elif obs_name == \"elb_joint_ang\":\n observation.append((elb_joint_ang - self.init_joint_pose2[2]) * self.joint_n)\n elif obs_name == \"wr1_joint_ang\":\n observation.append((wr1_joint_ang - self.init_joint_pose2[3]) * self.joint_n)\n elif obs_name == \"wr2_joint_ang\":\n observation.append((wr2_joint_ang - self.init_joint_pose2[4]) * self.joint_n)\n elif obs_name == \"wr3_joint_ang\":\n observation.append((wr3_joint_ang - self.init_joint_pose2[5]) * self.joint_n)\n elif obs_name == \"shp_joint_vel\":\n observation.append(shp_joint_vel)\n elif obs_name == \"shl_joint_vel\":\n observation.append(shl_joint_vel)\n elif obs_name == \"elb_joint_vel\":\n observation.append(elb_joint_vel)\n elif obs_name == \"wr1_joint_vel\":\n observation.append(wr1_joint_vel)\n elif obs_name == \"wr2_joint_vel\":\n observation.append(wr2_joint_vel)\n elif obs_name == \"wr3_joint_vel\":\n observation.append(wr3_joint_vel)\n elif obs_name == \"eef_x\":\n observation.append((eef_x - eef_x_ini) * self.eef_n)\n elif obs_name == \"eef_y\":\n observation.append((eef_y - eef_y_ini) * self.eef_n)\n elif obs_name == \"eef_z\":\n observation.append((eef_z - eef_z_ini) * self.eef_n)\n elif obs_name == \"force_x\":\n observation.append((self.force.x - self.force_ini.x) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_y\":\n observation.append((self.force.y - self.force_ini.y) / self.force_limit1 * self.force_n)\n elif obs_name == \"force_z\":\n observation.append((self.force.z - self.force_ini.z) / self.force_limit1 * self.force_n)\n elif obs_name == \"torque_x\":\n observation.append((self.torque.x - self.torque_ini.x) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_y\":\n observation.append((self.torque.y - self.torque_ini.y) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"torque_z\":\n observation.append((self.torque.z - self.torque_ini.z) / self.torque_limit1 * self.torque_n)\n elif obs_name == \"image_cnn\":\n for x in range(0, 10):\n observation.append(self.cnn_image_r_list[0][x])\n# print(\"r_list\", self.cnn_image_r_list[0][x])\n for x in range(0, 10):\n observation.append(self.cnn_image_l_list[0][x])\n# print(\"l_list\", self.cnn_image_l_list[0][x])\n elif obs_name == \"static_taxel\":\n for x in range(0, 28):\n observation.append((self.static_taxel[0].values[x] - self.static_taxel_ini[0].values[x]) * self.taxel_n)\n for x in range(0, 28):\n observation.append((self.static_taxel[1].values[x] - self.static_taxel_ini[1].values[x]) * self.taxel_n)\n# elif obs_name == \"dynamic_taxel\":\n# observation.append(dynamic_taxel[0].values) * self.taxel_n\n# observation.append(dynamic_taxel[1].values) * self.taxel_n\n else:\n raise NameError('Observation Asked does not exist=='+str(obs_name))\n\n print(\"observation\", list(map(round, observation, [3]*len(observation))))\n# print(\"observation\", observation)\n\n return observation", "def get_observation_(self):\n obs = np.zeros(self.STATE_SIZE, dtype=np.float)\n obs[:,:,0] = np.array(self.input_img_.data).reshape((self.STATE_SIZE[0:2]))\n\n if self.debug_:\n self.debugger_.show_input_occ_grid(self.input_img_)\n self.debugger_.show_input_image(obs[:,:,0])\n return obs", "def find_new_coordinates(self):\n max_weight = max(self.norm_weights)\n max_index = list(self.norm_weights).index(max_weight)\n new_x = int(self.rad + (max_index / (self.window_y / self.dy))*self.dx)\n new_y = int(self.rad + (max_index % (self.window_y / self.dy))*self.dy)\n self.set_coordinates(new_x, new_y)", "def 
unit(self):\r\n return Vector(self.x/self.length(), self.y/self.length())", "def mc_update_xy(self):\n i = random.randint(0,self.N-1)\n return self.mc_update_fixed(i,xy = True)", "def observation(self, observation):\n\n # Normalise rows for view directions\n obs = observation[0]\n\n # self.env.unwrapped._config.number_of_right_players() doesn't work so we have to resort to this hack\n if not self.player_view_directions:\n self.n_left_players = len(obs[\"left_team\"])\n self.n_right_players = len(obs[\"right_team\"])\n self.player_view_directions = {\"left\": np.zeros((self.n_left_players, 2)),\n \"right\": np.zeros((self.n_right_players, 2))}\n self.player_view_directions[\"left\"][:, 0] = 1\n self.player_view_directions[\"right\"][:, 0] = -1\n\n for player_id in range(self.n_left_players):\n if np.linalg.norm(obs[\"left_team_direction\"][player_id]) != 0.0:\n self.player_view_directions[\"left\"][player_id] = obs[\"left_team_direction\"][player_id] / \\\n np.linalg.norm(obs[\"left_team_direction\"][player_id])\n for player_id in range(self.n_right_players):\n if np.linalg.norm(obs[\"right_team_direction\"][player_id]) != 0.0:\n self.player_view_directions[\"right\"][player_id] = obs[\"right_team_direction\"][player_id] / \\\n np.linalg.norm(obs[\"right_team_direction\"][player_id])\n\n final_obs = []\n obj_lists = []\n for player_id, obs in enumerate(observation):\n player_location = self._add_zero(obs['left_team'][player_id]) # [x,y] of active player\n player_view_direction = self._add_zero(self.player_view_directions['left'][player_id])\n\n obj_lst = self._encapsulate_objects(obs, player_location)\n\n if not self.full_obs:\n # update visibilities wrt player view radius\n if self.po_player_view_radius != -1:\n for obj in obj_lst:\n if obj.distance > self.po_player_view_radius:\n obj.is_visible = False\n\n for obj in obj_lst:\n # make sure to not reset visible status\n if obj.is_visible:\n obj.is_visible = self._is_in_wedge(player_view_direction, obj.location,\n self.po_view_cone_xy_opening)\n\n # update visibilities wrt occlusion\n obj_dist_sorted = sorted([o for o in obj_lst if o.is_visible and o.type == 'player'],\n key=lambda obj: obj.distance)\n while len(obj_dist_sorted) > 1:\n curr_obj, obj_dist_sorted = obj_dist_sorted[0], obj_dist_sorted[1:]\n\n for obj in obj_dist_sorted:\n # relative location wrt curr_obj\n if np.linalg.norm(curr_obj.location[:2]) == 0.0 or np.linalg.norm(curr_obj.location) == 0.0:\n continue\n\n blocked_xy_angle = 2 * np.degrees(\n np.arctan(self.po_player_width / np.linalg.norm(curr_obj.location[:2])))\n\n obj.is_visible = not self._is_in_wedge(player_view_direction,\n obj.location,\n blocked_xy_angle)\n\n obj_dist_sorted = [o for o in obj_dist_sorted if o.is_visible]\n\n # noise visible object coordinates according to their distance\n if self.po_depth_noise is not None:\n visible_objs = [obj for obj in obj_lst if obj.is_visible]\n for obj in visible_objs:\n if self.po_depth_noise.get('type', None) == 'gaussian':\n if self.po_depth_noise.get('attenuation_type', None) == 'fixed_angular_resolution':\n angular_resolution = self.po_depth_noise.get('angular_resolution_degrees',\n 2) * np.pi / 180.0\n sigma = obj.distance * angular_resolution\n # noise relevant quantities\n obj.location += np.random.normal(0, sigma, (3,))\n obj.attrs['move_direction'] += np.random.normal(0, sigma, (3,))\n if obj.type == 'player':\n obj.location[2] = 0.0 # it is commonly known that players are confined to xy plane\n obj.attrs['view_direction'] += np.random.normal(0, sigma, (3,))\n\n 
# collate observation from object representations\n o = []\n for obj in obj_lst:\n o.extend(obj.rep())\n\n # If there were less than 11vs11 players, we backfill missing values with -1.\n if len(o) < 184:\n o.extend([-1] * (184 - len(o)))\n\n # Add in personal absolute location and tiredness.\n o.extend(player_location[:2])\n o.append(obs['left_team_tired_factor'][player_id])\n\n # Add encoding of team in possession and game mode.\n o.extend({-1: [1, 0, 0], 0: [0, 1, 0], 1: [0, 0, 1]}[obs['ball_owned_team']])\n game_mode = [0] * 7\n game_mode[obs['game_mode']] = 1\n o.extend(game_mode)\n final_obs.append(o)\n\n # Make sure we have backfilled properly.\n assert len(o) == 197\n\n # save object representations if rendering, can remove this later once code trusted\n if self.render_points:\n obj_lists.append(obj_lst)\n\n if self.render_points:\n self._plot_points(obj_lists)\n\n # Ensure all numbers sensible\n final_obs = np.nan_to_num(np.array(final_obs, dtype=np.float32))\n\n return final_obs", "def Cvec(self):\n return vec(self.xc, self.yc)", "def world_coord(self, position, len):\n\n if len > 1:\n x_world = []\n y_world = []\n\n for item in position:\n x_world.append(self.cell_size*item[0]+self.cell_size/2-2)\n y_world.append(self.cell_size*item[1]+self.cell_size/2-6)\n\n else:\n x_world = self.cell_size*position[0]+self.cell_size/2-2\n y_world = self.cell_size*position[1]+self.cell_size/2-6\n\n\n return np.array([x_world, y_world])", "def random_vector_in_unit_ball():\n x = np.random.normal(loc=0.0, scale=1.0, size=(numSamples, self.dim))\n z = np.random.exponential(scale=1.0, size=(numSamples,))\n d = (np.sum(np.square(x), axis=1) + z) ** 0.5\n d = d[:, np.newaxis]\n return x / d", "def _cell_to_global(self, xy, wh):\n # grid setup\n line = tf.range(0, self.num_cells)\n rows = tf.reshape(line, [self.num_cells, 1])\n rows = tf.tile(rows, [1, self.num_cells])\n cols = tf.reshape(line, [1, self.num_cells])\n cols = tf.tile(cols, [self.num_cells, 1])\n grid = tf.stack([cols, rows], axis=-1)\n grid = tf.reshape(grid, [1, self.num_cells, self.num_cells, 1, 2])\n grid = tf.cast(grid, tf.float32)\n # box transformation\n xy += grid\n wh *= tf.reshape(self.anchors, [1, 1, 1, self.num_anchors, 2])\n return tf.concat([xy, wh], axis=-1) / self.num_cells", "def indices_and_currents_TSC_2D( charge_electron, positions_x, positions_y, velocity_x, velocity_y,\\\n x_grid, y_grid, ghost_cells, length_domain_x, length_domain_y, dt ):\n \n \n positions_x_new = positions_x + velocity_x * dt\n positions_y_new = positions_y + velocity_y * dt\n\n base_indices_x = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n base_indices_y = af.data.constant(0, positions_x.elements(), dtype=af.Dtype.u32)\n\n dx = af.sum(x_grid[1] - x_grid[0])\n dy = af.sum(y_grid[1] - y_grid[0])\n\n\n # Computing S0_x and S0_y\n ###########################################################################################\n \n # Determining the grid cells containing the respective particles\n \n x_zone = (((af.abs(positions_x - af.sum(x_grid[0])))/dx).as_type(af.Dtype.u32))\n y_zone = (((af.abs(positions_y - af.sum(y_grid[0])))/dy).as_type(af.Dtype.u32))\n\n \n # Determing the indices of the closest grid node in x direction\n\n temp = af.where(af.abs(positions_x-x_grid[x_zone]) < \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_x[temp] = x_zone[temp]\n\n temp = af.where(af.abs(positions_x - x_grid[x_zone]) >= \\\n af.abs(positions_x-x_grid[x_zone + 1])\\\n )\n\n if(temp.elements()>0):\n 
base_indices_x[temp] = (x_zone[temp] + 1).as_type(af.Dtype.u32) \n\n\n # Determing the indices of the closest grid node in y direction\n\n temp = af.where(af.abs(positions_y-y_grid[y_zone]) < \\\n af.abs(positions_y-y_grid[y_zone + 1])\\\n )\n\n if(temp.elements()>0):\n base_indices_y[temp] = y_zone[temp]\n\n temp = af.where(af.abs(positions_y - y_grid[y_zone])>=af.abs(positions_y-x_grid[y_zone + 1]))\n\n if(temp.elements()>0):\n base_indices_y[temp] = (y_zone[temp] + 1).as_type(af.Dtype.u32) \n\n # Concatenating the index list for near by grid nodes in x direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n base_indices_minus_two = (base_indices_x - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_x - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_x + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_x + 2).as_type(af.Dtype.u32) \n\n\n\n index_list_x = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_x),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n\n\n # Concatenating the index list for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n \n base_indices_minus_two = (base_indices_y - 2).as_type(af.Dtype.u32) \n base_indices_minus = (base_indices_y - 1).as_type(af.Dtype.u32) \n base_indices_plus = (base_indices_y + 1).as_type(af.Dtype.u32) \n base_indices_plus_two = (base_indices_y + 2).as_type(af.Dtype.u32) \n\n\n index_list_y = af.join( 1,\\\n af.join(1, base_indices_minus_two, base_indices_minus, base_indices_y),\\\n af.join(1, base_indices_plus, base_indices_plus_two),\\\n )\n\n # Concatenating the positions_x for determining weights for near by grid nodes in y direction\n # TSC affect 5 nearest grid nodes around in 1 Dimensions\n\n positions_x_5x = af.join( 0,\\\n af.join(0, positions_x, positions_x, positions_x),\\\n af.join(0, positions_x, positions_x),\\\n )\n\n positions_y_5x = af.join( 0,\\\n af.join(0, positions_y, positions_y, positions_y),\\\n af.join(0, positions_y, positions_y),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n\n distance_nodes_x = x_grid[af.flat(index_list_x)]\n\n distance_nodes_y = y_grid[af.flat(index_list_y)]\n\n\n W_x = 0 * distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx)**2\n\n temp = af.where((af.abs(distance_nodes_x - positions_x_5x) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] - positions_x_5x[temp])/dx))**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy)**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] - positions_y_5x[temp])/dy))**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_y.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 
indices around the particle\n \n S0_x = af.tile(W_x, 1, 1, 5)\n S0_y = af.tile(W_y, 1, 1, 5)\n\n\n S0_y = af.reorder(S0_y, 0, 2, 1)\n\n\n\n #Computing S1_x and S1_y\n ###########################################################################################\n\n positions_x_5x_new = af.join( 0,\\\n af.join(0, positions_x_new, positions_x_new, positions_x_new),\\\n af.join(0, positions_x_new, positions_x_new),\\\n )\n\n positions_y_5x_new = af.join( 0,\\\n af.join(0, positions_y_new, positions_y_new, positions_y_new),\\\n af.join(0, positions_y_new, positions_y_new),\\\n )\n\n\n\n\n # Determining S0 for positions at t = n * dt\n\n W_x = 0 * distance_nodes_x.copy()\n W_y = 0 * distance_nodes_y.copy()\n\n\n # Determining weights in x direction\n\n temp = af.where(af.abs(distance_nodes_x - positions_x_5x_new) < (0.5*dx) )\n\n if(temp.elements()>0):\n W_x[temp] = 0.75 - (af.abs(distance_nodes_x[temp] - positions_x_5x_new[temp])/dx)**2\n\n temp = af.where((af.abs(distance_nodes_x - positions_x_5x_new) >= (0.5*dx) )\\\n * (af.abs(distance_nodes_x - positions_x_5x_new) < (1.5 * dx) )\\\n )\n\n if(temp.elements()>0):\n W_x[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_x[temp] \\\n - positions_x_5x_new[temp])/dx\\\n )\\\n )**2\n\n\n\n # Determining weights in y direction\n\n temp = af.where(af.abs(distance_nodes_y - positions_y_5x_new) < (0.5*dy) )\n\n if(temp.elements()>0):\n W_y[temp] = 0.75 - (af.abs(distance_nodes_y[temp] \\\n - positions_y_5x_new[temp]\\\n )/dy\\\n )**2\n\n temp = af.where((af.abs(distance_nodes_y - positions_y_5x_new) >= (0.5*dy) )\\\n * (af.abs(distance_nodes_y - positions_y_5x_new) < (1.5 * dy) )\\\n )\n\n if(temp.elements()>0):\n W_y[temp] = 0.5 * (1.5 - (af.abs(distance_nodes_y[temp] \\\n - positions_y_5x_new[temp])/dy\\\n )\\\n )**2\n\n # Restructering W_x and W_y for visualization and ease of understanding\n\n W_x = af.data.moddims(W_x, positions_x.elements(), 5)\n W_y = af.data.moddims(W_y, positions_x.elements(), 5)\n\n # Tiling the S0_x and S0_y for the 25 indices around the particle \n \n S1_x = af.tile(W_x, 1, 1, 5)\n S1_y = af.tile(W_y, 1, 1, 5)\n\n S1_y = af.reorder(S1_y, 0, 2, 1)\n\n\n ###########################################################################################\n\n # Determining the final weight matrix for currents in 3D matrix form factor\n\n\n W_x = (S1_x - S0_x) * (S0_y + (0.5 *(S1_y - S0_y)) )\n\n\n W_y = (S1_y - S0_y) * (S0_x + (0.5 *(S1_x - S0_x)) )\n\n\n ###########################################################################################\n\n\n # Assigning Jx and Jy according to Esirkepov's scheme\n\n Jx = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n Jy = af.data.constant(0, positions_x.elements(), 5, 5, dtype = af.Dtype.f64)\n\n\n Jx[:, 0, :] = -1 * charge_electron * (dx/dt) * W_x[:, 0, :].copy()\n Jx[:, 1, :] = Jx[:, 0, :] + -1 * charge_electron * (dx/dt) * W_x[:, 1, :].copy()\n Jx[:, 2, :] = Jx[:, 1, :] + -1 * charge_electron * (dx/dt) * W_x[:, 2, :].copy()\n Jx[:, 3, :] = Jx[:, 2, :] + -1 * charge_electron * (dx/dt) * W_x[:, 3, :].copy()\n Jx[:, 4, :] = Jx[:, 3, :] + -1 * charge_electron * (dx/dt) * W_x[:, 4, :].copy()\n \n # Computing current density using currents\n \n Jx = (1/(dx * dy)) * Jx\n\n\n Jy[:, :, 0] = -1 * charge_electron * (dy/dt) * W_y[:, :, 0].copy()\n Jy[:, :, 1] = Jy[:, :, 0] + -1 * charge_electron * (dy/dt) * W_y[:, :, 1].copy()\n Jy[:, :, 2] = Jy[:, :, 1] + -1 * charge_electron * (dy/dt) * W_y[:, :, 2].copy()\n Jy[:, :, 3] = Jy[:, :, 2] + -1 * charge_electron * (dy/dt) * W_y[:, :, 
3].copy()\n Jy[:, :, 4] = Jy[:, :, 3] + -1 * charge_electron * (dy/dt) * W_y[:, :, 4].copy()\n \n # Computing current density using currents\n\n Jy = (1/(dx * dy)) * Jy\n\n # Preparing the final index and current vectors\n ###########################################################################################\n \n \n # Determining the x indices for charge deposition\n index_list_x_Jx = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jx = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n\n\n currents_Jx = af.flat(Jx)\n\n # Determining the x indices for charge deposition\n index_list_x_Jy = af.flat(af.tile(index_list_x, 1, 1, 5))\n\n # Determining the y indices for charge deposition\n y_current_zone = af.tile(index_list_y, 1, 1, 5)\n index_list_y_Jy = af.flat(af.reorder(y_current_zone, 0, 2, 1))\n \n # Flattenning the Currents array\n currents_Jy = af.flat(Jy)\n\n af.eval(index_list_x_Jx, index_list_y_Jx)\n af.eval(index_list_x_Jy, index_list_y_Jy)\n af.eval(currents_Jx, currents_Jy)\n\n\n return index_list_x_Jx, index_list_y_Jx, currents_Jx,\\\n index_list_x_Jy, index_list_y_Jy, currents_Jy", "def grid_inflation(self):\n for obs in self.obstacle_list:\n\n inflation_x1 = round((obs[0][0]-self._inflation_radius)/self.step_size)\n\n inflation_y2 = round((obs[0][1] + obs[2] +self._inflation_radius)/self.step_size)\n\n inflation_x2 = round((obs[0][0] + obs[1] +self._inflation_radius)/self.step_size)\n\n inflation_y1 = round((obs[0][1] -self._inflation_radius)/self.step_size)\n\n self.grid[1, inflation_x1:inflation_x2+1,\n inflation_y1:inflation_y2+1] = INFLATION_COST\n\n # border inflation\n self.grid[1, 0:self.gridwidth+1, 0:round(self._inflation_radius/self.step_size)+1] = INFLATION_COST\n self.grid[1, 0:self.gridwidth+1, self.gridheight-round(self._inflation_radius / self.step_size):self.gridheight+1] = INFLATION_COST\n self.grid[1, 0:round(self._inflation_radius/self.step_size)+1, 0:self.gridheight+1] = INFLATION_COST\n self.grid[1, self.gridwidth-round(self._inflation_radius/self.step_size):self.gridwidth+1, 0:self.gridheight+1] = INFLATION_COST\n\n # if NEED_DRAW_INFLATED_GRID:\n # for i in range(self.gridwidth):\n # plt.scatter(i,0)\n # plt.scatter(i,self.gridheight)\n # for j in range(self.gridheight):\n # plt.scatter(0,j)\n # plt.scatter(self.gridwidth,j)\n # if self.grid[i, j] != 0:\n # plt.scatter(i,j)\n # plt.show()\n\n return self.grid", "def update(self, X: np.ndarray):\n\n self._num_items += 1\n\n if isinstance(X, int) or isinstance(X, float):\n X = np.array([X])\n self._is_uni = True\n elif isinstance(X, np.ndarray):\n X = np.array([X]).flatten()\n if len(X) == 1:\n self._is_uni = True\n else:\n self._is_uni = False\n else:\n raise NotImplementedError(\"Only support int, float and np.ndarray\")\n\n if self._is_global:\n\n tmp = defaultdict(float)\n\n for index, item in enumerate(X):\n self._max[index] = (\n self._max[index] if self._max[index] > item else item\n )\n self._min[index] = (\n self._min[index] if self._min[index] < item else item\n )\n self._sum[index] += X[index]\n old_mean = self._mean[index]\n tmp[index] = item - self._mean[index]\n self._mean[index] = self._sum[index] / self._num_items\n self._sum_squares[index] += (X[index] - old_mean) * (\n X[index] - self._mean[index]\n )\n self._var[index] = self._sum_squares[index] / self._num_items\n self._std[index] = math.sqrt(self._var[index])\n else:\n self._window.append(X)", "def calc(self):\n np = 0\n for 
cell in self.cells:\n n = self.cell_np[cell]\n np += n\n self.dnp = np - self.np\n self.np = np", "def initial_vector(self):\n\n return asarray([np.random.uniform(l, u) for l, u in self.bounds])", "def vector(self):\n return self.__vector", "def c(self) -> np.ndarray:\n return self._vector[10:12]", "def compute_local_statistic(self, x_win):\n delta_mu = np.mean(x_win, axis=1) - self._mu_0.reshape(1, -1)\n # d2_ = np.sum(delta_mu.dot(self._s2_0inv) * delta_mu, axis=1)\n d2 = np.einsum('ij,ij->i', delta_mu.dot(self._s2_0inv), delta_mu)\n d2 /= (1. / self._training_sample_size + 1. / self.window_size)\n return d2", "def update(self, x):\n with torch.no_grad():\n x_channel_first = x.to(self.dtype).transpose(0, 1).reshape(x.size(1), -1)\n new_count = torch.full_like(self._count, x.size(0)).float()\n new_mean = x_channel_first.mean(-1)\n\n if self._distributed:\n distrib.all_reduce(new_count)\n\n new_mean = new_mean.float()\n distrib.all_reduce(new_mean)\n\n # msg = torch.cat([new_mean, new_count.unsqueeze(-1)])\n # distrib.all_reduce(msg)\n # new_mean = msg[0:-1]\n # new_count = msg[-1]\n\n new_mean /= distrib.get_world_size()\n\n # new_var = (x_channel_first - new_mean.view(x.size(1), -1)).pow(2).mean(-1)\n new_var = (\n x_channel_first - new_mean.view(x.size(1), -1).type_as(x)\n ).pow(2).mean(-1, keepdim=True).float()\n\n if self._distributed:\n distrib.all_reduce(new_var)\n new_var /= distrib.get_world_size()\n new_var = new_var.view(self._shape)\n new_mean = new_mean.view(self._shape).float()\n\n self._mean, self._var, self._count = welford_update(\n self._mean, self._var, self._count, new_mean, new_var, new_count,\n )", "def _get_slice_to_world(self) :\n \n return self._slice_to_world", "def updateWorld(self):\n pass", "def vector(molec, dihed, nonH, energy):\n #Torison\n if dihed:\n pass\n #XYZ\n else:\n coords = ()\n if nonH:\n for atom in molec.atoms:\n coords += atom.coords\n else:\n for atom in molec.atoms:\n if atom.atomicnum > 1:\n coords += atom.coords\n #Energy\n if energy:\n coords += (molec.energy/10.0,)\n return coords", "def get_observation(world_state,weather,time_now):\n obs = np.zeros((2, OBS_SIZE, OBS_SIZE))\n object_list=[]\n while world_state.is_mission_running:\n time.sleep(0.1)\n world_state = agent_host.getWorldState()\n \n if len(world_state.errors) > 0:\n raise AssertionError('Could not load grid.')\n\n if world_state.number_of_observations_since_last_state > 0:\n # First we get the json from the observation API\n msg = world_state.observations[-1].text\n observations = json.loads(msg)\n # Get observation\n if weather != 'clear':\n object_list.append(weather)\n else:\n object_list.append('sunny')\n\n if time_now>13000:\n object_list.append(\"night\")\n else:\n object_list.append(\"morning\")\n grid = observations['floorAll']\n \n if grid.count('water')>3:\n object_list.append(\"river\")\n if grid.count('leaves')>2:\n object_list.append(\"tree\")\n if grid.count('lava')>1:\n object_list.append(\"lava\")\n \n animal=observations['NearbyEntities']\n target_ani=['Sheep','Cow','Pig']\n for i in animal:\n if i['name'] not in object_list and i['name'] in target_ani:\n object_list.append(i['name'])\n break\n \n \n return object_list", "def update(self, observations: Observations, action: CARLAAction,\n reward: float, new_observations: Observations, *args: Any,\n **kwargs: Any) -> None:\n self.value += np.linalg.norm( # Euclidean distance in meters\n x=new_observations[\"location\"] - observations[\"location\"],\n ord=2,\n )", "def vec_coords(label_coords, LAMBDA=1, 
spacing=1):\n\n #LAMBDA = 1\n #SPACING = 8\n SPACING = spacing\n \n coords_pial = np.array(label_coords[0])\n coords_gwb = np.array(label_coords[6]) #[::SPACING]\n\n\n ##### Normal Vector Pial\n #derivatives and velocity\n x_der = np.gradient(coords_pial[:,0])\n y_der = np.gradient(coords_pial[:,1]) #col slicing, R, np.array, [:,0]\n velo = np.array([[x_der[i], y_der[i]] for i in range(x_der.size)])\n\n #displacement, tangent\n displ = np.sqrt( x_der * x_der + y_der * y_der ) #speed, time\n tang = np.array([1/displ] *2 ).transpose() * velo\n\n #outward point surface normal, from tang flip, make first neg, opv\n pial_normal = [ [y*-1, x] for x, y in zip(tang[:,0], tang[:,1]) ]\n\n\n ##### Normal Vector GWB\n #derivatives and velocity\n x_der = np.gradient(coords_gwb[:,0])\n y_der = np.gradient(coords_gwb[:,1]) \n velo = np.array([[x_der[i], y_der[i]] for i in range(x_der.size)])\n\n #displacement, tangent\n displ = np.sqrt( x_der * x_der + y_der * y_der ) \n tang = np.array([1/displ] *2 ).transpose() * velo\n\n #outward point surface normal, owv\n gwb_normal = [ [y*-1, x] for x, y in zip(tang[:,0], tang[:,1]) ]\n\n\n\n plot_coords_lst = []\n used_energy_lst = []\n ##### FIND ENERGY\n # for each coord on the pial surface, x\n for x in range(len(coords_pial)):\n pial = coords_pial[x]\n \n #find vector pial to gwb, unit length, tv\n if x == 0:\n min_energy = []\n normal_term_lst = []\n vec_dist_lst = []\n parallel_term_lst = []\n vec_dist_lst = []\n for v in range(len(coords_gwb)):\n #find vector distance from pial to gwb\n gwb = coords_gwb[v]\n vec_pial_gwb = np.array(gwb) - np.array(pial)\n vec_mag = np.array(vec_pial_gwb[0]**2 + vec_pial_gwb[1]**2)\n unit_vec_dist = vec_pial_gwb/vec_mag\n vec_dist_lst.append(unit_vec_dist)\n\n #find dot product for tv and owhite, tv and opial\n dot_prod1 = np.dot(vec_dist_lst[v], gwb_normal[v])\n dot_prod2 = np.dot(vec_dist_lst[v], pial_normal[x])\n\n #normal term for each v\n normal_term_v = (1 - np.abs(dot_prod1)) + (1 - np.abs(dot_prod2))\n normal_term_lst.append(normal_term_v)\n\n #parallel term for each v \n # if x == 0:\n \n #find dot product, using self distance\n dot_prod3 = np.dot(vec_dist_lst[v], vec_dist_lst[v])\n parallel_term_v = (1 - np.abs(dot_prod3))\n parallel_term_lst.append(parallel_term_v)\n \n #energy, no summation\n ind_energy = list(enumerate(np.array([((1-LAMBDA)*n) + (LAMBDA*p) for n, p in \\\n zip(normal_term_lst, parallel_term_lst)]).T))\n \n #find local minima energy\n for i in range(len(ind_energy)):\n curr = ind_energy[i]\n fut = ind_energy[i+1]\n if fut[1] > curr[1]:\n min_energy.append(curr)\n used_energy_lst.append(curr)\n break\n\n # append coordinates to plot straight vector from pial to gwb, min energy\n gwb_idx = min_energy.pop()[0]\n # gwb_idx = min_energy[-1][0]\n plot_coords_lst.append([pial, list(coords_gwb[gwb_idx])])\n\n elif x > 0:\n min_energy = []\n normal_term_lst = []\n vec_dist_lst = []\n parallel_term_lst = []\n vec_dist_lst = []\n \n \n # used_start = int(used_energy_lst[-1][0])+20\n used_start = used_energy_lst[-1][0]\n\n for v in list( range(used_start, len(coords_gwb)-1) ):\n #find vector distance from pial to gwb\n gwb = coords_gwb[v]\n vec_pial_gwb = np.array(gwb) - np.array(pial)\n vec_mag = np.array(vec_pial_gwb[0]**2 + vec_pial_gwb[1]**2)\n unit_vec_dist = vec_pial_gwb/vec_mag\n vec_dist_lst.append(unit_vec_dist)\n\n #find dot product for tv and owhite, tv and opial\n dot_prod1 = np.dot(vec_dist_lst[-1], gwb_normal[v])\n dot_prod2 = np.dot(vec_dist_lst[-1], pial_normal[x])\n\n #normal term for each 
v\n normal_term_v = (1 - np.abs(dot_prod1)) + (1 - np.abs(dot_prod2))\n normal_term_lst.append(normal_term_v)\n\n #parallel term for each v \n #find dot product, using neighbour vector distance\n knear_vec_dist = np.array(plot_coords_lst[-1][1]) - np.array(plot_coords_lst[-1][0])\n dot_prod3 = np.dot(vec_dist_lst[-1], knear_vec_dist)\n parallel_term_v = (1 - np.abs(dot_prod3))\n parallel_term_lst.append(parallel_term_v) \n\n #energy, no summation\n ind_energy = list( enumerate(np.array([ ((1-LAMBDA)*n) + (LAMBDA*p) for n, p in \\\n zip(normal_term_lst, parallel_term_lst)]).T, used_energy_lst[-1][0])) #v\n\n #find local minima energy, and associated coordinate\n for i in range(len(ind_energy)):\n try:\n curr = ind_energy[i]\n fut = ind_energy[i+1]\n except(IndexError):\n continue\n \n if fut[1] > curr[1]:\n min_energy.append(curr)\n used_energy_lst.append(curr)\n # print(\"curr energy = \", curr)\n break\n\n try:\n gwb_idx = min_energy.pop()[0] #+ 20 #atleast deltaX apart\n plot_coords_lst.append([pial, list(coords_gwb[gwb_idx])])\n # print(\"energy coordinates = \", list( map(list, [pial, coords_gwb[gwb_idx]])) )\n except(IndexError):\n continue\n\n\n \"\"\"\n #encourage atleast one space between each end point coordinate\n energy_idx = [i[0] for i in used_energy_lst]\n new_energy_idx = []\n energy_idx_cp = energy_idx.copy()\n\n count = 0\n same_count = 0\n # loop to remove repeat indices, makes list two short\n while count < len(energy_idx):\n energy_concat = []\n i = count\n curr = energy_idx_cp[i]\n if energy_idx_cp[i] not in new_energy_idx:\n new_energy_idx.append(curr)\n same_count = 0\n else: \n energy_idx_cp = energy_idx_cp[:i] + list((np.array(energy_idx_cp[i:]) \\\n + same_count))\n\n same_count+=1\n \n count+=1\n \"\"\"\n\n\n #encourage even space between each end point coordinate\n energy_idx = [i[0] for i in used_energy_lst]\n new_energy_idx = list(map(math.floor , np.linspace(energy_idx[0] , \\\n len(coords_gwb[energy_idx[0]: len(coords_gwb)]), num=len(energy_idx)))) \n\n # new_plot_coords_lst = [[list(i[0]), list(coords_gwb[j])] for i, j in \\\n # zip(plot_coords_lst, new_energy_idx)]\n\n new_plot_coords_lst = []\n for i, j in zip(plot_coords_lst, new_energy_idx):\n try:\n pial_gwb_plot = [list(i[0]), list(coords_gwb[j])]\n new_plot_coords_lst.append(pial_gwb_plot) \n except(IndexError):\n continue\n\n #space vectors according to SPACING var\n new_plot_coords_lst = new_plot_coords_lst[::SPACING] \n\n return(new_plot_coords_lst)", "def get_fitness_vector(self):\r\n vector = list()\r\n \r\n for particle in self.population: \r\n vector.append(particle.current_fitness)\r\n \r\n return vector", "def get_observation(self, physics):\n obs = collections.OrderedDict()\n render_kwargs = {}\n render_kwargs['camera_id'] = 0\n render_kwargs['width'] = W\n render_kwargs['height'] = W\n image = physics.render(**render_kwargs)\n self.image=image\n\n if self._random_location or self._maxq:\n one_hot = np.zeros(4).astype('float32')\n one_hot[self._current_loc] = 1\n obs['location'] = np.tile(one_hot, 50).reshape(-1).astype('float32')\n\n if not self._pixels_only:\n obs['position'] = physics.data.geom_xpos[5:,:].reshape(-1).astype('float32')\n\n return obs", "def vec(self):\r\n\r\n xv = np.arange(self.dx / 2, self.lx, self.dx)\r\n yv = np.arange(-self.ly / 2 + self.dy / 2, self.ly / 2, self.dy)\r\n zv = np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n if self.ox != 0:\r\n xv = np.arange(self.ox, self.lx + self.ox, self.dx)\r\n yv = np.arange(self.oy, self.ly + self.oy, self.dy)\r\n zv = 
np.arange(self.oz, self.lz + self.oz, self.dz)\r\n\r\n return xv, yv, zv", "def get_feature_vector(self, mode=\"binary\"):\n voxel_n = np.ravel_multi_index([self.voxel_x, self.voxel_y, self.voxel_z], self.x_y_z)\n if mode == \"binary\":\n vector = np.zeros(self.n_voxels)\n vector[np.unique(voxel_n)] = 1\n vector = vector.reshape(self.x_y_z)\n return vector\n\n elif mode == \"binary_with_nopoints\":\n vector = np.zeros(self.n_voxels)\n vector[np.unique(voxel_n)] = 1\n vector = vector.reshape(self.x_y_z)\n tot_bounds = abs(self.bounds[0]) + abs(self.bounds[1])\n # TODO can be parallelised\n non_points = []\n for point in self.points_inside_bounds:\n start, end = get_points_from_bounds(self.bounds[0], self.bounds[1], self.origin, point)\n start_projected_voxelgrid = (start - self.bounds[0])\n end_projected_voxelgrid = (end - self.bounds[0])\n\n assert np.all(start_projected_voxelgrid + PRECISION >= 0), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(start_projected_voxelgrid + PRECISION, tot_bounds)\n assert np.all(end_projected_voxelgrid + PRECISION >= 0), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(end_projected_voxelgrid + PRECISION, tot_bounds)\n assert np.all(start_projected_voxelgrid - PRECISION <= tot_bounds), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(start_projected_voxelgrid, tot_bounds)\n assert np.all(end_projected_voxelgrid - PRECISION <= tot_bounds), 'Start / end point for nopoints calculation out of bounds: {} / {}'.format(end_projected_voxelgrid, tot_bounds)\n\n start_projected_voxelgrid = np.clip(start_projected_voxelgrid, 0, tot_bounds - PRECISION)\n end_projected_voxelgrid = np.clip(end_projected_voxelgrid, 0, tot_bounds - PRECISION)\n\n new_non_points = list(supercover_line(start_projected_voxelgrid, end_projected_voxelgrid, self.sizes))\n non_points.extend(new_non_points)\n # if not np.all(np.array(new_non_points) >= 0) or not np.all(np.array(new_non_points).max(axis=0) < vector.shape):\n # print('Non-point detected with indices under 0 or over size')\n # print('start = {}'.format(start_projected_voxelgrid))\n # print('end = {}'.format(end_projected_voxelgrid))\n # print('Max Size: {}'.format(vector.shape))\n # print('Wrong points:')\n # print(np.array(new_non_points))\n # raise Exception()\n\n # convert only cells that are 0 to -1, NOT 1 to -1\n non_points = np.unique(np.array(non_points), axis=0).astype(int)\n\n temp = vector[non_points[:, 0], non_points[:, 1], non_points[:, 2]]\n temp[temp == 0] = -1\n vector[non_points[:, 0], non_points[:, 1], non_points[:, 2]] = temp\n return vector\n elif mode == \"density\":\n vector = np.zeros(self.n_voxels)\n count = np.bincount(voxel_n)\n vector[:len(count)] = count\n vector /= len(voxel_n)\n vector = vector.reshape(self.x_y_z)\n return vector\n # elif mode == \"TDF\":\n # vector = np.zeros(self.n_voxels)\n # # truncation = np.linalg.norm(self.shape)\n # kdt = cKDTree(self.points_inside_bounds)\n # vector, i = kdt.query(self.voxel_centers, n_jobs=-1)\n # vector = vector.reshape(self.x_y_z)\n # return vector\n elif mode.endswith(\"_max\"):\n vector = np.zeros(self.n_voxels)\n if not is_numba_avaliable:\n raise ImportError(\"numba is required to compute {}\".format(mode))\n axis = {\"x_max\": 0, \"y_max\": 1, \"z_max\": 2}\n vector = groupby_max(self.points_inside_bounds, voxel_n, axis[mode], vector)\n vector = vector.reshape(self.x_y_z)\n return vector\n elif mode.endswith(\"_mean\"):\n vector = np.zeros(self.n_voxels)\n if not 
is_numba_avaliable:\n raise ImportError(\"numba is required to compute {}\".format(mode))\n axis = {\"x_mean\": 0, \"y_mean\": 1, \"z_mean\": 2}\n voxel_sum = groupby_sum(self.points_inside_bounds, voxel_n, axis[mode], np.zeros(self.n_voxels))\n voxel_count = groupby_count(self.points_inside_bounds, voxel_n, np.zeros(self.n_voxels))\n vector = np.nan_to_num(voxel_sum / voxel_count)\n vector = vector.reshape(self.x_y_z)\n return vector\n\n else:\n raise NotImplementedError(\"{} is not a supported feature vector mode\".format(mode))", "def observation_space(self):\n\n return Box(low=np.array(self.observation_low), high=np.array(self.observation_high), dtype=np.float32)", "def vector(self) -> Vector:\n return self._normal * self._distance_from_origin", "def update_world_points(self, wpSet):\r\n self.world_points = np.empty((0, 5))\r\n for i, row in wpSet.correspondences.iterrows():\r\n Ids = row['ViewId']\r\n if self.id in Ids:\r\n point_2D = row['FeatureIndex'][Ids.index(self.id)]\r\n point_3D = wpSet.world_points[i]\r\n self.world_points = np.append(self.world_points, [np.hstack((point_2D, point_3D))], axis=0)", "def observation_space(self):\n raise NotImplementedError", "def create_observation(self):", "def create_observation(self):", "def update_positions(self, grid):\r\n self.grid = grid", "def update(self):\n self.setVector(0.15, 0.0)", "def VectorObserver(vector, ofdate):\n gast = _sidereal_time(vector.t)\n ovec = [vector.x, vector.y, vector.z]\n if not ofdate:\n ovec = _precession(ovec, vector.t, _PrecessDir.From2000)\n ovec = _nutation(ovec, vector.t, _PrecessDir.From2000)\n return _inverse_terra(ovec, gast)", "def update(self, delta):\n # Computes new positions\n for part in self.particles:\n part.set_xyvxvy(self.runge_kutta(part.to_y(), 0, delta))", "def xvec(self):\n return np.array([self.x, self.y])", "def value_iteration_on_grid_world() -> PolicyAndValueFunction:\n result = get_value_iteration(grid_world, 0.99, 0.01)\n export_to_json(result.pi, 'value_iteration_grid_world')\n return result", "def update(self) -> np.ndarray:\r\n raise NotImplementedError(\"`update` method is not implemented!\")", "def _compute_world_params(self) -> None:\n\n self.states = []\n for row in range(self.grid_height):\n for col in range(self.grid_width):\n cell = row * self.grid_width + col\n cell_type = self.grid[cell]\n\n possible_actions = {\n Action.up: self._get_action(max(row - 1, 0) * self.grid_width + col),\n Action.down: self._get_action(min(row + 1, self.grid_height - 1) * self.grid_width + col),\n Action.right: self._get_action(row * self.grid_width + min(col + 1, self.grid_width - 1)),\n Action.left: self._get_action(row * self.grid_width + max(col - 1, 0))\n }\n\n self.states.append(State(cell, possible_actions, cell_type))", "def _DisplaceCoords(self, disp_mag, disp_vector):\n for i in range(self.mol.n_atoms):\n for j in range(const.NUMDIM):\n self.mol.atoms[i].coords[j] += disp_mag * disp_vector[i][j]\n self.mol.UpdateInternals()", "def global_coords(self) -> GlobalCoordsABC:", "def compute_step(self, step):\r\n self.position += step * self.velocity\r\n self.solpos.append(np.copy(self.position)) \r\n self.solvel.append(np.copy(self.velocity)) \r\n self.solvel_mag.append(np.linalg.norm(np.copy(self.velocity)))", "def update(self):\n self.x += self.vx\n self.y += self.vy", "def covar(self):\n wwt = self.ww.copy()\n wwt[self.ww>0] = 1.0/self.ww[self.ww>0]\n covar = np.zeros((self.nstar,self.nstar),dtype=self.ww.dtype)\n for i in range(self.nstar):\n for j in range(i+1):\n covar[i,j] = 
np.sum(wwt * self.vv[:,i] * self.vv[:,j])\n covar[j,i] = covar[i,j]\n return covar", "def update_z(self):\n w_actual = self.w_estimates[-1]\n # update the underlying linear model\n self.linear_model.w = w_actual\n gamma = np.divide(abs(w_actual), np.sqrt(self.z.value.T)).T\n Gamma_diag = np.zeros((gamma.shape[0], gamma.shape[0]), float)\n np.fill_diagonal(Gamma_diag, gamma)\n Sigma_y = self.lambda_param * np.eye(\n self.linear_model.data_num\n ) + self.linear_model.dict_mtx @ Gamma_diag @ np.transpose(\n self.linear_model.dict_mtx\n )\n\n self.z.value = np.diag(\n np.transpose(self.linear_model.dict_mtx)\n @ np.linalg.pinv(Sigma_y, hermitian=True)\n @ self.linear_model.dict_mtx\n )\n self.z_estimates.append(self.z.value)\n self.gamma_estimates.append(gamma)\n # print(f'state: {self.state_name} Sigma_y cond: {np.linalg.cond(np.linalg.pinv(Sigma_y, hermitian=True))} gamma: {np.divide(np.abs(w_actual),np.sqrt(self.z.value))}')", "def _update_grid_map_from_world_cord(self, world_cords_xy):\n # find occupancy map cords\n # self.logger.debug(f\"Updating Grid Map: {np.shape(world_cords_xy)}\")\n occu_cords = self.cord_translation_from_world(\n world_cords_xy=world_cords_xy)\n # self.logger.debug(f\"Occupancy Grid Map Cord shape = {np.shape(occu_cords)}\")\n occu_cords_x, occu_cords_y = occu_cords[:, 0], occu_cords[:, 1]\n min_occu_cords_x, max_occu_cords_x = np.min(occu_cords_x), np.max(occu_cords_x)\n min_occu_cords_y, max_occu_cords_y = np.min(occu_cords_y), np.max(occu_cords_y)\n self.map[min_occu_cords_y: max_occu_cords_y, min_occu_cords_x:max_occu_cords_x] -= 0.01\n self.map[occu_cords_y, occu_cords_x] += self.occu_prob\n self.map[min_occu_cords_y: max_occu_cords_y,\n min_occu_cords_x:max_occu_cords_x] = \\\n self.map[min_occu_cords_y: max_occu_cords_y, min_occu_cords_x:max_occu_cords_x].clip(0, 1)", "def cxvec(self):\n return np.array([self.cx, self.cy])", "def vel_inicial(x): #Velocidad inicial como un vector de ceros\r\n return np.zeros_like(x)", "def _as_vector(self):\n return np.hstack([self.global_parameters, self.weights])", "def potential_new_obs(self) -> Iterable[GriddedPerm]:", "def gen_occupation_vector(state, states):\n\n # initialise vector of occupation numbers\n occ_vector = np.zeros([len(states[0])])\n\n # we square the weight because one contribution from bra and one from ket\n for i, istate in enumerate(states):\n occ_vector += (abs(state[i])**2)*istate\n\n return occ_vector", "def update_task_location_vector(self):\n for counter, task in enumerate(self.tasks):\n location = task.getloc()\n if location[0] == 0:\n vectorized_task_loc = location[1]\n elif location[0] == 1:\n vectorized_task_loc = 4 + location[1]\n elif location[0] == 2:\n vectorized_task_loc = 8 + location[1]\n else: # location[0] == 3\n vectorized_task_loc = 12 + location[1]\n self.how_many_tasks_in_each_square[0][vectorized_task_loc] += 1\n self.task_locations[0][counter] = vectorized_task_loc\n # print(location)\n # print(self.how_many_tasks_in_each_square)" ]
[ "0.6230598", "0.6210779", "0.6134541", "0.6021939", "0.5974683", "0.5839655", "0.58316547", "0.58069307", "0.57648754", "0.56969017", "0.56762373", "0.56254745", "0.5595447", "0.5594612", "0.55772495", "0.55599743", "0.5549346", "0.5541184", "0.5500111", "0.54753816", "0.54575694", "0.54378617", "0.5437749", "0.543576", "0.54248244", "0.5398019", "0.5393093", "0.53733325", "0.5362315", "0.5358641", "0.5356128", "0.5353669", "0.5348433", "0.5342727", "0.5342363", "0.5337947", "0.5334707", "0.53222984", "0.5320647", "0.5320647", "0.5320201", "0.53196865", "0.5315955", "0.530282", "0.530256", "0.5296826", "0.52916783", "0.52875406", "0.52859193", "0.5284578", "0.5281102", "0.52756035", "0.52665097", "0.52647173", "0.5256665", "0.52561206", "0.5255023", "0.5251492", "0.5244986", "0.5235113", "0.5225805", "0.52240145", "0.5221279", "0.5219523", "0.5216745", "0.5214186", "0.5212776", "0.5212463", "0.52120394", "0.52109087", "0.5204885", "0.52003783", "0.5191133", "0.5188758", "0.51855266", "0.5185201", "0.5174562", "0.5173225", "0.51698214", "0.51698214", "0.51652527", "0.5162812", "0.51614475", "0.5154615", "0.5153857", "0.51516825", "0.5150201", "0.5148265", "0.5140415", "0.5136987", "0.5134146", "0.5133797", "0.51177585", "0.5115378", "0.5107977", "0.51031554", "0.5098936", "0.50962627", "0.50897044", "0.5077212", "0.50767356" ]
0.0
-1
Check move possibility using observation window
def is_move_possible(obs_window, displacement):
    # The agent occupies the centre cell of the observation window.
    pos_row = obs_window.shape[0] // 2
    pos_col = obs_window.shape[1] // 2
    new_pos_row = pos_row + displacement[0]
    new_pos_col = pos_col + displacement[1]
    # The target cell must be traversable (value 0) ...
    is_traversable = obs_window[new_pos_row, new_pos_col] == 0
    # ... and a diagonal step must not cut the corner past an occupied cell.
    is_shortcut = obs_window[new_pos_row, pos_col] or obs_window[pos_row, new_pos_col]
    return is_traversable and (not is_shortcut)
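A minimal usage sketch for the document above (an assumption on semantics: obs_window is a small 2-D NumPy occupancy grid centred on the agent, with 0 marking free cells and non-zero marking obstacles — the grid values below are hypothetical):

import numpy as np

# Hypothetical 3x3 observation window centred on the agent (0 = free, 1 = wall).
obs_window = np.array([
    [0, 1, 0],
    [0, 0, 0],
    [0, 1, 0],
])

print(is_move_possible(obs_window, (0, 1)))    # step right onto a free cell -> True
print(is_move_possible(obs_window, (-1, 0)))   # step up into the wall -> False
print(is_move_possible(obs_window, (-1, -1)))  # diagonal up-left cuts the corner -> False

Note the shortcut test: even when a diagonal target cell is itself free, the move is rejected if either adjacent orthogonal cell is occupied.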
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _ismoving(self):\n return self.dp.state()==PyTango.DevState.MOVING", "def move_window():\n\tif SLIDING_WINDOW:\n\t\t# get the chosen predicates\n\t\tpred = Predicate.objects.filter(pk__in=[p+1 for p in toggles.CHOSEN_PREDS])\n\n\t\t# handle window properties\n\t\tfor p in pred:\n\t\t\tp.move_window()", "def test_window():\n # Generate observations with random times\n timeline = random_timed_observation_timeline()\n\n # Defaults to one hour\n for window in timeline.windows():\n # Gotta be a tuple, though we don't know the length\n assert isinstance(window, tuple)\n assert len(window) > 0\n\n # Check the types\n for o in window:\n assert isinstance(o, Observation)\n\n # Double check that Observations in the window are sorted (for fun)\n for o1, o2 in zip(window, window[1:]):\n assert o1 < o2\n\n # Make sure each member is within an hour of the first.\n # We know they're sorted, so just check first and last.\n assert (window[0].time + timedelta(hours=1)) > window[-1].time", "def ismoving(self):\n return not self.get_par(\"done_moving\")", "def is_moving(self):\n is_moving = self.get_raw_status() & self.STATUS_MOVING\n return bool(is_moving)", "def does_move_win(self, x, y):\n me = self.board[x][y]\n for (dx, dy) in [(0, +1), (+1, +1), (+1, 0), (+1, -1)]:\n p = 1\n while self.is_on_board(x+p*dx, y+p*dy) and self.board[x+p*dx][y+p*dy] == me:\n p += 1\n n = 1\n while self.is_on_board(x-n*dx, y-n*dy) and self.board[x-n*dx][y-n*dy] == me:\n n += 1\n\n if p + n >= (self.connect + 1): # want (p-1) + (n-1) + 1 >= 4, or more simply p + n >- 5\n return True\n\n return False", "def CheckPaneMove(self, pane):\r\n\r\n win_rect = pane.frame.GetRect()\r\n win_rect.x, win_rect.y = pane.floating_pos\r\n \r\n if win_rect == self._last_rect:\r\n return False\r\n\r\n # skip the first move event\r\n if self._last_rect.IsEmpty():\r\n self._last_rect = wx.Rect(*win_rect)\r\n return False\r\n\r\n # skip if moving too fast to avoid massive redraws and\r\n # jumping hint windows\r\n if abs(win_rect.x - self._last_rect.x) > 10 or \\\r\n abs(win_rect.y - self._last_rect.y) > 10:\r\n self._last_rect = wx.Rect(*win_rect)\r\n return False\r\n\r\n return True", "def is_moving(self):\n return self.steps < self.max_steps", "def move_valid(move):\n return True", "def is_moving(self):\n return self.gripper_io.get_signal_value(\"is_moving\")", "def _ispinnedmove(self, from_, to_):\n return False", "def is_moving(self):\n response = self.__send_and_receive(protocol.GET_IS_MOVE)\n value = self.__gen_response_value(response)\n if value:\n # printf(\"\".join(value[1:]))\n if \"\".join(value)[1:] == \"1\":\n return True\n else:\n return False\n else:\n return False", "def is_new_move(my_board, x, y):\n return my_board[x, y] == CLOSED", "def is_moving(self) -> bool:\n return self.orders and self.orders[0].ability.id is AbilityId.MOVE", "def _onmove(self, event):", "def has_moved(self):\n return self.move_count > 0", "def _check_window(x: int, y: int, z: int) -> bool:\n return (x + y) == z", "def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the y-direction\r\n\r\n # checks to see if the player is still within the bounds\r", "def 
move_atoms(self):\n return self.abivars.ionmov != 0", "def ev_windowmoved(self, event: WindowMoved) -> None:", "def move_of_king_and_rook(self, from_row, from_col, to_row, to_col): \n #provjere da li su kraljevi ili topovi inicirali pomijeranje\n if(from_row == 7 and from_col == 0):\n self.wrl_moved = True\n elif(from_row == 7 and from_col == 7):\n self.wrr_moved = True\n elif(from_row == 7 and from_col == 4):\n self.wk_moved = True\n elif(from_row == 0 and from_col == 0):\n self.brl_moved = True\n elif(from_row == 0 and from_col == 7):\n self.brr_moved = True\n elif(from_row == 0 and from_col == 4):\n self.bk_moved = True\n \n #provjera da li je neko pojeo topove\n if(to_row == 7 and to_col == 0):\n self.wrl_moved = True\n elif(to_row == 7 and to_col == 7):\n self.wrr_moved = True\n elif(to_row == 0 and to_col == 0):\n self.brl_moved = True\n elif(to_row == 0 and to_col == 7):\n self.brr_moved = True", "def OnMoveEvent(self, event):\r\n\r\n win_rect = self.GetRect()\r\n\r\n if win_rect == self._last_rect:\r\n return\r\n\r\n # skip the first move event\r\n if self._last_rect.IsEmpty(): \r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n \r\n # skip if moving too fast to avoid massive redraws and\r\n # jumping hint windows\r\n if abs(win_rect.x - self._last_rect.x) > 3 or abs(win_rect.y - self._last_rect.y) > 3:\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n\r\n # prevent frame redocking during resize\r\n if self._last_rect.GetSize() != win_rect.GetSize():\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n return\r\n\r\n self._last3_rect = wx.Rect(*self._last2_rect)\r\n self._last2_rect = wx.Rect(*self._last_rect)\r\n self._last_rect = wx.Rect(*win_rect)\r\n\r\n if _VERSION_STRING < \"2.9\":\r\n leftDown = wx.GetMouseState().LeftDown()\r\n else:\r\n leftDown = wx.GetMouseState().LeftIsDown()\r\n\r\n if not leftDown:\r\n return\r\n\r\n if not self._moving: \r\n self.OnMoveStart(event)\r\n self._moving = True\r\n\r\n if self._last3_rect.IsEmpty():\r\n return\r\n\r\n self.OnMoving(event)", "def check_win(self, board, move):\n for i, j, k in self.winning_cases:\n if board[i] == move and board[j] == move and board[k] == move:\n return True\n return False", "def Active(self):\n return self.NMove > 0", "def no_more_move(self):\n if (self.p_no_move + self.c_no_move == 2):\n return True\n return False", "def isArmWithinWindow(armPos, window):\n for pos in armPos:\n if(pos[0][0]<0 or pos[0][0]>window[0] or pos[1][0]<0 or pos[1][0]>window[0]):\n return False\n if(pos[0][1]<0 or pos[0][1]>window[1] or pos[1][1]<0 or pos[1][1]>window[1]):\n return False\n\n\n return True", "def findPlacesToMove():\n movesDestinations = [];\n \n curY = curBlank[0];\n curX = curBlank[1];\n\n if(curY-1 >= 1): #UP\n movesDestinations.append((curY-1, curX));\n if(curY+1 <= n): #DOWN\n movesDestinations.append((curY+1, curX));\n if(curX-1 >= 1): #LEFT\n movesDestinations.append((curY, curX-1));\n if(curX+1 <= n): #RIGHT\n movesDestinations.append((curY, curX+1));\n \n return movesDestinations;", "def legal_move_on(draw, board):\n start, _ = draw(strategies.sampled_from(sorted(board.pieces)))\n end = draw(strategies.sampled_from(sorted(board.movable_from(start))))\n return start, end", "def test_move():\n human = Human()\n coordinates = [2, 1]\n dimensions = [3, 4]\n\n new_coordinates = human.move(coordinates, 
dimensions)\n\n possible_new_coordinates = [[2, 0], [3, 0], [3, 1], [3, 2], [2, 2], [1, 2], [1, 1], [1, 0]]\n\n assert new_coordinates in possible_new_coordinates", "def test_sliding_window(self):\n frame_length = 512\n step = 100\n x_trainlist = [np.zeros((25187,9)) for b in range(78)]\n y_trainlist = [np.zeros((12,9)) for b in range(78)]\n x_train, y_train = tutorial_pamap2.sliding_window(frame_length, step, x_trainlist, y_trainlist)\n test = len(x_train) == 19266\n assert test", "def test_move_over_past(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move choice 0\n move_choice = 0\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # try to go back in strict mode (fail)\n move_choice = 3\n k1.execute_move(move_choice, strict=True)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . . x . . . .\n . . . x K . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n # try to go back without strict mode\n move_choice = 3\n k1.execute_move(move_choice, strict=False)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((4, 2), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . K x . . . .\n . . x x S . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)", "def check_movement(self):\n is_clear = True # default return value if no obstacles\n # !!! 
IR_SENSORS DISABLED\n if self.move_state == MOV_FORWARD:\n if self.l.look_for_obstacle(OBST_FRONT) == True:\n is_clear = False\n return is_clear", "def sliding(self):\n for i in range(self.tiles_len):\n x, y = self.tilepos[i] # current pos\n X, Y = self.tilePOS[self.tiles[i]] # target pos\n if x != X or y != Y:\n return True", "def is_moving(self):\n return self._proxy.get(\"is_moving\", \"filterwheel\")", "def motion(self):\n priority = {\"north\": [-1, 0], \"south\": [1, 0],\n \"east\": [0, 1], \"west\": [0, -1]}\n\n priority_list = [\"north\", \"south\", \"east\", \"west\"]\n\n critical_point = False\n while critical_point is False:\n row = self.curr_cell.row\n column = self.curr_cell.col\n\n if self.allow_to_move(priority_list[0],\n row + priority[priority_list[0]][0],\n column + priority[priority_list[0]][1]):\n\n self.move(priority_list[0])\n\n elif self.allow_to_move(priority_list[1],\n row + priority[priority_list[1]][0],\n column + priority[priority_list[1]][1]):\n\n self.move(priority_list[1])\n\n elif self.allow_to_move(priority_list[2],\n row + priority[priority_list[2]][0],\n column + priority[priority_list[2]][1]):\n\n self.move(priority_list[2])\n\n elif self.allow_to_move(priority_list[3],\n row + priority[priority_list[3]][0],\n column + priority[priority_list[3]][1]):\n\n self.move(priority_list[3])\n\n else:\n # Robot isolated\n critical_point = True\n\n return self.curr_cell, self.path", "def move(self) -> bool:\n pass", "def process_IN_MOVE_SELF(self, event):", "def CheckMove(self,move):\n\t\tif(move=='w'):\n\t\t\tif(self.x==0):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='s'):\n\t\t\tif(self.x==15):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='d'):\n\t\t\tif(self.y==35):\n\t\t\t\treturn 0\n\t\t\treturn 1\n\t\telif(move=='a'):\n\t\t\tif(self.y==0):\n\t\t\t\treturn 0\n\t\t\treturn 1", "def movement(self):", "def decide_next_move(self):\n pass", "def filter_for_activity(self, window, ssd_thres, minimum_wb):\n data_wb = self.data.copy()\n applyOffsetRemove(data_wb)\n applyFilter(data_wb)\n window = window \n ssd_threshold = ssd_thres\n minimum = minimum_wb\n ranges_ww = runWalkingBoutDetection(\n data_wb,\n ssd_threshold,\n window,\n minimum,\n )\n try:\n segment = ranges_ww[0]\n lower = self.data.loc[segment[0],0]\n upper = self.data.loc[segment[1],0]\n self.data = self.data[(self.data[0]>lower) & (self.data[0]<=upper)]\n except:\n print(\"No movement detected\")", "def op_move_preconditions(self):\n\n if(self.next_move != self.FREE):\n return False\n\n return True", "def _check_for_ko(self):\n try:\n if self._array == self._history[-2][0]:\n self._pop_history()\n raise BoardError('Cannot make a move that is redundant!')\n except IndexError:\n # Insufficient history...let this one slide\n pass", "def check_for_win(self,board, player_id, action):\n\n row = 0\n\n # check which row was inserted last:\n for i in range(ROWS):\n if board[ROWS - 1 - i, action] == EMPTY_VAL:\n row = ROWS - i\n break\n\n # check horizontal:\n vec = board[row, :] == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n\n # check vertical:\n vec = board[:, action] == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n\n # check diagonals:\n vec = np.diagonal(board, action - row) == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return True\n vec = np.diagonal(np.fliplr(board), ACTION_DIM - action - 1 - row) == player_id\n if np.any(np.convolve(WIN_MASK, vec, mode=\"valid\") == 4):\n return 
True\n\n return False", "def test_get_move(self):\n\n class DynamicTimer():\n \"\"\"Dynamic Timer allows the time limit to be changed after the\n timer is initialized so that the search timeout can be triggered\n before the timer actually expires. This allows the timer to expire\n when an event occurs, regardless of the clock time required until\n the event happens.\n \"\"\"\n def __init__(self, time_limit):\n self.time_limit = time_limit\n self.start_time = curr_time_millis()\n\n def time_left(self):\n return self.time_limit - (curr_time_millis() - self.start_time)\n\n w, h = 11, 11 # board size\n adversary_location = (0, 0)\n method = \"minimax\"\n\n # The agent under test starts at the positions indicated below, and\n # performs an iterative deepening minimax search (minimax is easier to\n # test because it always visits all nodes in the game tree at every\n # level).\n origins = [(2, 3), (6, 6), (7, 4), (4, 2), (0, 5), (10, 10)]\n exact_counts = [(8, 8), (32, 10), (160, 39), (603, 35), (1861, 54), (3912, 62)]\n\n for idx in range(len(origins)):\n\n # set the initial timer high enough that the search will not\n # timeout before triggering the dynamic timer to halt by visiting\n # the expected number of nodes\n time_limit = 1e4\n timer = DynamicTimer(time_limit)\n eval_fn = makeEvalStop(exact_counts[idx][0], timer, time_limit)\n agentUT, board = self.initAUT(-1, eval_fn, True, method,\n origins[idx], adversary_location,\n w, h)\n legal_moves = board.get_legal_moves()\n chosen_move = agentUT.get_move(board, legal_moves, timer.time_left)\n\n diff_total = abs(board.counts[0] - exact_counts[idx][0])\n diff_unique = abs(board.counts[1] - exact_counts[idx][1])\n\n self.assertTrue(diff_total <= 1 and diff_unique == 0, ID_FAIL)\n\n self.assertTrue(chosen_move in legal_moves, INVALID_MOVE.format(\n legal_moves, chosen_move))", "def getMoveStatus(self):\n return self.hasMoved", "def ev_windowmoved(self, event: tcod.event.WindowMoved) -> T | None:", "def is_win(my_board):\n return np.count_nonzero(my_board == CLOSED) == NUM_MINES", "def decide_move(self, action):\n x1, y1 = action['xy1']\n x2, y2 = action['xy2']\n self.__state.push(action)", "def observation_space():", "def mouse_move(self, obj, event):\n last_pos = self.iren.GetLastEventPosition()\n next_pos = self.iren.GetEventPosition()\n last_disp_coords = np.asarray([last_pos[0], last_pos[1], 0])\n next_disp_coords = np.asarray([next_pos[0], next_pos[1], 0])\n last_world_coords = self.display_to_world(last_disp_coords)\n next_world_coords = self.display_to_world(next_disp_coords)\n world_direction = (last_world_coords - next_world_coords)[0]\n\n if world_direction > 0:\n direction = 'forwards'\n elif world_direction < 0:\n direction = 'backwards'\n else:\n direction = 'none'\n\n if self.cone_dir == 'start':\n if direction == 'backwards':\n self.start_base_x += .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n isvalid = self.gaps.set_dragged_start(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.start_base_x -= .5\n return\n\n elif direction == 'forwards':\n if self.start_base_x > 0:\n self.start_base_x -= .5\n if self.start_base_x.is_integer():\n ind = str(int(self.start_base_x))\n self.gaps.set_dragged_start(ind)\n self.ren_win.Render()\n\n if self.cone_dir == 'end':\n if direction == 'backwards':\n if self.end_base_x > 0:\n self.end_base_x -= .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n self.gaps.set_dragged_end(ind)\n self.ren_win.Render()\n\n elif direction == 'forwards':\n 
self.end_base_x += .5\n if self.end_base_x.is_integer():\n ind = str(int(self.end_base_x))\n isvalid = self.gaps.set_dragged_end(ind)\n if isvalid:\n self.ren_win.Render()\n else:\n self.end_base_x -= .5\n return", "def test_sliding_window(data, x):\n\n win_dim = data.draw(st.integers(1, x.ndim), label=\"win_dim\")\n win_shape = data.draw(\n st.tuples(*(st.integers(1, s) for s in x.shape[-win_dim:])), label=\"win_shape\"\n )\n step = data.draw(\n st.tuples(*(st.integers(1, s) for s in x.shape[-win_dim:])), label=\"step\"\n )\n\n max_dilation = np.array(x.shape[-win_dim:]) // win_shape\n dilation = data.draw(\n st.one_of(st.none(), st.tuples(*(st.integers(1, s) for s in max_dilation))),\n label=\"dilation\",\n )\n y = sliding_window_view(x, window_shape=win_shape, step=step, dilation=dilation)\n\n if dilation is None:\n dilation = np.ones((len(win_shape),), dtype=int)\n\n for ind in np.ndindex(*y.shape[:win_dim]):\n slices = tuple(\n slice(i * s, i * s + w * d, d)\n for i, w, s, d in zip(ind, win_shape, step, dilation)\n )\n assert_allclose(actual=y[tuple([*ind])], desired=x[(..., *slices)])", "def moving(self):\n if self.done is not None:\n dval = self.done.get(use_monitor=False)\n return dval != self.done_value\n else:\n return self._moving", "def positionsMatch(action, lastObservation):\n acceptedError = 0.01\n for i in range(action.size -1): #lastObservation loses last pose\n if abs(action[i] - lastObservation[i]) > acceptedError:\n return False\n return True", "def is_win(state: StonehengeState) -> bool:\n moves = []\n for move in state.get_possible_moves():\n new_state = deepcopy(state.make_move(move))\n moves.append(new_state.finished())\n return any(moves)", "def check_win(self):\n for pos in self.win_set:\n # s would be all 1 if all positions of a winning move is fulfilled\n # otherwise 1s and 0s\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False", "def choose_move(self):\n return 0", "def test_move_onto_past(self):\n b1 = board.Board(self.small_plain)\n start = np.array((3, 3), dtype='int')\n k1 = knight.Knight(b1,start)\n # set move choice 0\n move_choice = 0\n # change the board layout to reflect the move\n k1.execute_move(move_choice)\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # try to go back in strict mode (fail)\n move_choice = 4\n k1.execute_move(move_choice, strict=True)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((5, 4), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . S . . . .\n . . . x . . . .\n . . . x K . . .\n . . . . . . . .\n . . . . . . . .\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)\n # try to go back without strict mode\n move_choice = 4\n k1.execute_move(move_choice)\n # confirm lack of movement\n self.assertTrue((k1.position == np.array((3, 3), dtype='int')).all())\n # confirm state of board\n with capture_output() as (out, _):\n b1.display()\n my_out = out.getvalue().strip()\n out.close()\n out_list = [ each.strip() for each in\n \"\"\". . . . . . . .\n . . . . . . . .\n . . . . . . . .\n . . . K x . . .\n . . . x x . . .\n . . . x S . . .\n . . . . . . . .\n . . . . . . . 
.\"\"\".split('\\n')]\n expected_out = '\\n'.join(out_list)\n self.assertEqual(my_out, expected_out)", "def __check_move(self):\n move = self.communications.get_move()\n if move is not None and move in self.bot.movements:\n self.communications.set_status(\"Moving Bot {}\".format(move))\n self.make_move(move)\n\n self.communications.send_proximity_data(self.proximity_sensors.read_sensors())", "def test_move():\n\n board = Board()\n\n # invalid moves: out of board boundaries\n assert board.move(board.P1, 100) is False\n assert board.move(board.P2, -2) is False\n\n print(board)\n\n # valid moves\n assert board.move(board.P1, 0) is True\n assert board.move(board.P2, 3) is True\n\n assert board.move(board.P1, 4) is True\n assert board.move(board.P2, 3) is True\n\n assert board.move(board.P1, 3) is True\n assert board.move(board.P2, 4) is True\n\n assert board.move(board.P1, 2) is True\n assert board.move(board.P2, 4) is True\n\n assert board.move(board.P1, 2) is True\n assert board.move(board.P2, 2) is True\n\n assert board.move(board.P1, 1) is True\n assert board.move(board.P2, 4) is True\n\n \"\"\"\n BEFORE:\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|-|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|-|-|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|-|-|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|-|-|-|-|-|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \n AFTER:\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|-|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|-|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|-|-|-|-|O|-|-|2\n +-+-+-+-+-+-+-+\n 3|-|-|O|X|O|-|-|3\n +-+-+-+-+-+-+-+\n 4|-|-|X|O|O|-|-|4\n +-+-+-+-+-+-+-+\n 5|X|X|X|O|X|-|-|5\n +-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n \n \"\"\"", "def _valid_move_exists(self):\n lst = []\n for i_row in range(self._num_rows):\n for i_col in range(self._num_cols):\n if self._valid_placement(i_row, i_col)[0]:\n lst.append((i_row, i_col))\n\n return lst != [] #If lst != [], then the list has elements -> valid move(s) exist", "def test_window_moves_when_new_element_is_added(self):\n sliding_window = SlidingWindow(SLIDING_WINDOW_SIZE)\n network_delays = [100, 101, 102, 103]\n for delay in network_delays:\n sliding_window.add_delay(delay)\n expected_window = [101, 102, 103]\n self.assertListEqual(sliding_window.delays, expected_window)", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def check_valid_move(grid: np.ndarray, current_position: tuple, move: tuple) -> bool:\n # getting coordinates for moved position\n moved_position = tuple(np.add(current_position, move))\n\n def compare_coordinates(a: tuple, b: tuple) -> bool:\n \"\"\"\n Helper function to compare coordinates\n 
Checks if a is smaller than b\n \"\"\"\n return all(np.array(a) < np.array(b))\n\n # checking if coordinates are inside the array (between (0,0) and (N,N))\n if compare_coordinates((0, 0), moved_position) and compare_coordinates(moved_position, grid.shape):\n # checking if the coordinates are not on the obstacle\n if grid[moved_position] == 'x':\n return False\n else:\n return True\n else:\n return False", "def left(self, obs, object):\n for i in range(int((self.no_rays-1)/2)):\n if(obs[self.listOfObjects.index(object)][i] > 0):\n # print(\"found \" + str(object) + \" left\")\n return True\n return False", "def wander(self):\n \n has_new_pos = False\n while not has_new_pos:\n move = random.choice(self.moves)\n new_pos = add_lists(move, self.position)\n has_new_pos = check_bounds(new_pos, self.grid_size)\n return new_pos", "def horde_step(self, observation):", "def is_valid(self, move):\r\n return move > 10 and move < 89", "def status(self):\r\n return not self.sendQuery(\"isMoving\",\"isMoving\")", "def check_winner(self):\n if self.history:\n last_move = self.history[-1]\n last_player = self.get_last_player()\n\n connected_token = last_player*self.n_in_row\n # check for row\n if connected_token in [sum(self.state[last_move[0]][j:j+self.n_in_row]) for j in range(0, self.width-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for column\n if connected_token in [sum(self.state.T[last_move[1]][i:i+self.n_in_row]) for i in range(0, self.height-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for diagonal with slope 1\n diagonal = np.diag(self.state, last_move[1]-last_move[0])\n if connected_token in [sum(diagonal[i:i+self.n_in_row]) for i in range(0, len(diagonal)-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for diagonal with slope -1\n diagonal = np.diag(self.state[:,::-1], self.width-1-last_move[1]-last_move[0])\n if connected_token in [sum(diagonal[i:i+self.n_in_row]) for i in range(0, len(diagonal)-self.n_in_row+1, 1)]:\n self.winner = [True, last_player]\n return self.winner\n\n # check for draw game\n if len(np.argwhere(self.state==0)) == 0:\n self.winner = [True, 0]\n return self.winner\n return self.winner", "def calc_position_change(self, game_state: dict):\n current_position = game_state['self'][3]\n # print(f'Current Position: {current_position}')\n while len(self.positions) > 3:\n self.positions.pop(0)\n\n if current_position in self.positions:\n return True\n else:\n return False", "def check_win():\r\n for mark in markers:\r\n if loc[0] == mark and loc[1] == mark and loc[2] == mark:\r\n return True\r\n if loc[0] == mark and loc[3] == mark and loc[6] == mark:\r\n return True\r\n if loc[0] == mark and loc[4] == mark and loc[8] == mark:\r\n return True\r\n if loc[1] == mark and loc[4] == mark and loc[7] == mark:\r\n return True\r\n if loc[2] == mark and loc[4] == mark and loc[6] == mark:\r\n return True\r\n if loc[2] == mark and loc[5] == mark and loc[8] == mark:\r\n return True\r\n if loc[3] == mark and loc[4] == mark and loc[5] == mark:\r\n return True\r\n if loc[6] == mark and loc[7] == mark and loc[8] == mark:\r\n return True\r\n else:\r\n return False", "def next_move(ttt):\r\n # get board in 2D array form\r\n b = ttt.get_board()\r\n \r\n # if there's a winning move, take it\r\n (cfw, win_move) = check_for_win_lose(b)\r\n if cfw is not None:\r\n if win_move:\r\n print 'COMPUTER WINS!'\r\n return cfw, win_move\r\n # otherwise, pres on with the next 
best move\r\n\r\n # get \"points\" on board. this tells us not only the move\r\n # but also who went first\r\n board_count = sum(sum(b,[]))\r\n \r\n # IF COMPUTER HAS FIRST TURN\r\n # if 1st move\r\n if board_count == 0:\r\n return (2,2), False # take the center\r\n # this is not best strategy for winning, but\r\n # it the human messes up, the computer can win.\r\n # taking a corner first makes it a little easier\r\n # for the computer to win becase the human only\r\n # has one correct move to make: to take the center\r\n \r\n # if 3rd move, and not a winning one\r\n if board_count == 3:\r\n if b[0][1]==2 or b[1][0]==2 or b[0][0]==2:\r\n return (3,3), False\r\n elif b[0][2]==2:\r\n return (3,1), False\r\n elif b[2][0]==2:\r\n return (1,3), False\r\n else:#elif b[1][2]==2 or b[2][1]==2 or b[2][2]==2:\r\n return (1,1), False\r\n\r\n # if 5th move, and not a winning or losing one\r\n if board_count == 6:\r\n b5 = numpy.array([[0,2,1],[0,1,0],[2,0,0]])\r\n if (b == b5).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (3,1), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (1,3), False\r\n\r\n b5 = numpy.array([[0,0,1],[0,1,2],[2,0,0]])\r\n if (b == b5).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (1,3), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (3,1), False\r\n\r\n # at this point, all possible boards should have been covered\r\n\r\n # if 7th move, and a winning or losing one\r\n if board_count == 9:\r\n # find the row or col with 2 open slots and mark it\r\n for ri in range(3):\r\n r = b[ri]\r\n if sum([1 if i==0 else 0 for i in r]) == 2:\r\n if r[0] == 0:\r\n return (ri+1,1), False\r\n else:\r\n return (ri+1,2), False\r\n for ci in range(3):\r\n c = get_col(b, ci)\r\n if sum([1 if i==0 else 0 for i in c]) == 2:\r\n if c[0] == 0:\r\n return (1,ci+1), False\r\n else:\r\n return (2,ci+1), False\r\n\r\n \r\n # IF HUMAN HAS FIRST TURN\r\n # if 2nd move\r\n if board_count == 2:\r\n if b[1][1] == 0:\r\n # if the center is open, computer has\r\n # to take it in order to not lose\r\n return (2,2), False\r\n else:\r\n # otherwise take a corner\r\n return (1,1), False\r\n\r\n # if 4th move\r\n if board_count == 5:\r\n # if we took a corner on move 2 and they\r\n # are using computer's offensive strategy\r\n # when it is first player\r\n b4 = [[1,0,0],[0,2,0],[0,0,2]]\r\n if b==b4:\r\n return (3,1), False\r\n # if we took center on move 2\r\n else:\r\n b4 = numpy.array([[2,0,0],[0,1,0],[0,0,2]])\r\n if (b == b4).all() or (b == numpy.rot90(b4,1)).all():\r\n return (1,2), False\r\n\r\n # overall ELSE -- just find a square\r\n for ri in range(3):\r\n for ci in range(3):\r\n if b[ri][ci] == 0:\r\n return (ri+1,ci+1), False", "def _boxes_in_window(self, od_bbox):\n\t\t(h_llim, h_ulim), (w_llim, w_ulim) = self._window(get_lims=True)\n\t\txmin = od_bbox[:, 0]\n\t\t# ymin = od_bbox[:, 1]\n\t\txmax = od_bbox[:, 2]\n\t\t# ymax = od_bbox[:, 3]\n\n\t\tin_window = (xmax > w_llim & xmin < w_ulim)\n\t\tprint(in_window.shape) # (n)\n\t\treturn in_window", "def moving_window_pos(data, tvec, wn, deg=2):\n\n xwn = wn\n hxwn = xwn / 2\n posx, posz = data.T\n npts = len(posx)\n spos = np.zeros((npts, 2))\n svel = np.zeros((npts, 2))\n sacc = np.zeros((npts, 2))\n\n for i in range(npts):\n\n ind = np.where((posx >= posx[i] - hxwn) & (posx <= posx[i] + hxwn))[0]\n t = tvec[ind]\n x = posx[ind]\n z = 
posz[ind]\n\n pfpx = np.polyfit(t, x, deg)\n pfpz = np.polyfit(t, z, deg)\n pfvx = np.polyder(pfpx, m=1)\n pfvz = np.polyder(pfpz, m=1)\n pfax = np.polyder(pfpx, m=2)\n pfaz = np.polyder(pfpz, m=2)\n\n tval = tvec[i]\n spos[i] = np.polyval(pfpx, tval), np.polyval(pfpz, tval)\n svel[i] = np.polyval(pfvx, tval), np.polyval(pfvz, tval)\n sacc[i] = np.polyval(pfax, tval), np.polyval(pfaz, tval)\n\n return spos, svel, sacc", "def check_for_win_lose(b):\r\n win_move = None\r\n block_win = None\r\n # check for wins based on row\r\n for ri in range(3):\r\n row = b[ri]\r\n if single_move(row):\r\n if row==[1,1,0]:\r\n win_move = (ri+1,3)\r\n elif row==[2,2,0]:\r\n block_win = (ri+1,3)\r\n elif row==[1,0,1]:\r\n win_move = (ri+1,2)\r\n elif row==[2,0,2]:\r\n block_win = (ri+1,2)\r\n elif row==[0,1,1]:\r\n win_move = (ri+1,1)\r\n elif row==[0,2,2]:\r\n block_win = (ri+1,1)\r\n else:\r\n print '144 ERROR!'\r\n print single_move(row)\r\n print row\r\n print ' '\r\n\r\n # check for win based on column\r\n for ci in range(3):\r\n col = get_col(b,ci)\r\n if single_move(col):\r\n if col==[1,1,0]:\r\n win_move = (3,ci+1)\r\n elif col==[2,2,0]:\r\n block_win = (3,ci+1)\r\n elif col==[1,0,1]:\r\n win_move = (2,ci+1)\r\n elif col==[2,0,2]:\r\n block_win = (2,ci+1)\r\n elif col==[0,1,1]:\r\n win_move = (1,ci+1)\r\n elif col==[0,2,2]:\r\n block_win = (1,ci+1)\r\n else:\r\n print '166 ERROR!'\r\n print single_move(col)\r\n print col\r\n print ' '\r\n\r\n # check for win on backward diagonal\r\n diag = get_bw_diag(b)\r\n if single_move(diag):\r\n if diag==[1,1,0]:\r\n win_move = (3,3)\r\n elif diag==[2,2,0]:\r\n block_win (3,3)\r\n elif diag == [1,0,1]:\r\n win_move = (2,2)\r\n elif diag==[2,0,2]:\r\n block_win = (2,2)\r\n elif diag == [0,1,1]:\r\n win_move = (1,1)\r\n elif diag==[0,2,2]:\r\n block_win = (1,1)\r\n \r\n # check for win on forward diagonal\r\n diag = get_fwd_diag(b)\r\n if single_move(diag):\r\n if diag == [1,1,0]:\r\n win_move = (3,1)\r\n elif diag==[2,2,0]:\r\n block_win = (3,1)\r\n elif diag == [1,0,1]:\r\n win_move = (2,2)\r\n elif diag==[2,0,2]:\r\n block_win = (2,2)\r\n elif diag == [0,1,1]:\r\n win_move = (1,3)\r\n elif diag==[0,2,2]:\r\n block_win = (1,3)\r\n\r\n if win_move is not None:\r\n return (win_move, True)\r\n elif block_win is not None:\r\n return (block_win, False)\r\n else:\r\n return (None, False)", "def legal_move(marker, x, y, direction):\n # first if statement determines the directions\n # second if statement checks if the \"potential move\" is within the index\n if direction == \"N\":\n if 0 <= y-2 < len(marker):\n return marker[y-2][x] == marker[y-1][x] == '*'\n if direction == \"S\":\n if 0 <= y+2 < len(marker):\n return marker[y+2][x] == marker[y+1][x] == '*'\n if direction == \"W\":\n if 0 <= x-2 < len(marker[0]):\n return marker[y][x-2] == marker[y][x-1] == '*'\n if direction == \"E\":\n if 0 <= x+2 < len(marker[0]):\n return marker[y][x+2] == marker[y][x+1] == '*'\n return False", "def apply_move(self, move, state):\n x, y , heading, grid_data = state\n map_data = [row[:] for row in grid_data]\n if move == self.MOVE_FORWARD:\n # get coordinates for next cell\n if heading == self.UP:\n next_y = y - 1\n next_x = x\n elif heading == self.DOWN:\n next_y = y + 1\n next_x = x\n elif heading == self.LEFT:\n next_y = y\n next_x = x - 1\n else:\n next_y = y\n next_x = x + 1\n\n # handle special tile types\n if map_data[next_y][next_x] == self.ICE_SYMBOL:\n # handle ice tile - slide until first non-ice tile or blocked\n if heading == self.UP:\n for i in range(next_y, -1, -1):\n 
if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i + 1\n break\n else:\n next_y = i\n break\n elif heading == self.DOWN:\n for i in range(next_y, self.y_size):\n if map_data[i][next_x] != self.ICE_SYMBOL:\n if map_data[i][next_x] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(i, next_x, map_data):\n # if blocked, stop on last ice cell\n next_y = i - 1\n break\n else:\n next_y = i\n break\n elif heading == self.LEFT:\n for i in range(next_x, -1, -1):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i + 1\n break\n else:\n next_x = i\n break\n else:\n for i in range(next_x, self.x_size):\n if map_data[next_y][i] != self.ICE_SYMBOL:\n if map_data[next_y][i] == self.WATER_SYMBOL:\n # slide into water - game over\n return self.GAME_OVER\n elif self.cell_is_blocked(next_y, i, map_data):\n # if blocked, stop on last ice cell\n next_x = i - 1\n break\n else:\n next_x = i\n break\n if map_data[next_y][next_x] == self.TELEPORT_SYMBOL:\n # handle teleport - find the other teleporter\n tpy, tpx = (None, None)\n for i in range(self.y_size):\n for j in range(self.x_size):\n if map_data[i][j] == self.TELEPORT_SYMBOL and (i != next_y or j != next_x):\n tpy, tpx = (i, j)\n break\n if tpy is not None:\n break\n if tpy is None:\n raise Exception(\"LaserTank Map Error: Unmatched teleport symbol\")\n next_y, next_x = (tpy, tpx)\n else:\n # if not ice or teleport, perform collision check\n if self.cell_is_blocked(next_y, next_x, map_data):\n return self.COLLISION\n\n # check for game over conditions\n if self.cell_is_game_over(next_y, next_x, map_data):\n return self.GAME_OVER\n\n # no collision and no game over - update player position\n y = next_y\n x = next_x\n return (x, y, heading, map_data)\n\n elif move == self.TURN_LEFT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.LEFT\n elif heading == self.DOWN:\n heading = self.RIGHT\n elif heading == self.LEFT:\n heading = self.DOWN\n else:\n heading = self.UP\n return (x, y, heading, map_data)\n\n elif move == self.TURN_RIGHT:\n # no collision or game over possible\n if heading == self.UP:\n heading = self.RIGHT\n elif heading == self.DOWN:\n heading = self.LEFT\n elif heading == self.LEFT:\n heading = self.UP\n else:\n heading = self.DOWN\n return (x, y, heading, map_data)\n\n elif move == self.SHOOT_LASER:\n # set laser direction\n if heading == self.UP:\n laserheading = self.UP\n dy, dx = (-1, 0)\n elif heading == self.DOWN:\n laserheading = self.DOWN\n dy, dx = (1, 0)\n elif heading == self.LEFT:\n laserheading = self.LEFT\n dy, dx = (0, -1)\n else:\n laserheading = self.RIGHT\n dy, dx = (0, 1)\n\n # loop until laser blocking object reached\n ly, lx = (y, x)\n while True:\n ly += dy\n lx += dx\n\n # handle boundary and immovable obstacles\n if ly < 0 or ly >= self.y_size or \\\n lx < 0 or lx >= self.x_size or \\\n map_data[ly][lx] == self.OBSTACLE_SYMBOL:\n # laser stopped without effect\n return self.COLLISION\n\n # handle movable objects\n elif self.cell_is_laser_movable(ly, lx, laserheading, map_data):\n # check if tile can be moved without collision\n if 
self.cell_is_blocked(ly + dy, lx + dx, map_data) or \\\n map_data[ly + dy][lx + dx] == self.ICE_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.TELEPORT_SYMBOL or \\\n map_data[ly + dy][lx + dx] == self.FLAG_SYMBOL or \\\n (ly + dy == y and lx + dx == x):\n # tile cannot be moved\n return self.COLLISION\n else:\n old_symbol = map_data[ly][lx]\n map_data[ly][lx] = self.LAND_SYMBOL\n if map_data[ly + dy][lx + dx] == self.WATER_SYMBOL:\n # if new bridge position is water, convert to land tile\n if old_symbol == self.BRIDGE_SYMBOL:\n map_data[ly + dy][lx + dx] = self.LAND_SYMBOL\n # otherwise, do not replace the old symbol\n else:\n # otherwise, move the tile forward\n map_data[ly + dy][lx + dx] = old_symbol\n break\n\n # handle bricks\n elif map_data[ly][lx] == self.BRICK_SYMBOL:\n # remove brick, replace with land\n map_data[ly][lx] = self.LAND_SYMBOL\n break\n\n # handle facing anti-tanks\n elif (map_data[ly][lx] == self.ANTI_TANK_UP_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.ANTI_TANK_DOWN_SYMBOL and laserheading == self.UP) or \\\n (map_data[ly][lx] == self.ANTI_TANK_LEFT_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.ANTI_TANK_RIGHT_SYMBOL and laserheading == self.LEFT):\n # mark anti-tank as destroyed\n map_data[ly][lx] = self.ANTI_TANK_DESTROYED_SYMBOL\n break\n\n # handle player laser collision\n elif ly == y and lx == x:\n return self.GAME_OVER\n\n # handle facing mirrors\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.RIGHT) or \\\n (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.LEFT):\n # new direction is up\n dy, dx = (-1, 0)\n laserheading = self.UP\n elif (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.RIGHT) or \\\n (self.grid_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.LEFT):\n # new direction is down\n dy, dx = (1, 0)\n laserheading = self.DOWN\n elif (map_data[ly][lx] == self.MIRROR_UL_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DL_SYMBOL and laserheading == self.UP):\n # new direction is left\n dy, dx = (0, -1)\n laserheading = self.LEFT\n elif (map_data[ly][lx] == self.MIRROR_UR_SYMBOL and laserheading == self.DOWN) or \\\n (map_data[ly][lx] == self.MIRROR_DR_SYMBOL and laserheading == self.UP):\n # new direction is right\n dy, dx = (0, 1)\n laserheading = self.RIGHT\n # do not terminate laser on facing mirror - keep looping\n\n # check for game over condition after effect of laser\n if self.cell_is_game_over(y, x, map_data):\n return self.GAME_OVER\n return (x, y, heading, map_data)\n return self.SUCCESS", "def movements(self):\n raise NotImplementedError", "def test_valid_move():\n\n board = Board()\n\n # a col outside the width of the board should be false\n assert board.valid_move(board.get_grid_size()[1] + 1) is False\n\n # only positive cols should be considered for a move\n assert board.valid_move(-2) is False\n\n # since board is empty all cols should have moves\n for i in range(board.get_grid_size()[1]):\n assert board.valid_move(i) is True\n\n # if a col is full no move can be made\n for i in range(board.get_grid_size()[1]):\n if i % 2 == 0:\n board.move(board.P1, 0)\n else:\n board.move(board.P2, 0)\n\n \"\"\"\n board now looks like this...\n \n 0 1 2 3 4 5 6 \n +-+-+-+-+-+-+-+\n 0|O|-|-|-|-|-|-|0\n +-+-+-+-+-+-+-+\n 1|X|-|-|-|-|-|-|1\n +-+-+-+-+-+-+-+\n 2|O|-|-|-|-|-|-|2\n +-+-+-+-+-+-+-+\n 3|X|-|-|-|-|-|-|3\n +-+-+-+-+-+-+-+\n 4|O|-|-|-|-|-|-|4\n +-+-+-+-+-+-+-+\n 5|X|-|-|-|-|-|-|5\n 
+-+-+-+-+-+-+-+\n 0 1 2 3 4 5 6 \n\n \"\"\"\n assert board.valid_move(0) is False", "def check4move(st, selected_unit, direction):\n return 1", "def ahead(self, obs, object):\n if(obs[self.listOfObjects.index(object)][int((self.no_rays-1)/2)] > 0):\n # print(\"found \" + str(object) + \" ahead\")\n return True\n return False", "def observation(self, obs):\n\n# import pdb;pdb.set_trace()\n return np.moveaxis(obs, 2, 0)", "def moving_window_pts(data, tvec, wn, deg=2, drop_deg=False):\n\n deg_orig = deg\n posx, posz = data.T\n npts = len(posx)\n spos = np.zeros((npts, 2))\n svel = np.zeros((npts, 2))\n sacc = np.zeros((npts, 2))\n\n for i in range(npts):\n start, stop, at_end = window_bounds(i, npts, wn)\n if at_end and drop_deg:\n deg = deg_orig - 1\n else:\n deg = deg_orig\n\n t = tvec[start:stop]\n x = posx[start:stop]\n z = posz[start:stop]\n\n pfpx = np.polyfit(t, x, deg)\n pfpz = np.polyfit(t, z, deg)\n pfvx = np.polyder(pfpx, m=1)\n pfvz = np.polyder(pfpz, m=1)\n pfax = np.polyder(pfpx, m=2)\n pfaz = np.polyder(pfpz, m=2)\n\n tval = tvec[i]\n spos[i] = np.polyval(pfpx, tval), np.polyval(pfpz, tval)\n svel[i] = np.polyval(pfvx, tval), np.polyval(pfvz, tval)\n sacc[i] = np.polyval(pfax, tval), np.polyval(pfaz, tval)\n\n return spos, svel, sacc", "def bump_moving_obstacle(\n state: State, action: Action, next_state: State\n) -> bool:\n return overlap(state, action, next_state, object_type=MovingObstacle)", "def __window_moveBy(self, deltaX, deltaY):\n pass", "def checkMove(self, vehicle, change):\n\n newPos = self.changeable[vehicle] + change\n if not isinstance(change, int):\n return 1\n\n if newPos < 0 or newPos > self.gridSize - self.length[vehicle]:\n return 1\n\n usedElement = self.occupiedElements()\n\n if change > 0:\n for i in range(change):\n if self.direction[vehicle] == \"h\":\n if usedElement[self.fixed[vehicle]][self.changeable[vehicle] + self.length[vehicle] + i] == 1:\n return 1\n else:\n if usedElement[self.changeable[vehicle] + self.length[vehicle] + i][self.fixed[vehicle]] == 1:\n return 1\n else:\n for i in range(abs(change)):\n if self.direction[vehicle] == \"h\":\n if usedElement[self.fixed[vehicle]][self.changeable[vehicle] - 1 - i] == 1:\n return 1\n else:\n if usedElement[self.changeable[vehicle] - 1 - i][self.fixed[vehicle]] == 1:\n return 1\n return 0", "def update_board(self,move, _testing : bool = True ) -> bool :\r\n\r\n temp = self.board.copy()\r\n self.count = 0\r\n\r\n for direction in DIRECTIONS:\r\n self.update_direction(move,direction)\r\n\r\n if self.count == 0:\r\n self.board = temp\r\n return False\r\n else:\r\n if _testing:\r\n self.board = temp\r\n else:\r\n self.board[move[0],move[1]] = self.turn\r\n return True", "def winning_move(board, position, player):\n win = list(player*3)\n if get_row(board, position) == win:\n return True\n elif get_column(board, position) == win:\n return True\n elif position % 2 != 0:\n # odd positions are on the diagonals\n return get_diagonal(board, 1) == win or get_diagonal(board, 3) == win\n return False", "def can_move(self):\r\n for wall in self.app.walls:\r\n if vec(self.grid_pos+self.direction) == wall:\r\n return False\r\n return True", "def cb_move(self, event):\n if not self.move_timer.IsRunning():\n self.move_timer.StartOnce(2000)", "def can_move(self):\n return self.movement", "def move(self,board,n,display):\n\n\t\tmove = False\n\t\tgoodInput = False\n\t\tn = 0\n\n\t\twhile not goodInput:\n\t\t\tpygame.time.wait(10)\n\t\t\tdisplay.displayBoard()\n\t\t\tmove = display.getMove()\n\n\t\t\tif move == \"End 
Preset\":\n\t\t\t\treturn move\n\n\t\t\tif move and tuple(move) in board.openPoints():\n\t\t\t\tgoodInput = True\n\t\t\telif move:\n\t\t\t\tprint \"Bad input, try again!\"\n\n\t\t\tn += 1\n\n\t\treturn move", "def get_move(arr=None):\n if arr is None or np.sum(arr!=0) < 55:\n return []\n \n moves = [] # (coord, dir) ex ((3, 4), 0) means move (3, 4) to right, 0 right, 1 up, 2 left, 3 down\n mask_moved = np.ones_like(arr)\n replace_value = 0\n # detect 2 consecutive\n for key in filters:\n for rot in [1, 3, 0, 2]:\n early_break = False\n out = signal.correlate2d(arr, np.rot90(filters[key], rot), mode='same', fillvalue=100)\n \n mask = (out==arr).astype(np.float)\n tmp = np.stack(np.where(mask), -1)\n # print(tmp)\n for idx in range(tmp.shape[0]):\n # if mask_moved[tuple(tmp[idx])] == 1:\n if mask_moved[tuple(tmp[idx])] == 1 and mask_moved[tuple(tmp[idx]+dirs[rot])] == 1:\n # if mask_moved[tuple(tmp[idx])] == 1 and mask_moved[tuple(tmp[idx]+dirs[rot])] == 1 and arr[tuple(tmp[idx]+dirs[rot])] != replace_value:\n moves.append((tmp[idx], rot))\n # mask_moved[tuple(tmp[idx])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot])] = 0\n arr[tuple(tmp[idx])], arr[tuple(tmp[idx]+dirs[rot])] = arr[tuple(tmp[idx]+dirs[rot])], arr[tuple(tmp[idx])]\n arr[tuple(tmp[idx]+dirs[rot])] = replace_value\n if key == 3:\n mask_moved[tuple(tmp[idx]+dirs[rot]*2)] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]*3)] = 0\n arr[tuple(tmp[idx]+dirs[rot]*2)] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]*3)] = replace_value\n elif key == 2:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = replace_value\n elif key == 0:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+1)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+1)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+1)%4])] = replace_value\n else:\n mask_moved[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = 0\n mask_moved[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+3)%4])] = 0\n arr[tuple(tmp[idx]+dirs[rot]+dirs[(rot+3)%4])] = replace_value\n arr[tuple(tmp[idx]+dirs[rot]+2*dirs[(rot+3)%4])] = replace_value\n early_break = True\n break\n if early_break:\n break\n \n if len(moves) > 5: # early break to save computing resources\n break\n\n if len(moves) == 0:\n icon_other = np.stack(np.where(arr==0), -1)\n for idx in range(icon_other.shape[0]):\n moves.append((icon_other[idx], np.random.randint(0, 4)))\n\n return moves", "def test_hanning(length, window):\n assert_almost_equal(windows.hanning(length), np.array(window))", "def check_win(self):\n for pos in self.win_set:\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False", "def test_verify_move(self):\n self._verify([self.applied_commands['move']])", "def test_move(self):\n neq_gcmc_system_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_system_sampler.move(neq_gcmc_system_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_system_sampler.n_moves == 1\n assert 0 <= neq_gcmc_system_sampler.n_accepted <= 1\n assert len(neq_gcmc_system_sampler.Ns) == 1\n assert len(neq_gcmc_system_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_system_sampler.velocities, Quantity)\n assert 
neq_gcmc_system_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_system_sampler.insert_works) + len(neq_gcmc_system_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_system_sampler.n_explosions <= 1\n\n return None", "def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_system_sampler.move(std_gcmc_system_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_system_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_system_sampler.n_accepted <= n_moves\n assert len(std_gcmc_system_sampler.Ns) == n_moves\n assert len(std_gcmc_system_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_system_sampler.energy, Quantity)\n assert std_gcmc_system_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None" ]
[ "0.6529597", "0.6286072", "0.6259327", "0.6203149", "0.6197683", "0.6197294", "0.6170365", "0.601616", "0.5998505", "0.5931079", "0.5923095", "0.5921639", "0.5878888", "0.5877249", "0.58698124", "0.585277", "0.5812267", "0.5797952", "0.5775176", "0.57483536", "0.5737459", "0.5729434", "0.57093906", "0.5701173", "0.5668545", "0.56511736", "0.5650913", "0.5649598", "0.5646812", "0.56445885", "0.5643883", "0.5640937", "0.56341654", "0.5628849", "0.5625213", "0.56199235", "0.5601362", "0.559707", "0.5595681", "0.55901015", "0.5582935", "0.55803823", "0.557816", "0.55746114", "0.5569128", "0.55647874", "0.55536944", "0.55472976", "0.5530732", "0.5527743", "0.55272746", "0.55229217", "0.5519678", "0.551766", "0.5508547", "0.5508461", "0.54962033", "0.54857296", "0.54777855", "0.54766613", "0.54744714", "0.5466932", "0.5461989", "0.5461989", "0.54608095", "0.5447224", "0.54453313", "0.5435963", "0.5430014", "0.54281056", "0.54246217", "0.54217935", "0.54177386", "0.54159856", "0.54159164", "0.54149127", "0.54138666", "0.54122347", "0.5408326", "0.5402277", "0.5398712", "0.5396828", "0.53964525", "0.5389962", "0.53886706", "0.5383695", "0.5383403", "0.5377205", "0.53745747", "0.53740114", "0.5370862", "0.53626436", "0.5361162", "0.5360123", "0.53596306", "0.53585106", "0.5358455", "0.5354276", "0.535373", "0.53536433" ]
0.7049266
0
Returns the CPModule class from within the loaded Python module. m - an imported module; returns the CPModule class.
def find_cpmodule(m):\n    for v, val in list(m.__dict__.items()):\n        if isinstance(val, type) and issubclass(val, cellprofiler_core.module.Module):\n            return val\n    raise ValueError(\n        "Could not find cellprofiler_core.module.Module class in %s" % m.__file__\n    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _module(self):\n if self._module_cache is None:\n self._module_cache = load_module(self._name, self._path)\n return self._module_cache", "def base_module(self) -> nn.Module:\n return getattr(__import__(\"src.modules\", fromlist=[\"\"]), self.name)", "def get_module(self):\n return self.module", "def get_module(cls, module_name):\n if cls.module_dict is None:\n # Init the module_dict once.\n cls.module_dict = {mod.name: mod for mod in cls.get_pb().modules}\n return cls.module_dict.get(module_name)", "def get_module_class(module):\n try:\n for name, obj in inspect.getmembers(module):\n # must check for parent module name (should be beacon/codec/etc) as to avoid imported class objects\n if inspect.isclass(obj) and obj.__module__ == module.__name__:\n return obj\n # have it instantiate the object? depends where I decide to use this method: obj_() creates an instance.\n except Exception, e:\n print \"Error getting class from %s module\" % (module.__name__)\n raise", "def get_module(cls, module=None):\n return module or sys.modules[cls.module_name()]", "def exposed_getmodule(self, name):\n return __import__(name, None, None, \"*\")", "def get_module(module_name):\n module = __import__(module_name)\n components = module_name.split('.')\n for comp in components[1:]:\n module = getattr(module,comp)\n return module", "def module(self):\n return self.lib.module", "def get_module(module):\n return getattr(sys.modules, module, importlib.import_module(module))", "def my_import(module_name, class_name):\n\n\t# load the module, will raise ImportError if module cannot be loaded\n\tm = importlib.import_module(module_name)\n\n\t# get the class, will raise AttributeError if class cannot be found\n\tc = getattr(m, class_name)\n\n\treturn c", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def module(self):\n return self._module", "def get_module(self):\n module = self.__class__.__module__.split('.')\n module = \".\".join(module[:-1])\n module = module + \".\" + self._get_valid_version().module\n return module", "def _get_module(self, name):\n module = self._modules.get(name)\n if not module:\n module = importlib.import_module(name)\n self._modules[name] = module\n return module", "def get_class(fileName):\n module = __import__(fileName)\n return getattr(module, fileName)", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def _import_module(name):\r\n __import__(name)\r\n return sys.modules[name]", "def load_module(file_name):\n path = temp.relpath(file_name)\n m = _load_module(path)\n logger.info(\"load_module %s\", path)\n return m", "def get_module(name) -> Module:\n if isinstance(name, str):\n obj = get_object(name)\n else:\n obj = name\n\n name = obj.__name__\n if name in modules:\n return modules[name]\n else:\n module = Module(obj)\n modules[name] = module\n return module", "def __import_from(localization, member_name, module_name=\"__builtin__\"):\n module = import_python_module(localization, module_name)\n if isinstance(module, TypeError):\n return module, None\n\n try:\n return module, module.get_type_of_member(localization, member_name)\n except Exception as exc:\n return module, TypeError(localization,\n \"Could not load member '{0}' from module '{1}': {2}\".format(member_name, module_name,\n str(exc)))", "def _import_module(name):\n __import__(name)\n return 
sys.modules[name]", "def _import_module(name):\n __import__(name)\n return sys.modules[name]", "def get_module(self, name: str) -> ModuleInstance:\n return self.modules[name]", "def _load(self):\n # Import the target module and insert it into the parent's namespace\n module = importlib.import_module(self.__name__)\n self._parent_module_globals[self._local_name] = module\n\n # Emit a warning if one was specified\n if self._warning:\n logging.warning(self._warning)\n # Make sure to only warn once.\n self._warning = None\n\n # Update this object's dict so that if someone keeps a reference to the\n # LazyLoader, lookups are efficient (__getattr__ is only called on lookups\n # that fail).\n self.__dict__.update(module.__dict__)\n\n return module", "def get_compss_module(self):\n return self.compss_module", "def get_module_from_string(self, modulestring):\n\n module = None\n try:\n # Meta language for dinamically import\n module = import_module(modulestring)\n except ImportError as e:\n logger.critical(\"Failed to load resource: \" + str(e))\n return module", "def _load(self):\n module = importlib.import_module(self.__name__)\n self._parent_module_globals[self._local_name] = module\n\n if self._warning:\n logger.warning(self._warning)\n # Make sure to only warn once.\n self._warning = None\n\n # Update this object's dict so that if someone keeps a reference to the\n # LazyLoader, lookupts are efficient (__getattr__ is only called on lookups\n # that fail).\n self.__dict__.update(module.__dict__)\n return module", "def get_clip_module(clip_model_name: str) -> CLIP:\n return CLIP(*PARAMETERS[clip_model_name].values())", "def retry_import(mf: ModuleGraph, m: Node) -> typing.Optional[Node]:\n if \".\" in m.identifier:\n pname, partname = m.identifier.rsplit(\".\", 1)\n parent = mf.findNode(pname)\n else:\n parent = None\n partname = m.identifier\n\n # This is basically mf.find_module inlined and with a\n # check disabled.\n\n def fmod(\n name: str,\n path: typing.Optional[typing.List[str]],\n parent: typing.Optional[Node],\n ) -> typing.Tuple[\n typing.Optional[typing.IO], typing.Optional[str], typing.Tuple[str, str, int]\n ]:\n if path is None:\n if name in sys.builtin_module_names:\n return (None, None, (\"\", \"\", imp.C_BUILTIN))\n\n path = mf.path\n\n fp, buf, stuff = find_module(name, path)\n if buf:\n buf = os.path.realpath(buf)\n return (fp, buf, stuff)\n\n try:\n fp, pathname, stuff = fmod(\n partname, parent.packagepath if parent is not None else None, parent\n )\n except ImportError:\n return None\n\n if stuff[-1] == imp.PKG_DIRECTORY:\n m.__class__ = Package\n elif stuff[-1] == imp.PY_SOURCE:\n m.__class__ = SourceModule\n else:\n m.__class__ = CompiledModule\n\n m = mf._load_module(m.identifier, fp, pathname, stuff)\n\n if parent:\n mf.createReference(m, parent)\n parent[partname] = m\n return m", "def loadModule(module_name, class_name = None):\n mod = importlib.import_module(module_name)\n if class_name == None: return mod\n else: return getattr(mod, class_name)", "def _get_module(module):\n try:\n return sys.modules[module]\n except KeyError:\n raise ValueError(\n module + \"is not a valid module name or it is not loaded\"\n )", "def Importer(self, module, globals='', locals='', fromlist=None):\n if fromlist is None:\n module = module.split('.')[0]\n try:\n return self.modules[module]\n except KeyError:\n raise ImportError()", "def load_module(self, module_name): # pragma: no cover\r\n try:\r\n module = import_module('SoftLayer.CLI.modules.%s' % module_name)\r\n for _, obj in 
inspect.getmembers(module):\r\n if inspect.isclass(obj) and issubclass(obj, CLIRunnable):\r\n self.add_plugin(obj)\r\n return module\r\n except ImportError:\r\n raise InvalidModule(module_name)", "def _find_module(model, mod_name):\n for name, module in model.named_modules():\n if name == mod_name:\n return module\n return None", "def loadmodule( conf ):\n try:\n #conf = routes[ route ]\n # try to load the module\n module_name = conf['module']['name']\n module_path = conf['module']['path']\n \n mod_name, file_ext = os.path.splitext( os.path.split( module_path )[ -1] )\n if file_ext.lower() == '.py':\n py_mod = imp.load_source( mod_name, module_path )\n elif file_ext.lower() == '.pyc':\n py_mod = imp.load_compiled( mod_name, module_path )\n else:\n raise Exception(\"Cannot handle module for route: \" + route )\n except Exception, e:\n import traceback\n traceback.print_exc( file=sys.stdout )\n # TODO log error + msg\n return py_mod", "def get_module(self, cls_name, module_name='module'):\n if module_name not in self._module_dict:\n raise KeyError('{module_name} is not in registry')\n dd = self._module_dict[module_name]\n if cls_name not in dd:\n raise KeyError('{cls_name} is not registered in {module_name}')\n\n return dd[cls_name]", "def load_python_global(module, name):\n\n # The builtin module has been renamed in python3\n if module == '__builtin__' and six.PY3:\n module = 'builtins'\n module = importlib.import_module(module)\n return getattr(module, name)", "def import_module(name):\n __import__(name)\n return sys.modules[name]", "def _load_module(modulepath):\n\n mod = __import__(modulepath)\n path = []\n for token in modulepath.split(\".\")[1:]:\n path.append(token)\n mod = getattr(mod, token)\n return mod", "def loadModule(mod):\n try:\n # from pyrominfo import gameboy, etc\n pyrominfo = __import__(\"pyrominfo\", globals(), locals(), [mod])\n except ImportError:\n import os\n parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n os.sys.path.insert(0, parentdir)\n pyrominfo = __import__(\"pyrominfo\", globals(), locals(), [mod])\n try:\n return getattr(pyrominfo, mod)\n except AttributeError:\n raise ImportError(\"testutils.loadModule() can't find module %s in pyrominfo package\" % mod)", "def loadModule(self, module = \"\", modType=\"python\"):\n\t\tif modType == \"python\":\t\t\t\n\t\t\tmod = None\t\t\t#try:\n\t\t\tsys.path.insert(0, os.path.dirname(module))\n\t\t\tfile = os.path.basename(module)\n\t\t\tfile = file[:file.rfind('.')]\n\t\t\t#print \"Try: \", file, \"over\", sys.path\n\t\t\ttry:\n\t\t\t\tmod = __import__(file)\n\t\t\texcept:\n\t\t\t\tprint \"Invalid CSL API Module '%s' ignored.\" % (file)\n\t\t\t\tsys.path.pop(0)\n\t\t\t\treturn None\n\t\t\tsys.path.pop(0)\n\t\t\t#print \"Loaded Module Info:\", dir(mod)\n\t\t\tif \"CSLAPI_NAME\" in dir(mod):\t\t\t\t\n\t\t\t\tmod.CSLValue = CSLValue\n\t\t\t\tmod.debug = self.debug\n\t\t\t\tvtbl = mod.getFuncTable()\n\t\t\t\t#print \"CSL Module loader:\", module\n\t\t\t\tvtbl_names = vtbl.keys()\n\t\t\t\tfor i in vtbl_names:\n\t\t\t\t\t#print \"\\tAdded Function '%s' from module: %s (%s)\" % (i, mod.__file__, vtbl[i].__class__)\n\t\t\t\t\tself.cslAPIS[i] = vtbl[i]\n\t\t\t\tself.cslAPIMods.append(mod)", "def modulename():\n from inspect import getmodulename,getfile\n return getmodulename(getfile(lambda x:x))", "def get_obj_from_module(module: types.ModuleType, obj_name: str) -> Any:\r\n if obj_name == '':\r\n return module\r\n obj = module\r\n for part in obj_name.split(\".\"):\r\n obj = getattr(obj, part)\r\n return 
obj", "def __import_python_library_module(localization, module_name=\"__builtin__\"):\n try:\n module_obj = get_module_from_sys_cache(module_name)\n if module_obj is None:\n module_obj = __load_python_module_dynamically(module_name)\n module = module_obj.get_python_entity()\n\n module_members = module.__dict__\n for member in module_members:\n if inspect.ismodule(module_members[member]):\n member_module_name = module_members[member].__name__\n # Is not our own member\n if member_module_name is not module_name:\n if not __exist_module_in_sys_cache(member_module_name):\n module_ti = __load_python_module_dynamically(member_module_name)\n module_obj.set_type_of_member(localization, member, module_ti)\n return module_obj\n except Exception as exc:\n return TypeError(localization, \"Could not load Python library module '{0}': {1}\".format(module_name, str(exc)))", "def get_module(self, mf_module):\n assert mf_module in [\"mf\", \"mt\", \"mp\"], \\\n f\"requested module {mf_module} is not one of 'mf', 'mt', 'mp'.\"\n\n try:\n return self.data[mf_module]\n except KeyError:\n raise KeyError(f\"module {mf_module} is not available in modflow model data.\")", "def find_module (self, name, path = None):\n return self if name in self.containments else None", "def load_module(name):\n return __import__(\"metaswitch.%s\" % name,\n fromlist=[\"ROUTES\"])", "def load_instance_from_file(klass, modpath):\n mod_name, file_ext = os.path.splitext(os.path.split(modpath)[-1])\n logging.info('attempting problem import from {} module'.format(mod_name))\n\n ext = file_ext.lower()\n if ext == '.py':\n py_mod = imp.load_source(mod_name, modpath)\n elif ext == '.pyc':\n py_mod = imp.load_compiled(mod_name, modpath)\n else:\n raise NotModule('{} is not Python source or bytecode'.format(modpath))\n\n for attr in dir(py_mod):\n mod_obj = getattr(py_mod, attr)\n if isinstance(mod_obj, klass):\n return mod_obj\n\n return None", "def parse_python_module(module_path, name):\n\n module = imp.load_source(name, module_path)\n\n if module is None:\n log.e(TAG, \"Error launching module '%s'.\" % name)\n return None\n\n try:\n mod_class = getattr(module, name)\n mod_inst = mod_class()\n\n except AttributeError:\n log.e(TAG, \"Unable to find class '%s' in module!\" % name)\n return None\n\n item = dtf.core.item.Item()\n item.type = dtf.core.item.TYPE_MODULE\n item.name = name\n item.local_name = module_path\n item.install_name = name\n item.author = mod_inst.author\n item.about = mod_inst.about\n\n version = mod_inst.version\n if version is not None:\n if dtf.core.item.is_valid_version(version):\n item.version = version\n else:\n log.e(TAG, \"Invalid version specified. 
Exiting.\")\n return None\n else:\n item.version = None\n\n # Remove the compiled file name\n compiled_python_file = \"%sc\" % module_path\n if os.path.isfile(compiled_python_file):\n os.remove(compiled_python_file)\n\n return item", "def _parse_module(conanfile_module, module_id):\n result = None\n for name, attr in conanfile_module.__dict__.items():\n if (name.startswith(\"_\") or not inspect.isclass(attr) or\n attr.__dict__.get(\"__module__\") != module_id):\n continue\n\n if issubclass(attr, ConanFile) and attr != ConanFile:\n if result is None:\n result = attr\n else:\n raise ConanException(\"More than 1 conanfile in the file\")\n\n if result is None:\n raise ConanException(\"No subclass of ConanFile\")\n\n return result", "def ImportCore():\n import ycm_core as ycm_core\n return ycm_core", "def _get_module_from_frame(frm):\n mod = inspect.getmodule(frm)\n if mod is not None:\n return mod\n\n # Check to see if we're importing from a bundle file. First ensure that\n # __file__ is available in globals; this is cheap to check to bail out\n # immediately if this fails\n\n if \"__file__\" in frm.f_globals and \"__name__\" in frm.f_globals:\n filename = frm.f_globals[\"__file__\"]\n\n # Using __file__ from the frame's globals and getting it into the form\n # of an absolute path name with .py at the end works pretty well for\n # looking up the module using the same means as inspect.getmodule\n\n if filename[-4:].lower() in (\".pyc\", \".pyo\"):\n filename = filename[:-4] + \".py\"\n filename = os.path.realpath(os.path.abspath(filename))\n if filename in inspect.modulesbyfile:\n return sys.modules.get(inspect.modulesbyfile[filename])\n\n # On Windows, inspect.modulesbyfile appears to have filenames stored\n # in lowercase, so we check for this case too.\n if filename.lower() in inspect.modulesbyfile:\n return sys.modules.get(inspect.modulesbyfile[filename.lower()])\n\n # Otherwise there are still some even trickier things that might be possible\n # to track down the module, but we'll leave those out unless we find a case\n # where it's really necessary. So return None if the module is not found.\n return None", "def get_class(kls):\n parts = kls.split('.')\n module = \".\".join(parts[:-1])\n m = __import__( module )\n for comp in parts[1:]:\n m = getattr(m, comp)\n return m", "def import_module(module, from_where):\n from_module = __import__(from_where, globals(), locals(), [module])\n return getattr(from_module, module)", "def load_module (self, name):\n module = sys.modules.get (name)\n if module is not None:\n return module\n\n containment = self.containments.get (name)\n if containment is None:\n raise ImportError ('No such module: \\'{}\\''.format (name))\n source, filename, ispkg = containment\n\n module = imp.new_module (name)\n module.__loader__ = self\n module.__file__ = filename\n if ispkg:\n module.__path__ = [os.path.dirname (filename)]\n module.__package__ = name\n else:\n module.__package__ = name.rpartition ('.') [0]\n\n module.__initializing__ = True\n sys.modules [name] = module\n try:\n Exec (compile (source, module.__file__, 'exec'), module.__dict__)\n return module\n except Exception:\n sys.modules.pop (name, None)\n raise\n finally:\n module.__initializing__ = False", "def findModule(name):", "def _import_compiled_module(self, fullname):\n\n vfile = vfs.getFile(self.filename, False)\n\n # We can only import a compiled module if it already exists on\n # disk. 
This means if it's a truly virtual file that has no\n # on-disk equivalent, we have to write it to a temporary file\n # first.\n if hasattr(vfile, 'getMount') and \\\n isinstance(vfile.getMount(), VirtualFileMountSystem):\n # It's a real file.\n filename = self.filename\n else:\n # It's a virtual file. Dump it.\n filename = Filename.temporary('', self.filename.getBasenameWoExtension(),\n '.' + self.filename.getExtension(),\n type = Filename.TDso)\n filename.setExtension(self.filename.getExtension())\n fin = open(vfile, 'rb')\n fout = open(filename, 'wb')\n data = fin.read(4096)\n while data:\n fout.write(data)\n data = fin.read(4096)\n fin.close()\n fout.close()\n\n module = imp.load_module(fullname, None, filename.toOsSpecific(),\n self.desc)\n module.__file__ = self.filename.cStr()\n return module", "def package(cls):\n packages = get_packages()\n return packages.modules.get(cls.__module__)", "def _createModuleObj(self):\n ModuleFaultCohesiveKin.__init__(self)\n return", "def _import(module_name, dir_name):\n\n # assign module a name that's not likely to conflict\n safe_name = 'confab.data.' + module_name\n\n # check if module is already loaded\n existing = sys.modules.get(safe_name)\n if existing:\n return existing\n\n # try to load module\n module_info = imp.find_module(module_name, [dir_name])\n module = imp.load_module(safe_name, *module_info)\n return module", "def python_module(self) -> ModuleType:\n if self._python_module is None:\n # See: https://github.com/linkml/linkml/issues/1219\n src = self.schemaview.schema.source_file\n if not src:\n src = self.schemaview.schema\n pygen = PythonGenerator(src)\n self._python_module = pygen.compile_module()\n return self._python_module", "def get_class(classname):\n parts = classname.split('.')\n module = '.'.join(parts[:-1])\n m = __import__(module)\n for comp in parts[1:]:\n m = getattr(m, comp) \n return m", "def get_module(self, label):\n return self._registry.get(label, self._modules.get(label, None))", "def get_amodule_class(imported_module, base_module_path, AModule):\n modules = []\n for x in dir(imported_module):\n obj = getattr(imported_module, x)\n if inspect.isclass(obj) and issubclass(obj, AModule) and obj is not AModule:\n # Unsure that the class is not a class imported by the module\n if obj.__module__ == base_module_path:\n module_path = base_module_path.replace('owfmodules.', '').replace('.', '/')\n modules.append({\"path\": module_path, \"class\": obj})\n return modules", "def compileModule(self, code):\n r = ast.Module(None, self.compileSuite(code))\n #print r\n return r", "def _import(self, module, name):\n try:\n return getattr(__import__(module, fromlist=[name]), name)\n except (AttributeError, ImportError):\n msg = \"Failed to load %s from %s: %s\" % (name, module,\n sys.exc_info()[1])\n if not self.fail_silently:\n print(msg)\n else:\n _debug(msg)\n return None", "def _import_string(module_name, content):\n\n # assign module a name that's not likely to conflict\n safe_name = 'confab.data.' 
+ module_name\n\n # check if module is already loaded\n existing = sys.modules.get(safe_name)\n if existing:\n return existing\n\n # try to load module\n module = imp.new_module(safe_name)\n exec content in module.__dict__\n return module", "def __init__(self, module_name,class_name):\n\n try:\n self.module = importlib.import_module(module_name)\n self.get_class_object = getattr(self.module,class_name)\n \n except:\n print(\"Failed to import the module {} from {}\".format(class_name,module_name))", "def module_loader(py_dir):\n\n return importlib.import_module(py_dir)\n # getattr(module)\n\n # package = 'scrapy.crawler.spiders'\n #\n # config_loader = ConfigLoader()\n # config_loader.config\n #\n # for file in glob.glob(os.path.join(py_dir, '*.py')):\n # name = os.path.splitext(os.path.basename(file))[0]\n # if name.startswith('_'):\n # continue\n # module = importlib.import_module('.'+name, package=package)\n # # member == classname\n # for member in py_dir(module):\n # \"Return only classes that have the same name as filenames\"\n # if member.lower() == name:\n # handler_class = getattr(module, member)\n # if handler_class and inspect.isclass(handler_class):\n # modules[name] = handler_class\n # return modules", "def lookup_module(id):\n return _registry[id]", "def module(self) -> Optional[Module]:\n return self._module", "def loadmodule(self, name):\n\n if name in self._modules:\n return self._modules[name]()\n\n raise Error(\"No such module: {0}\".format(name))", "def GetCoreIRModule(cirb: CoreIRBackend, circuit: DefineCircuitKind):\n if (hasattr(circuit, \"wrappedModule\")):\n return circuit.wrappedModule\n else:\n # if this is an instance, compile the class, as that is the circuit\n if hasattr(circuit, 'is_instance') and circuit.is_instance:\n circuitNotInstance = circuit.__class__\n else:\n circuitNotInstance = circuit\n moduleOrGenerator = cirb.compile(circuitNotInstance)[circuitNotInstance.name]\n # compile can giv eme back the coreIR module or the coreIR generator. if this is\n # the CoreIR generator, call it with the Magma arguments converted to CoreIR ones.\n if isinstance(moduleOrGenerator, Generator):\n return moduleOrGenerator(**circuitNotInstance.coreir_genargs)\n else:\n return moduleOrGenerator", "def loadModule(self, module_name):\n\t\tmodule = ROOT.TChain(module_name)\n\t\tself.modules.append(module)\n\t\treturn module", "def _caffeProtobufModules():\n from backend.caffe.path_loader import PathLoader\n proto = PathLoader().importProto()\n global _caffeprotomodulesvar\n if _caffeprotomodulesvar is None:\n current_module = sys.modules[proto.__name__]\n _caffeprotomodulesvar = dict(inspect.getmembers(current_module, inspect.isclass))\n return _caffeprotomodulesvar", "def _get_module(dotted_path):\n package, module = dotted_path.rsplit('.', 1)\n return getattr(import_module(package), module)", "def get_instance():\r\n try:\r\n module_instance = importlib.import_module(\r\n f\"{__name__}.{SETTINGS.db_type_ccgp_crawler.lower()}\")\r\n except ImportError as error:\r\n LOG.error(error)\r\n return module_instance.CCGPBidInfoStorage", "def load_module(self, fqn):\n trace(\"load_module\", fqn)\n trace(\"sys.modules\", sys.modules)\n p = lookupWithMapper(self.mapper, fqn)\n trace(\"load_module\", fqn, \"done\", id(p))\n\n if fqn in _sysModulesSpecialCases:\n # This module didn't have access to our isolated sys.modules when it\n # did its sys.modules modification. 
Replicate it here.\n for submoduleName in _sysModulesSpecialCases[fqn]:\n subfqn = '.'.join([fqn, submoduleName])\n sys.modules[subfqn] = getattr(p, submoduleName, None)\n return p", "def ppimport(name):\n global _ppimport_is_enabled\n\n level = 1\n parent_frame = p_frame = _get_frame(level)\n while not p_frame.f_locals.has_key('__name__'):\n level = level + 1\n p_frame = _get_frame(level)\n\n p_name = p_frame.f_locals['__name__']\n if p_name=='__main__':\n p_dir = ''\n fullname = name\n elif p_frame.f_locals.has_key('__path__'):\n # python package\n p_path = p_frame.f_locals['__path__']\n p_dir = p_path[0]\n fullname = p_name + '.' + name\n else:\n # python module\n p_file = p_frame.f_locals['__file__']\n p_dir = os.path.dirname(p_file)\n fullname = p_name + '.' + name\n\n # module may be imported already\n module = sys.modules.get(fullname)\n if module is not None:\n if _ppimport_is_enabled or isinstance(module, types.ModuleType):\n return module\n return module._ppimport_importer()\n\n so_ext = _get_so_ext()\n py_exts = ('.py','.pyc','.pyo')\n so_exts = (so_ext,'module'+so_ext)\n\n for d,n,fn,e in [\\\n # name is local python module or local extension module\n (p_dir, name, fullname, py_exts+so_exts),\n # name is local package\n (os.path.join(p_dir, name), '__init__', fullname, py_exts),\n # name is package in parent directory (scipy specific)\n (os.path.join(os.path.dirname(p_dir), name), '__init__', name, py_exts),\n ]:\n location = _is_local_module(d, n, e)\n if location is not None:\n fullname = fn\n break\n\n if location is None:\n # name is to be looked in python sys.path.\n fullname = name\n location = 'sys.path'\n\n # Try once more if module is imported.\n # This covers the case when importing from python module\n module = sys.modules.get(fullname)\n\n if module is not None:\n if _ppimport_is_enabled or isinstance(module,types.ModuleType):\n return module\n return module._ppimport_importer()\n # It is OK if name does not exists. 
The ImportError is\n # postponed until trying to use the module.\n\n loader = _ModuleLoader(fullname,location,p_frame=parent_frame)\n if _ppimport_is_enabled:\n return loader\n\n return loader._ppimport_importer()", "def _get_module(self, filename, base):\n if not filename or not filename.endswith('.py'):\n utils._log('Cannot get module for non python-source file: ', filename)\n return '' # only pytnon modules are supported\n base = base or os.path.join(\n self.window.extract_variables().get('project_path', ''),\n self.window.extract_variables().get('project_base_name', ''))\n utils._log('Getting module for file %s relative to base %s' % (filename, base))\n if not filename.startswith(base):\n utils._log('Cannot determine module path outside of directory')\n return ''\n return filename.replace(base, '').replace(os.path.sep, '.')[:-3].strip('.')", "def import_from(module: str, name: str):\n\n module = __import__(module, fromlist=[name])\n return getattr(module, name)", "def find_module(self, name):\n if name in self.pool:\n return self.pool[name]\n else:\n return None", "def importOverride(name, glbls={}, lcls={}, fromlist=[], level=-1):\n module = None\n # First try the system __import__ first\n try:\n module = BUILTIN_IMPORT(name, glbls, lcls, fromlist, level)\n # You cannot log in this namespace, due to an infinite regression issue, so don't try\n # Although I am thinking that disabling the import override, logging, and re enabling it would work\n except ImportError as error:\n # Next we will try to import them as a *.cc\n # First we need to determine if it exists\n # Check the folders in CC_PATH\n for path in CC_PATH:\n # If the path exists\n if os.path.exists(path):\n # And the path/<module name>.cc exists\n if os.path.exists(os.path.join(path, name+'.cc')):\n # We will use the first one we find\n # No the magic happens, we will first create a temp file\n temp_file = tempfile.TemporaryFile()\n # Now we add the 'magic' to the top of the temp file\n temp_file.write(MAGIC)\n # Now open the file being imported\n module_file = open(os.path.join(path, name+'.cc'), 'r')\n # Read the module contents into the temp file\n temp_file.write(module_file.read())\n module_file.close()\n # Now rewind the temp file so it can be read from the beginning\n temp_file.seek(0)\n # Now import the module\n try:\n module = imp.load_module(name, temp_file, path, ('.cc', 'r', imp.PY_SOURCE))\n except Exception as exception:\n logError(sys.exc_info(), log.error, 'Error importing control code file %s.cc:' % name, MAGIC_LINENO)\n finally:\n temp_file.close()\n log.debug('Module %s loaded from %s using the special .cc import' % (name, path))\n # If module is still None, we didn't find it and we should raise the original error\n if not module:\n raise error\n return module", "def load_python_file(moduleobject):\r\n if isinstance(moduleobject, str):\r\n moduleobject = load_module(moduleobject)\r\n if not hasattr(moduleobject, \"iclass\"):\r\n raise KeyError(\"Element\" + str(moduleobject))\r\n iclass = getattr(moduleobject, \"iclass\")\r\n resultdic = {}\r\n mylist = list(filter(lambda x:x[:1] != \"_\" and x != \"iclass\", (dir(moduleobject))))\r\n for x in mylist:\r\n resultdic[x] = getattr(moduleobject, x)\r\n if iclass == \"SymbolGrammar\":\r\n from pydsl.Grammar.BNF import BNFGrammar\r\n return BNFGrammar(**resultdic)\r\n elif iclass == \"PLY\":\r\n from pydsl.Grammar.Definition import PLYGrammar\r\n return PLYGrammar(moduleobject)\r\n elif iclass == \"MongoDict\":\r\n from pydsl.Grammar.Definition import 
MongoGrammar\r\n return MongoGrammar(resultdic)\r\n elif iclass in [\"PythonGrammar\"]:\r\n from pydsl.Grammar.Definition import PythonGrammar\r\n return PythonGrammar(resultdic)\r\n elif iclass == \"PythonTransformer\":\r\n return resultdic\r\n elif iclass == \"pyparsing\":\r\n return resultdic['root_symbol']\r\n else:\r\n raise ValueError(str(moduleobject))", "def get_code(self, parent, modname, fqname):\n\n if self.verbose:\n print >> sys.stderr, '-'*78\n print >> sys.stderr, \"Importing %s from the network ...\" % fqname\n print >> sys.stderr, '-'*78\n\n\n out = None\n for baseurl in self.path:\n\n proto_url = '/'.join([baseurl] + fqname.split('.'))\n\n\n # Is this a package?\n # ==================\n # If so, we want to look for __init__.py.\n\n is_package = self.download(proto_url + '/')\n if is_package:\n proto_url += '/__init__'\n\n\n # Try to find some code.\n # ======================\n\n for suffix in imp.get_suffixes():\n url = proto_url + suffix[0]\n fp = self.download(url)\n if fp is not None:\n\n # Prepare elements for imputil.Importer.\n # ======================================\n\n mod = imp.load_module(modname, fp, fp.name, suffix)\n out = (is_package, mod, {})\n break\n\n if out is not None:\n break\n\n return out", "def load(self):\n\n\t\tif self.module is None:\n\t\t\t# Cause the interpreter to load the module in local namespace ...\n\t\t\texec \"import \" + self.name\n\n\t\t\t# Store the module object ...\n\t\t\tobject.__setattr__(self, 'module', eval(self.name))", "def deserialize(serialized_module) -> Module:\n pass", "def _get_mod(name):\n return importlib.import_module(\"users.\" + name)", "def basetype_module(path, sys_root):\n no_mod = None, None\n if splitext(path)[-1].lower() != '.py' or split(path)[-1].startswith('_'):\n return no_mod\n mod_name = dotted(splitext(path)[0], p_sys)\n shortname = dotted(splitext(path)[0], p_sys, self.root_module)\n mod = importlib.import_module(mod_name)\n for fname in 'convert type'.split():\n if hasattr(mod, fname) and callable(getattr(mod, fname)):\n return shortname, mod\n return no_mod", "def get_class_from_string(self, classname, module):\n\n myclass = None\n try:\n # Meta language for dinamically import\n myclass = getattr(module, classname)\n except AttributeError as e:\n logger.critical(\"Failed to load resource: \" + str(e))\n\n return myclass", "def load_from_path(cls, module_path: str) -> \"FilebaseApiModuleInfo\":\n module = try_load_module_dynamic_with_timestamp(module_path)\n if module is None:\n return None\n\n if not hasattr(module, \"__filebase_api_module_info\"):\n # thread blocking command\n module.__filebase_api_module_info = cls(module)\n\n return module.__filebase_api_module_info", "def load_module(module):\n try:\n return import_module(module)\n except ImportError:\n sys.stderr.write('Unable to load the module: %s.\\n' % module)\n exit(-1)", "def load_module(self, file_path: Path) -> Module:\n if file_path.suffix != \".wasm\":\n raise Exception(\"Unsupported file type: {file_path.suffix}\")\n\n with file_path.open(\"rb\") as wasm_file:\n try:\n module = parse_module(wasm_file)\n except ParseError as err:\n raise MalformedModule from err\n\n try:\n validate_module(module)\n except ValidationError as err:\n raise InvalidModule from err\n\n return module", "def _find_module(self, name, path, parent=None):\n\n if parent is not None:\n # assert path is not None\n fullname = parent.identifier + '.' 
+ name\n else:\n fullname = name\n\n node = self.findNode(fullname)\n if node is not None:\n self.msg(3, \"find_module: already included?\", node)\n raise ImportError(name)\n\n if path is None:\n if name in sys.builtin_module_names:\n return (None, BUILTIN_MODULE)\n\n path = self.path\n\n return self._find_module_path(fullname, name, path)", "def get(module, lang=\"en_US\"):\n\n module = import_module(\n \"._lang.{}.{}\".format(subdir(lang), module), package=__package__\n )\n return module" ]
[ "0.6648748", "0.64653724", "0.64607435", "0.6405533", "0.63600814", "0.6347188", "0.63392264", "0.63091576", "0.625608", "0.6237327", "0.61681557", "0.6154078", "0.6154078", "0.6154078", "0.6154078", "0.6154078", "0.6122566", "0.6057319", "0.6050425", "0.6026159", "0.6026159", "0.6026159", "0.60214496", "0.6015974", "0.5982699", "0.596684", "0.596684", "0.59576523", "0.5933951", "0.5933264", "0.5931767", "0.5908361", "0.59013957", "0.5894854", "0.58881027", "0.5871998", "0.5869015", "0.58462745", "0.5844304", "0.5841576", "0.58401823", "0.58211344", "0.58142245", "0.5794655", "0.5791745", "0.578841", "0.5785835", "0.57811123", "0.5773815", "0.57499343", "0.5740606", "0.5735186", "0.5732398", "0.57186717", "0.5675717", "0.5671634", "0.56563854", "0.5648565", "0.564643", "0.5642102", "0.5629725", "0.5624241", "0.56148154", "0.561338", "0.55935776", "0.5583217", "0.5580244", "0.5578194", "0.5575805", "0.5566382", "0.55636793", "0.5561463", "0.5557444", "0.5551995", "0.55485463", "0.5535515", "0.55301446", "0.55298936", "0.55291307", "0.5525623", "0.5524621", "0.5523931", "0.551976", "0.55009115", "0.5500366", "0.5500012", "0.54771966", "0.54691005", "0.5467623", "0.5465436", "0.54616445", "0.54528666", "0.5448406", "0.5448116", "0.54414314", "0.5440111", "0.5438474", "0.5435278", "0.5430313", "0.54276574" ]
0.7351052
0
Encrypts data based on what the TPLink smartplug is expecting
def __encrypt(string: str) -> str:
    key = 171
    result = b"\0\0\0" + chr(len(string)).encode('latin-1')
    for i in string.encode('latin-1'):
        a = key ^ i
        key = a
        result += chr(a).encode('latin-1')
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encrypt_data(self, params):\n raise NotImplementedError", "def encrypt(data, key):\n data = six.ensure_binary(data)\n data = privy.hide(secret=data, password=key)\n data = six.ensure_text(data)\n return data", "def Encrypt(self, data):\n\n if len(data) % 16 != 0:\n data += ' ' * (16 - len(data) % 16)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n data = es.encrypt(data)\n data = base64.b64encode(data)\n return data", "def encrypt_data ( aes_key, data ) :\n salt = Crypto.Random.new( ).read( Crypto.Cipher.AES.block_size )\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n encrypted_data = cipher.encrypt( data )\n\n return encode_data( salt + encrypted_data )", "def _EncryptData(self, data):\n if isinstance(data, str):\n data = data.encode('utf-8')\n encrypted_data = self._gpg.encrypt(\n data,\n self.args.target_key,\n sign=self._gpg.list_keys(True)[0]['fingerprint'],\n always_trust=False)\n if not encrypted_data.ok:\n raise Exception('Failed to encrypt data! Log: %s' % encrypted_data.stderr)\n return encrypted_data.data", "def encrypt(self, sensor_data):\r\n \r\n # set encryption parameters\r\n encryption1 = aes(self.ivkey, 2, self.staticiv)\r\n encryption2 = aes(self.datakey, 2, self.iv)\r\n # encrypt data\r\n self.encrypted_data = encryption2.encrypt(sensor_data) \r\n self.encrypted_iv = encryption1.encrypt(self.iv)\r\n self.encrypted_nodeid = encryption2.encrypt(self.nodeid)\r\n \r\n self.iv = bytes(random.getrandbits(8) for _ in range(16)) # changes every time\r", "def encrypt(self, data):\n\n key_public = RsaPublicKey.Read(self.crypt_public)\n return b64encode(key_public.Encrypt(data))", "def encrypt(self, data):\n if not data:\n return ''\n data = self._pad_data(data)\n return self._crypt(data, self.ENCRYPT)", "def encrypt_data(self, params):\n from django.core.signing import dumps\n return dumps(params, salt=self.salt_namespace)", "def encrypt_data(self, filename, data, master_pass, website): \n\n \"\"\"Concatenated extra characters in the case that the master password\n is less than 16 characters. However, this isn't a big safety trade off\n as the full length master password is hashed and checked for.\"\"\"\n concatenated_master = master_pass + \"================\"\n\n key = concatenated_master[:16].encode(\"utf-8\")\n\n cipher = AES.new(key, AES.MODE_EAX)\n\n \"\"\"A value that must never be reused for any other encryption done with\n this key saved alongside encrypted password. Converted to hexadecimal\n to be saved in DB. 
Later converted back to bytes to decode data\"\"\"\n nonce = cipher.nonce.hex()\n\n data_to_encrypt = data.encode(\"utf-8\")\n # again, bytes is invalid data for JSON so we convert it\n encrypted_data = cipher.encrypt(data_to_encrypt).hex()\n\n self.__save_password(filename, encrypted_data, nonce, website)", "def true_send(conn, data):\n encrypted_data = key.encrypt(pickle.dumps(data))\n length = str(len(encrypted_data)).zfill(LENGTH).encode()\n data = length + encrypted_data\n conn.send(data)", "def encrypt():\n\tnull = 0", "def encrypt_data(data, encryption_key):\n assert isinstance(data, str)\n obj = AES.new(encryption_key, AES.MODE_CBC, 'This is an IV456')\n padded = Pad.pad(data.encode())\n ciphertext = obj.encrypt(padded)\n return ciphertext.hex()", "def _encrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n\n # Pad to 16 bytes for AES CBC\n for i in range(16 - (len(data) % 16)):\n data += b'\\0'\n\n return cipher.encrypt(data)", "def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))", "def enc(self, data):\n return data", "def encrypt_data_key(self, dataKey, token, userGroup):\n masterKey = self.retrieve_master_key(token=token, userGroup=userGroup)\n box = secret.SecretBox(masterKey)\n if isinstance(dataKey, str):\n dataKey = dataKey.encode('utf-8')\n cipherText= box.encrypt(dataKey).decode('cp855')\n return cipherText", "def encrypt(self, data):\n data = data.replace(';', '').replace('=', '') # Remove special characters to avoid injection\n plaintext = (self._prefix + data + self._suffix).encode()\n return aes_cbc_encrypt(plaintext, self._key, self._iv)", "def encrypt(data, address, path, raw):\n client = ConfigClient(address=address, fail_fast=False)\n try:\n resp = client.encrypt(data, path=path)\n except Exception:\n raise click.ClickException(\"💥 Failed to contact server!\")\n\n if raw:\n resp = f\"{{cipher}}{resp}\"\n\n table = Table.grid(padding=(0, 1))\n table.add_column(style=\"cyan\", justify=\"right\")\n table.add_column(style=\"magenta\")\n\n table.add_row(\"encrypted data[yellow]:[/yellow] \", f\"'{resp}'\")\n console.print(Panel(table, border_style=\"yellow\", expand=True))", "def provider_encrypt(self, uid, input_vec) -> str:\n params = {\n 'input': input_vec,\n }\n return self.context.post(\"/ckks/provider/encrypt/%s\" % uid, params,\n \"CKKS:: failed encrypting data on provider side\"\n )", "async def encrypt(self, data, sequence_no):\n\t\treturn self.SEAL(\n\t\t\t#self.SignKey_client, \n\t\t\tself.SignKey_client,\n\t\t\tself.SealKey_client, \n\t\t\tdata,\n\t\t\tdata,\n\t\t\tsequence_no, \n\t\t\tself.crypthandle_client.encrypt\n\t\t)", "def Encrypt(self, data):\n data = self.__Pad(data)\n iv_bytes = util.RandBytes(self.block_size)\n ciph_bytes = AES.new(self.key_bytes, AES.MODE_CBC, iv_bytes).encrypt(data)\n msg_bytes = self.Header() + iv_bytes + ciph_bytes\n sig_bytes = self.hmac_key.Sign(msg_bytes) # Sign bytes\n return msg_bytes + sig_bytes", "def encryptor(text: bytes, IV: bytes, key: bytes) -> bytes:\n \n # Given\n prepend_string = \"comment1=cooking%20MCs;userdata=\"\n append_string = \";comment2=%20like%20a%20pound%20of%20bacon\"\n\t\n plaintext = text.replace(b';', b'\";\"').replace(b'=', b'\"=\"')\n ciphertext = AES_CBC_encrypt(PKCS7_pad(plaintext, len(key)), IV, key)\n return ciphertext", "def aes_encrypt(data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = pad(data)\r\n return cipher.encrypt(padded_data)", "def encrypt(algorithm, key, plaintext, associated_data, iv):\n 
encryptor = Encryptor(algorithm, key, associated_data, iv)\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n return EncryptedData(encryptor.iv, ciphertext, encryptor.tag)", "def encrypt(self, data):\n\n if self.crypt_public == \"\":\n raise ValueError(\"Error encrypting: No public encryption key found for {}\".format(self))\n\n key_public = RsaPublicKey.Read(self.crypt_public)\n return key_public.Encrypt(data)", "def encrypt(inp):\n # prepare plaintext\n prefix = \"comment1=cooking%20MCs;userdata=\"\n suffix = \";comment2=%20like%20a%20pound%20of%20bacon\"\n pt = inp.replace(\";\", \"\").replace(\"=\", \"\") # remove invalid character\n pt = prefix + pt + suffix # add prefix and suffix\n pt_encoded = pt.encode(\"utf-8\")\n pt_padded = pkcs7.add(pt_encoded, aes.S_BLOCK)\n\n # encrypt\n ct = aes.cbc_encrypt(pt_padded, KEY, IV)\n\n return ct", "def encrypt(self):\n # Generate a randomized initialization vector\n iv = Random.new().read(AES.block_size)\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n # Add a buffer so that the plaintext is a multiple of 16 characters in length\n pt_len = len(self.plaintext)\n buffer_size = AES.block_size - pt_len % AES.block_size\n strmsg = self.plaintext + \" \" * buffer_size\n return cipher.encrypt(str.encode(strmsg)), iv", "def encrypt(self, data):\n cipher_rsa = PKCS1_OAEP.new(self.key)\n return cipher_rsa.encrypt(data)", "def Encrypt(self, data):\n data = self.__Encode(data)\n ciph_bytes = self.key.encrypt(data, None)[0] # PyCrypto returns 1-tuple\n return self.Header() + ciph_bytes", "def _crypt(self, data, crypt_type):\n # splitting data in blocks, encrypting each one seperately\n i = 0\n result = []\n\n while i < len(data):\n block = self._string_to_bitlist(data[i:i + self.block_size])\n processed_block = self._des_crypt(block, crypt_type)\n result.append(self._bitlist_to_string(processed_block))\n i += self.block_size\n\n return b''.join(result)", "def _send(self, data):\n self._sock.send(self._cipher_tx.crypt(data))", "def encode(self, data):\n return self.__cipher.encrypt(data.encode('utf-8'))", "def __encrypt(self, plaintext):\n iv = get_random_bytes(16)\n try:\n encryption_envelope = {'ciphertext':'', \n 'keyid':esn_manifest + '_' + str(self.sequence_number), 'sha256':'AA==', \n 'iv':base64.standard_b64encode(iv).decode('utf-8')}\n except Exception:\n print('ESN is invalid.')\n sys.exit(0)\n\n plaintext = Padding.pad(plaintext.encode('utf-8'), 16)\n cipher = AES.new(self.encryption_key, AES.MODE_CBC, iv)\n ciphertext = cipher.encrypt(plaintext)\n encryption_envelope['ciphertext'] = base64.standard_b64encode(ciphertext).decode('utf-8')\n return json.dumps(encryption_envelope)", "def encrypted(data: str) -> str:\n return b64encode(data.encode('ascii')).decode('ascii')", "def encrypt(cls, plaintext, aad, key, iv):", "def __WriteEncrypted(self, data, pad=False):\n if pad:\n data = self.__key._Pad(data)\n\n encrypted_bytes = self.__cipher.encrypt(data)\n self.__output_stream.write(encrypted_bytes)\n self.__hmac_stream.Update(encrypted_bytes)", "def _encrypt_data_key(self, data_key, algorithm, encryption_context):\n # Raw key string to EncryptedData\n encrypted_wrapped_key = self.config.wrapping_key.encrypt(\n plaintext_data_key=data_key.data_key, encryption_context=encryption_context\n )\n # EncryptedData to EncryptedDataKey\n return aws_encryption_sdk.internal.formatting.serialize.serialize_wrapped_key(\n key_provider=self.key_provider,\n 
wrapping_algorithm=self.config.wrapping_key.wrapping_algorithm,\n wrapping_key_id=self.key_id,\n encrypted_wrapped_key=encrypted_wrapped_key,\n )", "def encrypt_data(data, encryption_key, iv=None):\n # Generate a random iv\n if iv is None:\n iv = get_random_bytes(IV_SIZE)\n generate_iv = True\n iv_length = IV_SIZE\n else:\n generate_iv = False\n iv_length = len(iv)\n cipher = AES.new(encryption_key, AES.MODE_GCM, iv)\n ciphered_data, tag = cipher.encrypt_and_digest(bytes(data))\n if generate_iv:\n # if iv passed by user is None, random iv generated\n # above is prepended in encrypted data\n # iv + Cipher + Tag\n result = iv + ciphered_data + tag\n else:\n # Cipher + Tag\n result = ciphered_data + tag\n return result", "def encrypt(self,password,indata):\n key = hashlib.sha256(password).digest()\n return encrypt_file(key,indata)", "def operate_cipher(self):", "def encrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_encrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def encode_data ( data ) :\n firstpass = base64.b64encode( data )\n cipher = get_cipher( firstpass )\n\n index = 0\n datalen = len( firstpass )\n encoded_data = []\n while index < datalen :\n if index % 2 == 0 :\n encoded_data.append( chr( ord( firstpass[ index ] ) + cipher ) )\n else :\n encoded_data.append( chr( ord( firstpass[ index ] ) - cipher ) )\n index += 1\n\n encoded_data[ 0 ] = firstpass[ 0 ]\n encoded_data[ -1 ] = firstpass[ -1 ]\n encoded_data[ -2 ] = firstpass[ -2 ]\n return ''.join( encoded_data )", "def user_encrypt_password(data=None, **kw):\n if 'password' in data:\n data['password'] = encrypt_password(data['password'])", "def encryptData(self, key, iv, data, align = True):\r\n\t\tif((len(data) % self.align) != 0 and align):\r\n\t\t\treturn AES.new(key, AES.MODE_CBC, iv).encrypt(data + (\"\\x00\" * (self.align - (len(data) % self.align))))\r\n\t\telse:\r\n\t\t\treturn AES.new(key, AES.MODE_CBC, iv).encrypt(data)", "def encrypt(event=None): # event is passed by binders.\n msg = inputText.get(\"1.0\",tkinter.END)\n outText.delete('1.0', tkinter.END)\n\n f = open(myTmpDir + 'pt' + str(identity) + '.bin','wb')\n f.write(msg)\n f.close()\n\n os.popen(\"rsa.exe e \" + myTmpDir + \"pt\" + str(identity) + \".bin \"+ myTmpDir + \"locEnc\" + str(identity) + \".bin\")\n\n locEncFileName = myTmpDir + \"locEnc\" + str(identity) + \".bin\"\n with open(locEncFileName, \"rb\") as f:\n readFile = f.read()\n # Convert to hex representation\n digest = base64.encodestring(bytes(readFile))\n\n # TODO: overwirite\n outText.insert(tkinter.END, digest)", "def encrypt_tensor(input):\n # get rank of current process\n rank = comm.get().get_rank()\n # get world size\n world_size = comm.get().get_world_size()\n assert world_size == 2\n \n # assumes party 1 is the actual data provider\n src_id = 1\n\n if rank == src_id:\n input_upd = input.cuda()\n else:\n input_upd = torch.empty(input.size()).cuda()\n private_input = crypten.cryptensor(input_upd, src=src_id)\n# print(private_input)\n return private_input", "def encrypt(data=None, pairing_group=None, pk=None, policy=None, debug=0):\n\n # Check if data is set\n if data is None:\n logging.error('encrypt_seed_key_len data exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in encrypt_seed_key_len data')\n raise Exception\n\n # Check if pk is set\n if pk is None:\n logging.error('encrypt_seed_key_len pk_file exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in encrypt_seed_key_len 
pk_file')\n raise Exception\n\n # Check if policy is set\n if policy is None:\n logging.error('encrypt_seed_key_len policy exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in encrypt_seed_key_len policy')\n raise Exception\n\n if debug: # ONLY USE FOR DEBUG\n print('DATA = (%s) %s' % (type(data), data))\n print('PK = (%s) %s' % (type(pk), pk))\n print('POLICY = (%s) %s' % (type(policy), policy))\n\n # Encrypt data with CP-ABE\n cpabe = CPabe_BSW07(pairing_group)\n enc_data = cpabe.encrypt(pk, data, policy)\n\n if debug: # ONLY USE FOR DEBUG\n print('ENC DATA WITH POLICY = (%d) %s' % (len(enc_data), enc_data))\n\n # Remove policy from encrypted data\n enc_data.pop('policy')\n\n if debug: # ONLY USE FOR DEBUG\n print('ENCRYPTED DATA = (%d) %s' % (len(enc_data), enc_data))\n\n return enc_data", "def DHencrypt(plaintext, symmetricKey, p, gen):\r\n \"Method was updated to use AES symetric decryption that was\"\r\n \"provided in the starter code as option of symetric encrytion using shared secret keys is generated.\"\r\n simplified_AES.keyExp(symmetricKey) # Generating round keys for AES.\r\n ciphertext = simplified_AES.encrypt(plaintext) # Running simplified AES.\r\n return ciphertext", "def encrypt(self, raw, use_base64=True, pad=True):\n encryptor = self.cipher.encryptor()\n if pad:\n raw = self._pad(raw)\n crypted_text = encryptor.update(raw) + encryptor.finalize()\n return base64.b64encode(crypted_text) if use_base64 else crypted_text", "def encryptFromString(self, data, keyobj):\n return self.encryptByteArray(bytearray(data, 'utf-8'), keyobj)", "def encrypt(plaintext):\n # Pad plaintext\n plaintext = pad(plaintext)\n\n # AES encrypt\n iv = Random.new().read(BS)\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return iv + aes.encrypt(plaintext)", "def crypt(key, data, iv):\n return xtea.crypt(key, data, iv)", "def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext", "def encrypt(self, input, iv):\n pass", "def encrypt(data, key, iv, save_path=None):\n if isinstance(data, str):\n with open(data, 'rb') as f:\n data = f.read()\n length = str(len(data))\n length = _pad16(length)\n\n key = _pad16(key)\n iv = _pad16(iv)\n data = _pad16(data)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.encrypt(data)\n data = length + data\n if save_path:\n with open(save_path, 'wb') as f:\n f.write(data)\n return data", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def encryptAESBlock(key, pt):\n\tif len(pt) != 16 and len(pt) != 32:\n\t\traise Exception(\"Plaintext is not length 16 or 32\")\n\tcipher = AES.new(key, AES.MODE_ECB)\n\treturn cipher.encrypt(pt)", "def encryptByteArray(self, data, keyobj):\n raise NotImplementedError(\"Is abstract\")", "def encrypt(key, plaintext):\n data = fk(keyGen(key)[0], ip(plaintext))\n return fp(fk(keyGen(key)[1], swapNibbles(data)))", "def aes(encrypt, key, data):\n cipher = AES.new(key, AES.MODE_CBC, get_zero_vector(16))\n if encrypt:\n return cipher.encrypt(data)\n else:\n return cipher.decrypt(data)", "def __encrypt(self, pwd):\n enc_buff = pwd\n m = hashlib.sha256()\n for i in range(int(config.get('security.encrypt_times'))):\n tmp_buff = '-'.join([\n config.get('security.encrypt_times'),\n enc_buff,\n self.salt\n ])\n enc_buff = m.update(tmp_buff)\n enc_buff = m.hexdigest()\n return enc_buff", "def encrypt_data():\n\tkey = generate_key()\n\tctr = AES_CTR(key, 0)\n\n\tdata_lines = 
open('20.txt').readlines()\n\n\tciphertexts = []\n\n\tfor line in data_lines:\n\t\tciphertexts.append( ctr.encrypt( b64decode( line ) ) )\n\n\treturn ciphertexts", "def strEnc(data, *keys):\n r = len(data) % 4\n data += (4 - r if r else 0) * chr(0)\n encData = \"\"\n for i in range(len(data) // 4):\n tempBt = strToBt(data[i * 4: i * 4 + 4])\n for key in keys:\n for b in getKeyBytes(key):\n tempBt = enc(tempBt, b)\n encData += bt64ToHex(tempBt)\n return encData", "def encrypt(self, plaintext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def aes_encrypt(self, buff):\n start = time.time()\n message = buff.encode()\n raw = pad(message)\n cipher = AES.new(\"DESCRYPTDESCRYPT\", AES.MODE_CBC, iv())\n enc = cipher.encrypt(raw)\n end = time.time()\n print \"Encrypt time: {0:.10f}\".format((end - start))\n return base64.b64encode(enc).decode('utf-8')", "def encrypt(self, input, key, iv):\n pass", "def Encrypt(key, value):\n key = key.zfill(32)[:32]\n value = Pad(value, 16)\n aes = AES.new(key, AES.MODE_ECB)\n encrypted = aes.encrypt(value)\n return base64.b64encode(encrypted)", "def encrypt(plaintext, key, associated_data=''):\n\n iv = os.urandom(12)\n\n encryptor = Cipher(\n algorithms.AES(key), modes.GCM(iv),\n backend=default_backend()).encryptor()\n\n encryptor.authenticate_additional_data(associated_data)\n\n ciphertext = encryptor.update(plaintext) + encryptor.finalize()\n\n return (iv, ciphertext, encryptor.tag)", "def _encryptDBPass():\n #run encrypt tool on user given password\n controller.CONF[\"ENCRYPTED_DB_PASS\"] = utils.encryptEngineDBPass(password=controller.CONF[\"DB_PASS\"],\n maskList=masked_value_set)", "def weaksauce_encrypt(text, password):\n\n offset = sum([ord(x) for x in password])\n encoded = ''.join(\n chr(min(ord(x) + offset, 2**20))\n for x in text\n )\n return encoded", "def encrypt(lines:list, data:bytes):\n\n # TODO make ckeck of text length\n lines_index = 0\n if len(lines) < len(data)*8: # raise Error, if text are too short\n raise RuntimeError(\"Error: Text too short to encrypt message\")\n\n for bit in iterate_bits(data):\n if bit:\n lines[lines_index] += \" \"\n lines_index += 1\n return \"\\n\".join(lines)", "def encrypt_symmetric(secret_key, plaintext):\n f = Fernet(secret_key)\n return f.encrypt(plaintext)", "def encrypt(self, token: bytes) -> bytes:\n return None", "def __encrypt_text_aes__(self, text, password):\n BLOCK_SIZE = 32\n PADDING_CHAR = b'^'\n iv = Random.new().read(16)\n # key must be 32 bytes for AES-256, so the password is hashed with md5 first\n cipher = AES.new(self.__hash_md5__(password), AES.MODE_CBC, iv)\n plaintext = text.encode('utf-8')\n # plaintext must be padded to be a multiple of BLOCK_SIZE\n plaintext_padded = plaintext + (BLOCK_SIZE - len(plaintext) % BLOCK_SIZE) * PADDING_CHAR\n ciphertext = cipher.encrypt(plaintext_padded)\n return (\n base64.b64encode(iv),\n base64.b64encode(ciphertext),\n PADDING_CHAR\n )", "def encrypt(self, value):\n\n return value", "def encrypt(self, key, data, mode, padding):\n # pylint: disable=unused-argument,no-self-use\n if hasattr(key, \"private_bytes\"):\n _key = key.public_key()\n else:\n _key = key\n try:\n return _key.encrypt(data, padding.build())\n except Exception:\n error_message = \"Encryption failed\"\n _LOGGER.exception(error_message)\n raise EncryptionError(error_message)", "def encrypt():\n print(\"Use sops to encrypt the file.\")\n print(\"Learn more at https://github.com/mozilla/sops\")", "def encrypt_download(self, uid, to_encrypt: list, filepath: str) -> str:\n # encrypt\n 
print(\"Encrypting data with context uid: %s\" % uid)\n #start_time = time.perf_counter()\n uid_enc = self.provider_encrypt(uid, to_encrypt)\n #end_time = time.perf_counter()\n #print(\"Encryption took\", end_time - start_time, \"seconds.\")\n assert uid_enc is not None\n print(\"Success encrypting data with uid: %s\" % uid_enc)\n # now perform download\n print(\"Downloading %s...\" % filepath)\n #start_time = time.perf_counter()\n num_bytes = self.provider_download_data(uid_enc, filepath)\n #end_time = time.perf_counter()\n #print(\"Downloading took\", end_time - start_time, \"seconds.\")\n print(\"Success downloading provider enc data with new uid: %s (%d bytes)\" %\n (uid_enc, num_bytes))\n return uid_enc", "def encrypt(self, key, value):\n\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(16))\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n crypted = cipher.encrypt(self.pkcs5_pad(value))\n return iv+crypted", "def encrypt(self, string):\n return self.__Cipher(self.OP_ENCRYPT).update(string)", "def encrypt(self, public_key, data):\n d_data = Data(data)\n out = Buffer(self.encrypted_len(public_key=public_key, data_len=len(data)))\n status = self._lib_vscf_ecc.vscf_ecc_encrypt(self.ctx, public_key.c_impl, d_data.data, out.c_buffer)\n VscfStatus.handle_status(status)\n return out.get_bytes()", "def encrypt_string(self, raw_string):\n return self.fernet_instance.encrypt(raw_string.encode('utf-8'))", "def as_encrypted(self):\n\n # Interpret as one word (pid), followed by a bunch of shorts\n struct_def = \"I\" + \"H\" * ((len(self.blob) - 4) / 2)\n words = list( struct.unpack(struct_def, self.blob) )\n\n # Apply the block shuffle and standard Pokémon encryption\n shuffled = self.shuffle_chunks(words)\n self.reciprocal_crypt(shuffled)\n\n # Stuff back into a string, and done\n return struct.pack(struct_def, *shuffled)", "def encrypt(self, text):\n\t\tclean_text = message2num(text)\n\t\tencrypted = []\n\t\tplug = self._enc_plugboard(clean_text)\n\n\t\tfor letter in plug:\n\t\t\tl = self._forward(letter)\n\t\t\tl = self._reflect(self.reflector, l)\n\t\t\tl = self._backwards(l)\n\t\t\tencrypted.append(l)\n\n\t\tencrypted = self._enc_plugboard(encrypted)\n\t\tencrypted = \"\".join(POS2L[l] for l in encrypted)\n\n\t\treturn encrypted", "def encrypt(self, buffer):\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n ct_bytes = cipher.encrypt(pad(buffer, AES.block_size))\n return base64.b64encode(ct_bytes)", "def encrypt(self, value):\n return self._execute(value)", "def prompt_encrypt(self):\r\n print(\"Please copy/paste key and secret from MtGox and\")\r\n print(\"then provide a password to encrypt them.\")\r\n print(\"\")\r\n\r\n\r\n key = input(\" key: \").strip()\r\n secret = input(\" secret: \").strip()\r\n while True:\r\n password1 = getpass.getpass(\" password: \").strip()\r\n if password1 == \"\":\r\n print(\"aborting\")\r\n return\r\n password2 = getpass.getpass(\"password (again): \").strip()\r\n if password1 != password2:\r\n print(\"you had a typo in the password. 
try again...\")\r\n else:\r\n break\r\n\r\n # pylint: disable=E1101\r\n hashed_pass = hashlib.sha512(password1.encode(\"utf-8\")).digest()\r\n crypt_key = hashed_pass[:32]\r\n crypt_ini = hashed_pass[-16:]\r\n aes = AES.new(crypt_key, AES.MODE_OFB, crypt_ini)\r\n\r\n # since the secret is a base64 string we can just just pad it with\r\n # spaces which can easily be stripped again after decryping\r\n print(len(secret))\r\n secret += \" \" * (16 - len(secret) % 16)\r\n print(len(secret))\r\n secret = base64.b64encode(aes.encrypt(secret)).decode(\"ascii\")\r\n\r\n self.config.set(\"gox\", \"secret_key\", key)\r\n self.config.set(\"gox\", \"secret_secret\", secret)\r\n self.config.save()\r\n\r\n print(\"encrypted secret has been saved in %s\" % self.config.filename)", "def encrypt_aes(msg, key, iv):\r\n #start timer\r\n start = timeit.default_timer()\r\n\r\n #converting key to bytes from hex\r\n key = bytes.fromhex(key)\r\n msg = pad(msg)\r\n obj = AES.new(key, AES.MODE_CBC, iv)\r\n ciphertxt = obj.encrypt(msg)#ciphertxt will be in 'bytes'\r\n\r\n #converting ciphertxt into hexadecimal\r\n ciphertxt = ciphertxt.hex()\r\n\r\n print(\"Ciper is: \",ciphertxt)\r\n\r\n #stop timer\r\n stop = timeit.default_timer()\r\n print('Encryption Running Time: ', stop-start)\r\n \r\n return ciphertxt", "def encrypt_password(pass_to_encrypt):\n\n temp_key = get_crypt_key()\n tk = Fernet(temp_key)\n\n pass_to_encrypt = pass_to_encrypt.encode(\"UTF-8\")\n return tk.encrypt(pass_to_encrypt)", "def encrypt(ctx, input, output):\n gpg_key = _get_gpg_key(_get_pem(ctx().source), ctx().user, ctx().verbose)\n _run_gpg_with_key(gpg_key, [\n '--encrypt', '--recipient',\n ctx().user, '--trust-model', 'always', '--armor'\n ], input, output, ctx().verbose)", "def aes_encrypt(mode, aes_key, aes_iv, *data):\n encryptor = Cipher(\n algorithms.AES(aes_key), mode(aes_iv), backend=default_backend()\n ).encryptor()\n\n result = None\n for value in data:\n result = encryptor.update(value)\n encryptor.finalize()\n\n return result, None if not hasattr(encryptor, \"tag\") else encryptor.tag", "def encrypt():\n request_data = request.get_json()\n\n if 'message' in request_data:\n encryption = Encryption()\n try:\n data = encryption.encrypt(request_data['message'])\n except ValueError as error:\n return Response(\n json.dumps(\n {\n 'error': f'Failed to encrypt the message due to the error: [{error}]'\n }\n ),\n 400,\n mimetype='application/json'\n )\n\n for key in data:\n data[key] = b64encode(data[key]).decode()\n\n return jsonify(data), 200\n\n return Response(\n json.dumps({'error': 'Message missing in the request body'}),\n 400,\n mimetype='application/json'\n )", "def decrypt(self, data):", "def encrypt(self, key, data, mode, padding):\n # this can be disabled by _disable_encryption, so pylint: disable=method-hidden\n try:\n block_size = self.cipher.block_size\n iv_len = block_size // 8\n iv = os.urandom(iv_len)\n\n encryptor = Cipher(self.cipher(key), mode.build(iv), backend=default_backend()).encryptor()\n padder = padding.build(block_size).padder()\n\n padded_data = padder.update(data) + padder.finalize()\n return iv + encryptor.update(padded_data) + encryptor.finalize()\n except Exception:\n error_message = \"Encryption failed\"\n _LOGGER.exception(error_message)\n raise EncryptionError(error_message)", "def _encrypt_aes_key(aes_key: bytes, receiver_public_key: RsaKey) -> bytes:\n cipher_rsa = PKCS1_OAEP.new(receiver_public_key)\n return cipher_rsa.encrypt(aes_key)", "def encryptor(infile: str, outfile: str, password: str, 
mode: str) -> int:\n enc = Encrypt(infile)\n\n if mode.upper() == 'AES':\n encrypted_data = enc.AES(password)\n elif mode.upper() == 'DES':\n encrypted_data = enc.DES(password)\n elif mode.upper() == 'SALSA20':\n encrypted_data = enc.Salsa20(password)\n else:\n return 2\n\n if not encrypted_data:\n return 3\n\n write_data(get_extension(infile) + encrypted_data, outfile)\n return 0", "def main():\n key, plain = get_key_plain()\n encode(key, plain)", "def encrypt(cleartext):\n base_encode = {'16': base64.b16encode,\n '32': base64.b32encode, '64': base64.b64encode}\n ciphertext = cleartext+''\n\n for i in range(encrypt_times):\n base = random.choice(['16', '32', '64'])\n ciphertext = base_encode[base](ciphertext)\n\n return ciphertext", "def test_encrypt_encoding(self):\n encrypted = encrypt('message')\n\n assert encrypted\n assert encrypted != 'message'\n assert type(encrypted) == str", "def _disabled_encrypt(self, *args, **kwargs):\n raise NotImplementedError('\"encrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))" ]
[ "0.7355738", "0.72868085", "0.7221197", "0.719316", "0.71014744", "0.70030105", "0.69509864", "0.69267815", "0.68219936", "0.6818995", "0.6817378", "0.67560524", "0.67371756", "0.6733585", "0.6675217", "0.6667538", "0.654713", "0.65417606", "0.65303856", "0.65274465", "0.6526483", "0.6505733", "0.64853823", "0.64812493", "0.6446817", "0.64333147", "0.64145666", "0.64141685", "0.64106566", "0.64094883", "0.6404058", "0.6381853", "0.63452005", "0.63095355", "0.6303481", "0.6303293", "0.630198", "0.6300546", "0.6299661", "0.6267203", "0.62532187", "0.6246964", "0.6239652", "0.6237081", "0.61912954", "0.6167434", "0.61590195", "0.6148717", "0.6134555", "0.6132427", "0.6119751", "0.6108934", "0.6090959", "0.60749495", "0.6049712", "0.6044543", "0.6039953", "0.6038945", "0.60307306", "0.6023368", "0.6017155", "0.6008318", "0.60080504", "0.59945256", "0.5990762", "0.5988345", "0.59877914", "0.5987722", "0.5967587", "0.59553146", "0.594934", "0.5940414", "0.5933364", "0.5930011", "0.59249127", "0.5922619", "0.5920623", "0.5889144", "0.58864814", "0.58853126", "0.58760947", "0.58752716", "0.5873743", "0.58674395", "0.58618146", "0.5842988", "0.58369064", "0.58343625", "0.58293194", "0.5817444", "0.58149946", "0.58148545", "0.58108354", "0.580958", "0.58094144", "0.5808683", "0.58008116", "0.57977986", "0.5776587", "0.57759947", "0.5767253" ]
0.0
-1
Decrypts data based on what the TPLink smartplug is using
def __decrypt(string: str) -> str:
    key = 171
    result = ""
    i: int
    for i in string:
        a = key ^ i
        key = i
        result += chr(a)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt(self, data):", "def decrypt_data(self, encrypted_data):\n raise NotImplementedError", "def decrypt_data ( aes_key, data ) :\n decoded_data = decode_data( data )\n salt = decoded_data[ 0 : Crypto.Cipher.AES.block_size ]\n encrypted_data = decoded_data[ Crypto.Cipher.AES.block_size : ]\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n decrypted_data = cipher.decrypt( encrypted_data )\n\n return decrypted_data", "def _decrypt(self, data, key):\n seed1 = key\n seed2 = 0xEEEEEEEE\n result = BytesIO()\n\n for i in range(len(data) // 4):\n seed2 += self.encryption_table[0x400 + (seed1 & 0xFF)]\n seed2 &= 0xFFFFFFFF\n value = struct.unpack(\"<I\", data[i*4:i*4+4])[0]\n value = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n\n seed1 = ((~seed1 << 0x15) + 0x11111111) | (seed1 >> 0x0B)\n seed1 &= 0xFFFFFFFF\n seed2 = value + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF\n\n result.write(struct.pack(\"<I\", value))\n\n return result.getvalue()", "def _decrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n return cipher.decrypt(data)", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def Decrypt(self, data):\n\n data = base64.b64decode(data)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n solved = \"\"\n try:\n solved = es.decrypt(data)\n except ValueError:\n stdout.write(\"Error, corrupted file.\\n\\n\")\n return \"%errorpass:1234123412341234%\"\n\n return solved", "def decode(self, data):\n return self.__cipher.decrypt(data)", "def decrypt(self, data):\n if not data:\n return ''\n data = self._crypt(data, self.DECRYPT)\n return self._unpad_data(data)", "def decrypt(data, key):\n data = six.ensure_binary(data)\n try:\n data = privy.peek(hidden=data, password=key)\n except ValueError:\n error = \"Unable to decrypt {cnt} bytes of data using key {k}, invalid key!\"\n error = error.format(cnt=len(data), k=key)\n raise exceptions.ModuleError(error)\n return six.ensure_text(data)", "def decrypt(data):\n # Decrypt data if necessary\n result = None\n if str(data[:5]) == \"<?xml\":\n print(\" - Unprotected CETRAINER detected\")\n result = data\n else:\n print(\" - Protected CETRAINER detected. 
Decrypting...\")\n ckey = 0xCE\n for i in range(2, len(data)):\n data[i] = data[i] ^ data[i-2]\n for i in range(len(data)-2, -1, -1):\n data[i] = data[i] ^ data[i+1]\n for i in range(0, len(data)):\n data[i] = data[i] ^ ckey\n ckey = (ckey + 1) & 0xFF\n\n # Decompress if necessary and write data\n if data[:5] == b'CHEAT':\n result = zlib.decompress(data[5:], -15)\n result = result[4:]\n print(\" - Decompressed CETRAINER using new method\")\n else:\n result = zlib.decompress(data, -15)\n print(\" - Decompressed CETRAINER using old method\")\n return result", "def decrypt_data(data, encryption_key):\n assert isinstance(data, str)\n obj = AES.new(encryption_key, AES.MODE_CBC, 'This is an IV456')\n bytes_data = bytes.fromhex(data)\n return Pad.unpad(obj.decrypt(bytes_data)).decode()", "def decrypted(data: str) -> str:\n\n return b64decode(data.encode('ascii')).decode('ascii')", "def decrypt_data(self, encrypted_data):\n from django.core.signing import loads\n return loads(encrypted_data, salt=self.salt_namespace)", "def extract(self, data):\n return ujson.loads(self.cipher.decrypt(data))", "def decrypt(algorithm, key, encrypted_data, associated_data):\n decryptor = Decryptor(algorithm, key, associated_data, encrypted_data.iv, encrypted_data.tag)\n return decryptor.update(encrypted_data.ciphertext) + decryptor.finalize()", "def decrypt_raw(self, key, data):\n iv = data[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.decrypt(data[AES.block_size:])\n return self.__unpad(data)", "def decrypt_data_key(self, dataKeyCypher, token, userGroup):\n masterKey = self.retrieve_master_key(token=token, userGroup=userGroup)\n box = secret.SecretBox(masterKey)\n if isinstance(dataKeyCypher, str):\n dataKeyCypher = dataKeyCypher.encode('cp855')\n try:\n plainText = box.decrypt(dataKeyCypher).decode('utf-8')\n except Exception:\n raise UnableToDecryptException(\"Unable to verify cyphertext/key pair\")\n return plainText", "def decrypt_message(encrypted_message):", "def decrypt(self, key, value):\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n iv = value[:16]\n crypted = value[16:]\n cipher = AES.new(key,AES.MODE_CBC,iv)\n return self.pkcs5_unpad(cipher.decrypt(crypted))", "def decrypt_pass(self, cont):\r\n try:\r\n iv = cont[3:15]\r\n data = cont[15:]\r\n ciph = AES.new(self.chrome_key(), AES.MODE_GCM, iv)\r\n decrypted = ciph.decrypt(data)\r\n decrypted = decrypted[:-16].decode()\r\n return decrypted\r\n except:\r\n decrypted = win32crypt.CryptUnprotectData(buff, None, None, None, 0)\r\n return decrypted[1]", "def decode_data ( data ) :\n cipher = get_cipher( data )\n index = 0\n firstpass = []\n datalen = len( data )\n while index < datalen :\n if index % 2 == 0 :\n firstpass.append( chr( ord( data[ index ] ) - cipher ) )\n else :\n firstpass.append( chr( ord( data[ index ] ) + cipher ) )\n index += 1\n\n firstpass[ 0 ] = data[ 0 ]\n firstpass[ -1 ] = data[ -1 ]\n firstpass[ -2 ] = data[ -2 ]\n decoded_data = ''.join( firstpass )\n return base64.b64decode( decoded_data )", "def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password):\n\n decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher]\n\n # Modern, PKCS#5 PBES2-based encryption\n if encryption_algorithm_info.kdf == 'pbkdf2':\n\n if encryption_algorithm_info.encryption_cipher == 'rc5':\n raise ValueError(pretty_message(\n '''\n PBES2 encryption scheme utilizing RC5 encryption is not supported\n '''\n ))\n\n enc_key = pbkdf2(\n encryption_algorithm_info.kdf_hmac,\n password,\n 
encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length\n )\n enc_iv = encryption_algorithm_info.encryption_iv\n\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n elif encryption_algorithm_info.kdf == 'pbkdf1':\n derived_output = pbkdf1(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length + 8\n )\n enc_key = derived_output[0:8]\n enc_iv = derived_output[8:16]\n\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n elif encryption_algorithm_info.kdf == 'pkcs12_kdf':\n enc_key = pkcs12_kdf(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length,\n 1 # ID 1 is for generating a key\n )\n\n # Since RC4 is a stream cipher, we don't use an IV\n if encryption_algorithm_info.encryption_cipher == 'rc4':\n plaintext = decrypt_func(enc_key, encrypted_content)\n\n else:\n enc_iv = pkcs12_kdf(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.encryption_block_size,\n 2 # ID 2 is for generating an IV\n )\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n return plaintext", "def decrypt(self, encryptedserial):\n # Obtain data and metadata, but return only data\n data, _ = self.decrypt_with_metadata(encryptedserial)\n return data", "def decrypt(key, cipher, use_custom=False):\n result = logic(key, cipher, use_custom)\n return array.array(\"B\", result)", "def decrypt(data, key, iv):\n decryptor = AES.new(key, AES.MODE_CBC, iv=iv)\n return decryptor.decrypt(data)", "def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)", "def base64_aes_decrypt(self,data,key):\r\n cipher = AES.new(key)\r\n return self._depkcs7padding(cipher.decrypt(base64.b64decode(data)))", "def decrypt(self,password,indata):\n key = hashlib.sha256(password).digest()\n return decrypt_file(key,indata)", "def base64_aes_decrypt(self,data,key):\n cipher = AES.new(key)\n try:\n return self._depkcs7padding(cipher.decrypt(base64.b64decode(data)))\n except Exception, ex:\n return ''", "def do_android_decryption(self):\r\n self.aes_decryption_key = self.extract_aes_key()\r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def decrypt(crypto, priv):\r\n string = rsa.encrypt(crypto, priv)\r\n string = livingDead.utfE(crypto)\r\n return crypto", "def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted", "def heat_decrypt(value, encryption_key=None):\n encryption_key = get_valid_encryption_key(encryption_key)\n auth = base64.b64decode(value)\n iv = auth[:AES.block_size]\n cipher = AES.new(encryption_key, AES.MODE_CFB, iv)\n res = cipher.decrypt(auth[AES.block_size:])\n return res", "def decode(self, crypto):", "def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i 
+ 16], key))\n for _i in range(0, len(data), 16)), ())", "def decrypt_device_line(self, base64_data: bytes) -> bytes:\r\n # this can fail if the line is missing or has extra :'s, the case is handled as line error\r\n iv, base64_data = base64_data.split(b\":\")\r\n iv = decode_base64(iv)\r\n raw_data = decode_base64(base64_data)\r\n \r\n # handle cases of no data, and less than 16 bytes of data, which is an equivalent scenario.\r\n if not raw_data or len(raw_data) < 16:\r\n raise InvalidData()\r\n if not iv or len(iv) < 16:\r\n raise InvalidIV()\r\n \r\n # CBC data encryption requires alignment to a 16 bytes, we lose any data that overflows that length.\r\n overflow_bytes = len(raw_data) % 16\r\n \r\n if overflow_bytes:\r\n # print(\"\\n\\nFOUND OVERFLOWED DATA\\n\\n\")\r\n # print(\"device os:\", self.participant.os_type)\r\n # print(\"\\n\\n\")\r\n raw_data = raw_data[:-overflow_bytes]\r\n \r\n try:\r\n decipherer = AES.new(self.aes_decryption_key, mode=AES.MODE_CBC, IV=iv)\r\n decrypted = decipherer.decrypt(raw_data)\r\n except Exception:\r\n if iv is None:\r\n len_iv = \"None\"\r\n else:\r\n len_iv = len(iv)\r\n if raw_data is None:\r\n len_data = \"None\"\r\n else:\r\n len_data = len(raw_data)\r\n if self.aes_decryption_key is None:\r\n len_key = \"None\"\r\n else:\r\n len_key = len(self.aes_decryption_key)\r\n # these print statements cause problems in getting encryption errors because the print\r\n # statement will print to an ascii formatted log file on the server, which causes\r\n # ascii encoding error. Enable them for debugging only. (leave uncommented for Sentry.)\r\n # print(\"length iv: %s, length data: %s, length key: %s\" % (len_iv, len_data, len_key))\r\n # print('%s %s %s' % (patient_id, key, orig_data))\r\n raise\r\n \r\n # PKCS5 Padding: The last byte of the byte-string contains the number of bytes at the end of the\r\n # bytestring that are padding. 
As string slicing in python are a copy operation we will\r\n # detect the fast-path case of no change so that we can skip it\r\n num_padding_bytes = decrypted[-1]\r\n if num_padding_bytes:\r\n decrypted = decrypted[0: -num_padding_bytes]\r\n return decrypted", "def aes_decrypt(encrypted_data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = cipher.decrypt(encrypted_data)\r\n return unpad(padded_data)", "def ecb_decrypt(self, encrypted_data, color):\n msg = b''\n for d in encrypted_data:\n encoded_bytes = d[0] + d[1]\n encoded_int = self.bytes_to_int(encoded_bytes)\n decoded_int = self.power(encoded_int, self.private_key, self.N)\n decoded_byte = self.int_to_bytes(decoded_int, len(d[0]))\n msg += decoded_byte\n return msg", "def decrypt(data, private_key):\r\n\r\n # Retrieve session key, tag, ciphertext and nonce from file\r\n enc_session_key, nonce, tag, ciphertext = \\\r\n [ file_in.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1) ]\r\n\r\n\r\n # Decrypt the session key\r\n session_key = cipher_rsa.decrypt(enc_session_key)\r\n\r\n # Decrypt the data with the AES session key\r\n cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\r\n data = cipher_aes.decrypt_and_verify(ciphertext, tag)\r\n\r\n return data", "def decrypt(self, buffer):\n try:\n ct = base64.b64decode(buffer)\n except:\n print('f a i l')\n return bytes('fail')\n\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n pt = unpad(cipher.decrypt(ct), AES.block_size)\n return pt", "def passwd_decryption(self):\n with open(self.key_path, 'rb') as input_key:\n for line in input_key:\n key = line\n with open(self.pass_path, 'rb') as input_password:\n for line in input_password:\n password = line\n cipher_suit = Fernet(key)\n plain_password = cipher_suit.decrypt(password)\n plain_password = bytes(plain_password).decode('utf-8')\n \n return plain_password", "def decrypt(code):\n f = Fernet(settings.SECRET_KEY)\n return f.decrypt(code).decode('ascii')", "def _decrypt(self, msg):\r\n # they must be real crypto experts at pubnub.com\r\n # two lines of code and two capital mistakes :-(\r\n # pylint: disable=E1101\r\n key = hashlib.sha256(self.cipher).hexdigest()[0:32]\r\n aes = AES.new(key, AES.MODE_CBC, \"0123456789012345\")\r\n decrypted = aes.decrypt(base64.decodestring(msg))\r\n return json.loads(decrypted[0:-ord(decrypted[-1])])", "def do_ios_decryption(self):\r\n try:\r\n self.aes_decryption_key = self.extract_aes_key()\r\n except DecryptionKeyInvalidError:\r\n self.aes_decryption_key = self.get_backup_encryption_key()\r\n self.used_ios_decryption_key_cache = True\r\n \r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def decrypt(self):\n # Grab the initialization vector from the front of the cipher-text\n iv = self.ciphertext[:AES.block_size]\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return cipher.decrypt(self.ciphertext)[AES.block_size:].rstrip().decode(\"utf-8\"), iv", "def decode_and_decrypt(encoded_data, key):\r\n return aes_decrypt(base64.urlsafe_b64decode(encoded_data), key)", "def decrypt(cls, ciphertext_and_tag, aad, key, iv):", "def decrypt(self, value):\n return self._execute(value, task='decrypt')", "def decrypt(self, cryptod, secret):\n try:\n # From json to python crypto dict\n data = base64.b64decode(\n bytes(cryptod['ciphervalue'], encoding=self.encoding))\n # Decrypt\n iv = base64.b64decode(bytes(cryptod['iv'], 
encoding=self.encoding))\n algorithm = self._algorithm(\n secret=secret, name=cryptod['algorithm'])\n cipher = Cipher(algorithm, modes.CBC(iv), backend=self.backend)\n decryptor = cipher.decryptor()\n data = decryptor.update(data) + decryptor.finalize()\n # Unpad\n unpadder = padding.PKCS7(cipher.algorithm.block_size).unpadder()\n data = unpadder.update(data) + unpadder.finalize()\n # Unzip\n data = str(gzip.decompress(data), encoding=self.encoding)\n cipher = None\n # json string\n except ValueError as ve:\n raise ValueError('Decrypt failure!') from ve\n try:\n data = json.loads(data)\n except ValueError as ve:\n raise ValueError('JSON formatting failure!') from ve\n return data", "def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)", "def decrypt_kms_data(encrypted_data):\n if not AWS_REGION:\n return\n\n kms = boto3.client('kms', region_name=AWS_REGION)\n\n decrypted = kms.decrypt(CiphertextBlob=encrypted_data)\n\n if decrypted.get('KeyId'):\n # Decryption succeed\n decrypted_value = decrypted.get('Plaintext', '')\n if isinstance(decrypted_value, bytes):\n decrypted_value = decrypted_value.decode('utf-8')\n return decrypted_value", "def decrypt(self, text):\n return self.encrypt(text)", "def decrypt(ciphertext):\n # AES decrypt\n iv = ciphertext[:16]\n ciphertext = ciphertext[16:]\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(aes.decrypt(ciphertext))", "def decrypt_attr(data, key):\n data = MegaCrypto.base64_decode(data)\n k, iv, meta_mac = MegaCrypto.get_cipher_key(key)\n attr = MegaCrypto.cbc_decrypt(data, k)\n\n #: Data is padded, 0-bytes must be stripped\n return json.loads(\n re.search(r'{.+?}', attr).group(0)) if attr[:6] == 'MEGA{\"' else False", "def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))", "def decrypt(pwd, data):\n\n ct = b64decode(data['ct'])\n salt = b64decode(data['salt'])\n tag_start = len(ct) - data['ts'] // 8\n tag = ct[tag_start:]\n ciphertext = ct[:tag_start]\n\n mode_class = getattr(modes, data['mode'].upper())\n algo_class = getattr(algorithms, data['cipher'].upper())\n\n kdf = _kdf(data['ks'], iters=data['iter'], salt=salt)[0]\n key = kdf.derive(bytes(pwd, \"utf-8\"))\n cipher = Cipher(\n algo_class(key),\n mode_class(\n b64decode(data['iv']),\n tag,\n min_tag_length=data['ts'] // 8\n ),\n backend=_BACKEND\n )\n\n dec = cipher.decryptor()\n return dec.update(ciphertext) + dec.finalize()", "def rsa_pkcs1v15_decrypt(self, data):\n pass", "def decrypt_data(self, master_pass, website, filename): \n\n if os.path.isfile(filename):\n try:\n with open(filename, 'r') as jdata:\n jfile = json.load(jdata)\n nonce = bytes.fromhex(jfile[website][\"nonce\"])\n password = bytes.fromhex(jfile[website][\"password\"])\n except KeyError:\n raise PasswordNotFound\n else:\n raise PasswordFileDoesNotExist\n # add extra characters and take first 16 to make sure key is right.\n formatted_master_pass = master_pass + \"================\"\n master_pass_encoded = formatted_master_pass[:16].encode(\"utf-8\")\n cipher = AES.new(master_pass_encoded, AES.MODE_EAX, nonce = nonce)\n plaintext_password = cipher.decrypt(password).decode(\"utf-8\")\n\n return plaintext_password", "def decrypt(data, key, iv, save_path=None):\n if isinstance(data, str):\n with open(data, 'rb') as f:\n data = f.read()\n pad_ch = '\\0'\n length = 
int(data[:16].rstrip(pad_ch.encode('utf-8')).decode('utf-8'))\n data = data[16:]\n key = _pad16(key)\n iv = _pad16(iv)\n cipher = AES.new(key, AES.MODE_CBC, iv)\n data = cipher.decrypt(data)\n data = data[:length]\n if save_path:\n with open(save_path, 'wb') as f:\n f.write(data)\n return data", "def decrypt_aes256(data, key, iv):\n decryptor = AES.new(key, AES.MODE_CBC, iv)\n return decryptor.decrypt(data)", "def decryptByteArray(self, data, keyobj):\n raise NotImplementedError(\"Is abstract\")", "def _decrypt_secret(\n self, \n encryption_key: str,\n secret_list: List,\n secret_name: str\n ):\n f = Fernet(\n bytes(encryption_key, \"utf-8\")\n )\n secret=None\n if 'secrets' in secret_list:\n if secret_name in secret_list['secrets']:\n secret = f.decrypt(\n bytes(secret_list['secrets'][secret_name], \"utf-8\")\n ).decode('UTF-8')\n #self.log.log_success(\n # f'{secret_name} : {secret}'\n #)\n return secret", "def decrypt(self, key, data, mode, padding):\n # this can be disabled by _disable_encryption, so pylint: disable=method-hidden\n try:\n block_size = self.cipher.block_size\n iv_len = block_size // 8\n iv = data[:iv_len]\n data = data[iv_len:]\n\n decryptor = Cipher(self.cipher(key), mode.build(iv), backend=default_backend()).decryptor()\n decrypted_data = decryptor.update(data) + decryptor.finalize()\n\n unpadder = padding.build(block_size).unpadder()\n return unpadder.update(decrypted_data) + unpadder.finalize()\n except Exception:\n error_message = \"Decryption failed\"\n _LOGGER.exception(error_message)\n raise DecryptionError(error_message)", "def decrypt(self, encrypted_token: bytes) -> bytes:\n return None", "def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')", "async def decrypt(self, data, sequence_no, direction='init', auth_data=None):\n\t\tedata = data[16:]\n\t\tsrv_sig = NTLMSSP_MESSAGE_SIGNATURE.from_bytes(data[:16])\n\t\tsealedMessage = self.crypthandle_server.encrypt(edata)\n\t\tsignature = self.MAC(self.crypthandle_server.encrypt, self.SignKey_server, srv_sig.SeqNum, sealedMessage)\n\t\t#print('seqno %s' % sequence_no)\n\t\t#print('Srv sig: %s' % data[:16])\n\t\t#print('Calc sig: %s' % signature)\n\n\t\treturn sealedMessage, None", "def decrypt_string(self, encrypted_string):\n return self.fernet_instance.decrypt(encrypted_string.encode('utf-8')).decode('utf-8')", "def decrypt(self, encrypted: str) -> str: # type: ignore\n passphrase = self.passphrase\n encrypted = base64.b64decode(encrypted) # type: ignore\n assert encrypted[0:8] == b\"Salted__\"\n salt = encrypted[8:16]\n key_iv = self.bytes_to_key(passphrase.encode(), salt, 32 + 16)\n key = key_iv[:32]\n iv = key_iv[32:]\n aes = AES.new(key, AES.MODE_CBC, iv)\n try:\n return self.unpad(aes.decrypt(encrypted[16:])).decode() # type: ignore\n except UnicodeDecodeError:\n raise ValueError(\"Wrong passphrase\")", "def decrypt(self, in_, out):\n try:\n # Bytes read from in will be decrypted\n \n out.write(pyDes.des.decrypt(in_.read()))\n # Read in the decrypted bytes and write the cleartext to out\n out.close()\n except Exception as e:\n print e\n pass", "def decryptToString(self, data, keyobj):\n return self.decryptByteArray(data, keyobj).decode().split('\\x00')[0]", "def decryptData(self, key, iv, data, align = True):\r\n\t\tif((len(data) % self.align) != 0 and align):\r\n\t\t\treturn AES.new(key, AES.MODE_CBC, iv).decrypt(data + (\"\\x00\" * (self.align - (len(data) % 
self.align))))\r\n\t\telse:\r\n\t\t\treturn AES.new(key, AES.MODE_CBC, iv).decrypt(data)", "def decrypt(self, encrypted):\n\n encrypted = base64.b64decode(encrypted)\n IV = encrypted[:self.BLOCK_SIZE]\n aes = AES.new(self.key, AES.MODE_CBC, IV)\n return self._unpad(aes.decrypt(encrypted[self.BLOCK_SIZE:]))", "def decrypt(event=None): # event is passed by binders.\n msg = inputText.get(\"1.0\",tkinter.END)\n outText.delete('1.0', tkinter.END)\n\n decB64Msg = base64.decodestring(msg)\n\n f = open(myTmpDir + 'ct' + str(identity) + '.bin','wb')\n f.write(decB64Msg)\n f.close()\n\n os.popen(\"rsa.exe d \" + myTmpDir + \"ct\" + str(identity) + \".bin \" + myTmpDir + \"ptSender\" + str(identity) + \".bin\")\n\n with open(myTmpDir + \"ptSender\" + str(identity) + \".bin\", \"rb\") as f:\n readFile = f.read()\n # Convert to hex representation\n decMsg = bytes(readFile)\n\n # TODO: overwirite\n outText.insert(tkinter.END, decMsg)", "def decrypt(self, enc, use_base64=True, decode_text=True):\n if use_base64:\n enc = base64.b64decode(enc)\n\n decryptor = self.cipher.decryptor()\n raw = self._unpad(decryptor.update(enc) + decryptor.finalize())\n return raw.decode(\"utf-8\") if decode_text else raw", "def decrypt_symmetric(self, ciphertext):\n from google.cloud import kms_v1\n\n # Creates an API client for the KMS API.\n client = kms_v1.KeyManagementServiceClient()\n\n # The resource name of the CryptoKey.\n name = client.crypto_key_path_path(self.project_id, self.location_id, self.key_ring_id,\n self.crypto_key_id)\n # Use the KMS API to decrypt the data.\n response = client.decrypt(name, ciphertext)\n return response.plaintext", "def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)", "def FtDecrypt(self,EncryptText):\n \n self.EncryptText = EncryptText\n characters = \"abcdefghijklmnopqrstuvwxyz \"\n DecripText = ''\n\n #attempt to decrypt the text using the made_key and EncryptText \n try:\n for item in self.EncryptText:\n DecripText += Code_Fouad_Teniou.my_dict[item]\n\n return DecripText\n \n #Raise KeyError if a different key was used to encrypt the text \n except KeyError:\n print \"\\n<Please use the right code(made_key) to decrypt your text\"", "def decryptor(file_name, key):\n\twith open(file_name, 'rb') as dfile:\n\t\tciphertext = dfile.read()\n\t\tdec = decrypt(key, ciphertext)\n\t\tdfile.close()\n\t\tdtext = \"The encrypted file was opened by macupdate.py by the user: \"\n\t\tcreateLog(dtext, 'logs/macupdate.log')\n\t\treturn dec", "def decipher_raw(s, key):\n assert struct.calcsize('I') == 4\n assert len(s) % 8 == 0, len(s)\n u = struct.unpack('%dI' % (len(s) / 4), s)\n e = [decrypt(u[i], u[i + 1], key) for i in range(len(u))[::2]]\n return b''.join([struct.pack('2I', ee, ef) for ee, ef in e])", "def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)", "def asym_dec(self, ciph, keyfile):\n ciph = ciph.split('\\0')\n ciphkey_len = int(ciph[0])\n ciph = '\\0'.join(ciph[1:])\n ciphkey = ciph[:ciphkey_len]\n ciph = ciph[ciphkey_len:]\n\n passphrase = xsystem([self.sslname, 'rsautl', '-decrypt', '-inkey',\n keyfile], ciphkey)\n if not passphrase:\n warning('keymanagement: Unable to perform asymmetric decryption\\n')\n return None\n\n return self.sym_dec(ciph, passphrase)", "def decrypt_message(self, env_key, data):\n\n if not env_key or not data:\n raise Exception('Arguments missing.')\n\n key = RSA.importKey(self.private_key)\n try:\n env_key = unquote(env_key).decode('utf8')\n data = unquote(data).decode('utf8')\n except AttributeError:\n # 
Python 3 compatible\n env_key = unquote(env_key)\n data = unquote(data)\n\n try:\n env_key = base64.b64decode(env_key)\n data = base64.b64decode(data)\n \n cipher = PKCS1_v1_5.new(key)\n\n sentinel = []\n session_key = cipher.decrypt(env_key, sentinel)\n\n rc4_cipher = ARC4.new(session_key)\n\n xml_data = rc4_cipher.decrypt(data)\n\n # TODO: add xml validation\n # schema_root = etree.XML(xml_data)\n # schema = etree.XMLSchema(schema_root)\n # parser = etree.XMLParser(schema=schema)\n\n return xml_data\n except Exception as e:\n if self.developement:\n exception(e)\n\n raise Exception('Could not decrypt message.')", "def sym_dec(self, ciph, passphrase):\n (rfd, wfd) = xpipe()\n os.write(wfd, passphrase + '\\n')\n plain = xsystem([self.sslname, self.symmetric, '-d', '-pass',\n 'fd:' + str(rfd)], ciph)\n xclose(wfd)\n xclose(rfd)\n if not plain:\n warning('keymanagement: Unable to decrypt because %s does not exist\\n' %(self.sslname))\n return None\n\n return plain", "def decrypt(encoded_data: Union[bytes, str]) -> Tuple[Tuple[Any], Dict[str, Any]]:\n if isinstance(encoded_data, str):\n encoded_data = encoded_data.encode('utf8')\n\n key = pad(settings.PTRACK_SECRET).encode('utf8')\n box = nacl.secret.SecretBox(key)\n\n encrypted = base64.urlsafe_b64decode(encoded_data)\n data = box.decrypt(encrypted)\n # json.loads expects a str, so we convert bytes to str\n data = data.decode('utf8')\n return json.loads(data)", "def decrypt(self, message):\n # message = message.upper().split()\n # message = \"\".join(message)\n # desalting the message to remove 5 characters blocks\n padding = input(\"Have you used 5 characters blocks? y/n \")\n if padding == \"y\":\n message = message.replace(\" \", \"\")\n message = self.desalt_random(message)\n message = \"\".join(message)\n\n message = message.upper()\n message_list = []\n for ch in message:\n message_list.append(self.main_dict[ch][0])\n\n # OTP Encryption / process the message with OTP\n otp = input(\"What is the OTP that was generated for you during \"\n \"encryption process?: \")\n otp = otp.upper()\n random_otp = []\n for ch in otp:\n random_otp.append(self.main_dict[ch][0])\n\n # If OTP is correct, decrypt the message with mod27\n if len(message_list) != len(random_otp):\n print(\"You typed a wrong OTP.\")\n return None\n else:\n math_list = []\n for i, item in enumerate(message_list):\n if message_list[i] >= random_otp[i]:\n x = message_list[i] - random_otp[i]\n for key, value in self.main_dict.items():\n if value[0] == x:\n math_list.append(key)\n else:\n for key, value in self.main_dict.items():\n if item == value[0]:\n x = value[1] - random_otp[i]\n for key, value in self.main_dict.items():\n if value[0] == x:\n math_list.append(key)\n return \"\".join(math_list)", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def decrypt(self, ciphertext):\n return self._transform(ciphertext, self._backward)", "def decrypt_data(encryption_key, data, iv=None):\n if not data:\n logger.debug(\"Outdata is empty, nothing to decrypt\")\n return data\n # if iv is None the it's assumed that 12 bytes iv is\n # prepended in encrypted data\n data_byte = base64_to_byte_array(data)\n if iv is None:\n iv_length = IV_SIZE\n iv = data_byte[:iv_length]\n data_contains_iv = True\n else:\n iv_length = len(iv)\n data_contains_iv = False\n\n cipher = AES.new(encryption_key, AES.MODE_GCM, iv)\n # 
Split data into iv, tag and ciphered data\n if data_contains_iv:\n ciphertext_len = len(data_byte) - iv_length - TAG_SIZE\n ciphered_data = data_byte[iv_length: iv_length + ciphertext_len]\n tag = data_byte[-TAG_SIZE:]\n else:\n ciphertext_len = len(data_byte) - TAG_SIZE\n ciphered_data = data_byte[: ciphertext_len]\n tag = data_byte[-TAG_SIZE:]\n\n result = cipher.decrypt_and_verify(ciphered_data, tag).decode(\"utf-8\")\n logger.info(\"Decryption result at client - %s\", result)\n return result", "def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)", "def decrypt(text, address, path):\n client = ConfigClient(address=address, fail_fast=False)\n cipher = re.match(r\"^.?{cipher}?(?P<name>\\w.*)\", text)\n if cipher:\n text = cipher.group(\"name\")\n\n try:\n resp = client.decrypt(text, path=path)\n except Exception:\n raise click.ClickException(\"💥 Failed to contact server!\")\n\n table = Table.grid(padding=(0, 1))\n table.add_column(style=\"cyan\", justify=\"right\")\n table.add_column(style=\"magenta\")\n\n table.add_row(\"decrypted data[yellow]:[/yellow] \", f\"'{resp}'\")\n console.print(Panel(table, border_style=\"yellow\", expand=True))", "def decrypt_password(pass_to_decrypt):\n\n pass_to_decrypt = fk.decrypt(pass_to_decrypt)\n return pass_to_decrypt.decode()", "def _decrypt(self, b, strip_padding=True):\n from cryptography.hazmat.primitives.ciphers \\\n import Cipher, algorithms, modes\n from cryptography.hazmat.backends import default_backend\n\n backend = default_backend()\n cypher = Cipher(\n algorithms.AES(self.__key), modes.CBC(self.__iv), backend=backend)\n decryptor = cypher.decryptor()\n result = decryptor.update(b) + decryptor.finalize()\n if strip_padding:\n result = result[:-result[-1]]\n return result", "def decrypt_message(data,symetric_key,private_key):\n\tif type(data) == str or type(data) == bytes:\n\t\tdata = json.loads(data)\n\ttyp = data['type']\n\tnonce = data['nonce'].encode(\"iso-8859-1\")\n\tmessage = data['message'].encode(\"iso-8859-1\")\n\tnonce, *_ = decrypt(private_key,nonce)\n\tmessage = AESCCM(symetric_key).decrypt(nonce,message,None)\n\tmessage ={'type':typ,'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\treturn message", "def decrypt(self, input, iv) :\n pass", "def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)", "def decrypt():\n plaintext = \"\"\n i = 0\n while i < len(ciphertext):\n if i%2==1:\n try:\n plaintext += key[ ciphertext[i-1]+ciphertext[i] ]\n except KeyError:\n plaintext += ciphertext[i-1]+ciphertext[i]\n i += 1\n return plaintext", "def decipher(self):\n plaintext = \"\"\n for ct, key_char in zip(self.text, self.key):\n char_index = self.char_block.rows[key_char].index(ct)\n plaintext += self.char_block.alphabet[char_index]\n print(plaintext)", "def Decrypt(key, value):\n key = key.zfill(32)[:32]\n aes = AES.new(key, AES.MODE_ECB)\n encrypted = base64.b64decode(value)\n decrypted = aes.decrypt(encrypted)\n return RemovePadding(decrypted)" ]
[ "0.8398597", "0.7666521", "0.76633376", "0.7455099", "0.74344987", "0.73562866", "0.73562866", "0.73559135", "0.7330574", "0.7323575", "0.7264788", "0.7229528", "0.7145882", "0.71009696", "0.6988444", "0.69679904", "0.6960979", "0.6948455", "0.6828047", "0.6825578", "0.68232894", "0.6788232", "0.6785551", "0.6775595", "0.67632306", "0.6752755", "0.674636", "0.67389536", "0.67334753", "0.6722593", "0.6707275", "0.668649", "0.6680971", "0.6672177", "0.66661555", "0.6642098", "0.6640024", "0.66376436", "0.66232413", "0.6619248", "0.6607051", "0.66008997", "0.65669674", "0.6563928", "0.65539", "0.6550067", "0.6536376", "0.65315473", "0.650998", "0.64791954", "0.6478611", "0.6463433", "0.6460935", "0.64505965", "0.6434156", "0.6432829", "0.6421843", "0.64205676", "0.64144534", "0.64070475", "0.6401258", "0.6373733", "0.63726777", "0.6361919", "0.6351362", "0.63442045", "0.63429904", "0.63408667", "0.63365597", "0.633606", "0.63337046", "0.6333211", "0.6320599", "0.63134575", "0.63009465", "0.63006777", "0.6286635", "0.6286384", "0.6276994", "0.6268763", "0.62681556", "0.6266483", "0.62629205", "0.6256929", "0.6253853", "0.6253543", "0.62529105", "0.62521863", "0.6249605", "0.62450975", "0.62422043", "0.62406355", "0.6224018", "0.6198869", "0.6189962", "0.6182747", "0.6179904", "0.61758876", "0.6169763", "0.616425", "0.6157517" ]
0.0
-1
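The record above opens with a decrypt routine that chains AES-CBC decryption, PKCS7 unpadding, gzip decompression, and json.loads. A minimal runnable sketch of that chain follows, assuming the cryptography package and a caller-supplied key and IV (the original class derives both from its own secret and algorithm metadata):

import gzip
import json
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def decrypt_json(data: bytes, key: bytes, iv: bytes) -> dict:
    # AES-CBC decrypt the raw ciphertext bytes.
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())
    decryptor = cipher.decryptor()
    plaintext = decryptor.update(data) + decryptor.finalize()
    # Strip PKCS7 padding (block_size is expressed in bits, 128 for AES).
    unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
    plaintext = unpadder.update(plaintext) + unpadder.finalize()
    # The payload was gzip-compressed JSON before encryption.
    return json.loads(gzip.decompress(plaintext))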
Performs any TPLink smartplug supported command. See the commands dictionary
def perform_command(self, cmd: str) -> dict: sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock_tcp.connect((self.ip, self.port)) sock_tcp.sendall(self.__encrypt(cmd)) data = sock_tcp.recv(2048) sock_tcp.close() data = self.__decrypt(data[4:]) if not data: raise ConnectionError("Error: Could not communicate to the smart plug") return json.loads(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def commands():", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def silkscreen_commands(self, commands):\n self.pcb_layers[\"silkscreen\"].commands = commands", "def execCommand(self, command, opts):\n if command == \"attach\":\n self.attachToComponent(opts)\n elif command == \"status\":\n self.getComponentStatus(opts)\n elif command == \"start\":\n self.startComponent(opts)\n elif command == \"stop\":\n self.stopComponent(opts)\n elif command == \"restart\":\n self.restartComponent(opts)\n else:\n print(\"Command %s not recognized\" % command)", "def execute(self, devices, command_bytes):", "def process_commands(self, commands: List[str]):", "def command(self, command, runtime=None):\n cmd = ' '.join(command)\n self.commands.append(cmd)\n self.runtimes.append(runtime or 'not measured')", "def run_commands(ip_address, user, password, commandList, platform, buffer=5000):\n print \"Configuring \" + ip_address\n remote_conn_pre = paramiko.SSHClient()\n remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n remote_conn_pre.connect(ip_address, username=user, password=password)\n remote_conn = remote_conn_pre.invoke_shell()\n if platform == \"cisco\":\n remote_conn.send(\"enable\\n\")\n time.sleep(1)\n remote_conn.send(password+'\\n')\n time.sleep(1)\n commands = commandList.split('\\n')\n for com in commands:\n remote_conn.send(com+'\\n')\n time.sleep(1)\n output = remote_conn.recv(buffer)\n #print output", "def command(self, cmd):\n self.lmp.command(cmd)", "def run(self):\n for command in CUSTOM_COMMANDS:\n self.run_custom_command(command)", "def _get_supported_commands(self):\n logger.info(\"Default unconfigured API, not adding any commands!\")\n pass", "def sys_commands(self, obj, phase=''):\n commands = self.settings.get(phase)\n if commands and isinstance(commands, list):\n for command in commands:\n if isinstance(command, list):\n # Find list items that match the string after \"att_\",\n # these are names names of attribute in the calling class\n for key, item in enumerate(command):\n if item[:4] == 'att_':\n attribute = item[4:]\n try:\n command[key] = getattr(obj, attribute)\n except AttributeError:\n continue\n try:\n popen = subprocess.Popen(command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n except OSError as error:\n msg = \"Cannot run {0} the command doesn't exist,\\n\".format(command.pop(0))\n msg += \"Error: {1}\".format(error.strerror)\n print msg\n results = popen.communicate()\n if results[1]:\n print \"Running {0}, \\n Error: {1}\".format(command, results[1])\n else:\n continue", "def load_commands():\n register_plugin(configure_client_details)\n register_plugin(search_venues)", "def __run_cmd(self, command, timeout, expected_result):\n if command.strip().lower().find(\"[sleep(\") != -1:\n command = command.strip().lower().replace(\"[sleep(\", \"\")\n command = command.replace(\")]\", \"\")\n sleep_time = float(command)\n time.sleep(sleep_time)\n status = Global.SUCCESS\n output = \"Success\"\n elif command.strip().lower().find(\"[usb_plug]\") != -1:\n if self._device is not None and self._io_card is not None:\n self._io_card.usb_host_pc_connector(True)\n self._device.connect_board()\n status = Global.SUCCESS\n output = \"Success\"\n else:\n self._logger.error(\"Cannot execute usb_plug, no io card configured.\")\n status = Global.FAILURE\n output = \"Cannot execute usb_plug, no io card configured.\"\n elif command.strip().lower().find(\"[usb_unplug]\") != -1:\n if self._device is not None 
and self._io_card is not None:\n self._device.disconnect_board()\n self._io_card.usb_host_pc_connector(False)\n status = Global.SUCCESS\n output = \"Success\"\n else:\n self._logger.error(\"Cannot execute usb_unplug, no io card configured.\")\n status = Global.FAILURE\n output = \"Cannot execute usb_unplug, no io card configured.\"\n elif command.strip().lower().find(\"[press_power_button(\") != -1:\n command = command.strip().lower().replace(\"[press_power_button(\", \"\")\n command = command.replace(\")]\", \"\")\n press_button_time = float(command)\n\n if self._io_card is not None:\n self._io_card.press_power_button(press_button_time)\n status = Global.SUCCESS\n output = \"Success\"\n else:\n self._logger.error(\"Cannot execute press_power_button, no io card configured.\")\n status = Global.FAILURE\n output = \"Cannot execute press_power_button, no io card configured.\"\n elif command.strip().lower().find(\"[control_relay(\") != -1:\n command = command.strip().lower().replace(\"[control_relay(\", \"\")\n command = command.replace(\")]\", \"\")\n relay_nbr = int(command.split(\",\")[0].strip())\n state = command.split(\",\")[1].strip().lower()\n\n if self._io_card is not None:\n if state == \"on\":\n self._io_card.enable_line(relay_nbr)\n elif state == \"off\":\n self._io_card.disable_line(relay_nbr)\n status = Global.SUCCESS\n output = \"Success\"\n else:\n self._logger.error(\"Cannot execute press_relay, no io card configured.\")\n status = Global.FAILURE\n output = \"Cannot execute press_relay, no io card configured.\"\n else:\n # Handle multi phone, if we issue adb command, add serial number if we have it\n if \"adb\" in command.lower():\n command = self._device.format_cmd(command)\n\n # If curlUtilities is called add the path to Campaign_report\n elif command.strip().lower().find(\"curlutilities\") != -1:\n # Add path to campaign report in CurlUtilities command\n report_tree = \\\n self._global_config.campaignConfig.get(\"campaignReportTree\")\n command += \\\n \" --output=%s\" % report_tree.get_report_path()\n\n if \"[MY_PATH]\" in command:\n command = command.replace(\"[MY_PATH]\",\n os.path.dirname(\n os.path.abspath(\n self._tc_parameters.get_file_path()))\n + os.sep)\n\n if \"[MY_DEVICE_MODEL]\" in command:\n command = command.replace(\"[MY_DEVICE_MODEL]\", self._device.get_phone_model())\n\n # We use the same python that ACS\n if \"python\" in command:\n command_list = command.split(\" \")\n # pyc replacement instead of py curently only works if RUN_FROM_TCDIRECTORY\n # is set to true\n if self._run_from_tc_directory:\n execution_path = os.path.join(self._execution_config_path,\n os.path.dirname(self._name))\n for index, command_element in enumerate(command_list):\n if command_element.endswith(\".py\"):\n if os.path.isfile(os.path.join(execution_path, command_element)) is False:\n pyc_cmd = command_element[:-2] + \"pyc\"\n if os.path.isfile(os.path.join(execution_path, pyc_cmd)):\n command_list[index] = pyc_cmd\n\n command = \" \".join(command_list)\n python_path = sys.executable\n command = command.replace(\"python\", python_path)\n self._logger.info(\"Using python: %s\" % python_path)\n\n if any(\"acs.py\" in cmd.lower() for cmd in command):\n # Put report into sub folder for analysis in case of error\n report_path = self._global_config.campaignConfig.get(\"campaignReportTree\").get_report_path()\n report_subfolder = os.path.join(report_path, os.path.basename(self._name))\n self._logger.info(\"Detailed results will be found at: {0}\".format(report_subfolder))\n command = 
\"{0} --report_folder={1}\".format(command, report_subfolder)\n\n status, _ = \\\n self.__internal_exec(command, timeout, expected_result)\n if status == Global.SUCCESS:\n output = \"Success\"\n else:\n output = \"Did not found expected result: {0}\".format(expected_result)\n\n else:\n status, stdout = \\\n self.__internal_exec(command, timeout, expected_result)\n output = \"output: {0}\".format(stdout.rstrip(\"\\r\\n\"))\n self._logger.info(output)\n\n # Remove special characters which could be stored in output message\n allowed_characters = '[^a-zA-Z0-9\\-\\+\\=\\'\\.\\:\\,\\;\\!\\?\\%\\(\\)\\#\\*\\@\\_\\n\\t]'\n parsed_output = re.sub(allowed_characters, ' ', output)\n\n return status, parsed_output", "def loadStdCommands(self, player):\n player.addCommand('spawn', self.commands['spawn']())\n player.addCommand('edit', self.commands['edit']())\n player.addCommand('search', self.commands['search']())\n player.addCommand('warp', self.commands['warp']())\n player.addCommand('addstat', self.commands['addstat']())\n player.addCommand('delstat', self.commands['delstat']())\n player.addCommand('savezone', self.commands['savezone']())\n player.addCommand('obliterate', self.commands['obliterate']())", "def handle_user_command(self, command_text):\r\n try:\r\n command = json.loads(command_text)\r\n except json.JSONDecodeError:\r\n self.error_callback('Could not parse user command')\r\n return\r\n\r\n target = command.get('target', None)\r\n parameter = command.get('parameter', None)\r\n command = command.get('command', None)\r\n if target == 'controller':\r\n pass\r\n elif target == 'well_and_tank':\r\n # if the pump is controlled automatically, user command has no effect\r\n with self.config_lock:\r\n is_auto = self.config['pump_auto_control']\r\n if is_auto:\r\n self.error_callback('Attempted to execute a manual command on an automated parameter')\r\n return\r\n if parameter == 'pump':\r\n # Find parameter description\r\n for curr_param in self.well_tank_dev.description['parameters']:\r\n if curr_param['name'] == 'pump':\r\n break\r\n if (command != curr_param['commands'][0]) and \\\r\n (command != curr_param['commands'][1]):\r\n self.error_callback('Invalid value {}:{}:{}'.format(target, parameter, command))\r\n return\r\n self.well_tank_dev.send_command(parameter, command)\r\n # No need to call handle_updates as there are no updates yet - the device has not confirmed that its\r\n # state has changed\r\n else:\r\n self.error_callback(\"Cannot control {}'s parameter {}\".format(target, parameter))\r\n return", "def execute_command(command):\r\n if 0 == len(command):\r\n return\r\n\r\n if command[0] in verbs[\"move\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"go where?\")\r\n else:\r\n execute_go(command[1])\r\n\r\n elif command[0] in verbs[\"take\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Take what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_take(item_id)\r\n\r\n elif command[0] in verbs[\"drop\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"Drop what?\")\r\n else:\r\n item_id = get_multi_word_string(command, items)\r\n execute_drop(item_id)\r\n\r\n elif command[0] in verbs[\"use\"]:\r\n if len(command) <= 1:\r\n wrap_print(\"use what?\")\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n execute_use(item_id)\r\n\r\n elif command[0] in verbs[\"look\"]:\r\n if len(command) == 1:\r\n print_room(current_room)\r\n elif command[1] in 
nouns[\"inventory\"]:\r\n print_inventory_items(inventory)\r\n elif command[1] in nouns[\"self\"]:\r\n print_condition()\r\n else:\r\n item_id = get_multi_word_string(command, current_room[\"items\"])\r\n if item_id is False:\r\n item_id = get_multi_word_string(command, inventory)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if item_id in inventory.keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif item_id in current_room[\"items\"].keys():\r\n wrap_print(items[item_id][\"description\"])\r\n elif entity_id in current_room[\"entities\"].keys():\r\n wrap_print(entities[entity_id][\"description\"])\r\n else:\r\n wrap_print(\"You can not view that.\")\r\n\r\n elif command[0] in verbs[\"attack\"]:\r\n if len(command) > 2:\r\n item_id = get_multi_word_string(command, items)\r\n entity_name = get_multi_word_string(command, [entity[\"name\"] for entity in current_room[\"entities\"].values()])\r\n entity_id = entity_get_id_from_name(entity_name, current_room[\"entities\"].values())\r\n if len(command) <= 1:\r\n wrap_print(\"attack what?\")\r\n elif entity_id not in current_room[\"entities\"].keys():\r\n wrap_print(\"You cannot attack that.\")\r\n elif len(command) <= 2:\r\n wrap_print(\"What with?\")\r\n elif item_id not in inventory.keys():\r\n wrap_print(\"You do not have a that item.\")\r\n elif items[item_id][\"damage\"] == False:\r\n wrap_print(\"You cannot attack using that item.\")\r\n else:\r\n execute_attack(entity_id, item_id)\r\n\r\n elif command[0] == \"help\":\r\n print(\"To move in a given direction type: go <DIRECTION>\")\r\n print(\"To pick up an item type: take <ITEM>\")\r\n print(\"To drop an item type: drop <ITEM>\")\r\n print(\"To use an item type: use <ITEM>\")\r\n print(\"To look at something of interest type: view <ITEM>\")\r\n print(\"to attack a character type: attack <CHARACTER> with <item>\")\r\n print(\"to : attack <CHARACTER> with <item>\")\r\n print(\"To quit the game type: quit\\n\")\r\n wrap_print(\"\"\"Verb variations are supported, so 'run south', or 'inspect item' are valid inputs.\"\"\")\r\n wrap_print(\"\"\"Items and characters with multiple words in their name are also supported like regular items.\"\"\")\r\n\r\n elif command[0] == \"quit\":\r\n if len(command) == 1:\r\n wrap_print(\"goodbye!\")\r\n global playing\r\n playing = False\r\n\r\n else:\r\n wrap_print(\"That makes no sense.\")", "def loadPlayerCommands(self, player):\n player.addCommand('get', self.commands['get']())\n player.addCommand('drop', self.commands['drop']())\n player.addCommand('go', self.commands['go']())\n player.addCommand('say', self.commands['say']())\n player.addCommand('look', self.commands['look']())\n player.addCommand('quit', self.commands['quit']())\n player.addCommand('commands', self.commands['commands']())\n player.addCommand('color', self.commands['color']())", "def _invoke_cmd(self, cmd):\n if cmd in self.COMMANDS:\n self.COMMANDS[cmd]()\n else:\n print(ERROR_UNKNOWN_COMMAND.format(cmd=cmd))", "def execute_commands(self, commands):\n for cmd in commands:\n self.action_list[cmd](commands[cmd])\n if cmd == 'r':\n break", "def _command(self, commands):\n# \"\"\"Send command to spi bus of display chip, most DC pin need set to LOW \"\"\"\n# if self._spi == None: raise \"Do not setting SPI\"\n# GPIO.output( self._spi_dc, 0 )\n# self._spi.writebytes( commands )\n raise NotImplementedError", "def 
UseCommandInterface(self, option):\n if option:\n #change prompt\n if self.sim42interp.cmd.currentObj and hasattr(self.sim42interp.cmd.currentObj, 'GetPath'):\n sys.ps1 = self.sim42interp.cmd.currentObj.GetPath() + '> '\n else:\n sys.ps1 = 'Illegal current object> ' \n \n #Say good bye\n self.shell.run(\"print '*************** Changed to Sim42 Command Interface ***************' \", prompt=0, verbose=0)\n \n #Change\n self.shell.interp = self.sim42interp\n\n else:\n #change prompt\n sys.ps1 = '>>> '\n sys.ps2 = '... '\n\n #Change\n self.shell.interp = self.origInterp \n \n #Say hello\n self.shell.run(\"print '*************** Back to python ***************' \", prompt=0, verbose=0)\n \n\n self.shell.autoCompleteKeys = self.shell.interp.getAutoCompleteKeys()", "def command():\n pass", "def _command_processor(self, cmd: str) -> None:\n\n if cmd == \"translate\":\n oracion = self.session.prompt(\n \"... Texto en español: \",\n validator=TbSETValidator(\"text_max_len\"),\n complete_while_typing=False)\n\n self.translate(oracion)\n elif cmd == \"train\":\n confirmation = self.session.prompt(\"... This will take at least 30' with a GPU. Are you sure? (y/n): \",\n validator=TbSETValidator(\"yes_no\"))\n\n if confirmation in \"yY\":\n self.train()\n else:\n print(\"Wrong command, please try again.\\n\")", "def getCommands():\n return getPlugins(ICommand, plugins)", "def handleCommand(self, command, prefix, params):\n irc.IRCClient.handleCommand(self, command, prefix, params)\n if len(params) < 2:\n return\n plugins = plugin_manager.filter(\n channel=self.channel, action=command.lower())\n for plugin in plugins:\n plugin.handle_action(protocol=self, action=command.lower(),\n user=prefix, message=params[1])", "def loadAllCommand(self, player):\n for eachCmd in self.commands.keys():\n player.addCommand(eachCmd, self.commands[eachCmd]())", "def do_command(self, args):\n pass", "def _command(self, *cmd, handler=None):", "def add_command(self, name, command):\n if command['type'] == 'topic':\n if 'deadman_buttons' not in command:\n command['deadman_buttons'] = []\n command['buttons'] = command['deadman_buttons']\n if 'deadman_axes' not in command:\n command['deadman_axes'] = []\n command['axes'] = command['deadman_axes']\n elif command['type'] == 'action':\n if 'action_goal' not in command:\n command['action_goal'] = {}\n elif command['type'] == 'service':\n if 'service_request' not in command:\n command['service_request'] = {}\n self.command_list[name] = command", "def handle_admincommands(bot, ievent):\n cmnds = getcmndtable()\n if not ievent.rest: ievent.reply(\"commands: \", cmnds)\n else:\n try: ievent.reply(\"%s command is found in %s \" % (ievent.rest, cmnds[ievent.rest]))\n except KeyError: ievent.reply(\"no such commands available\")", "def plugman_steps(self):\n return [\n ShellCommand(command=[\"rm\", \"-rf\", \"cordova-*\"], workdir='build', haltOnFailure=False, description='Cordova Clean'),\n ShellCommand(command=[\"git\", \"clone\", CONFIG.repos['PLUGMAN'], \"cordova-plugman\"], workdir='build', haltOnFailure=True, description='Get Plugman'),\n NPMInstall(workdir='build/cordova-plugman', haltOnFailure=True, description='Install Plugman'),\n ShellCommand(command=[\"npm\", \"test\"], workdir='build/cordova-plugman', haltOnFailure=True, description='Test Plugman'),\n ]", "def run(self, commands: list[str]):\n ...", "def func(self):\n try:\n if not self.switches or \"all\" in self.switches:\n self.list_favor()\n elif \"set\" in self.switches or \"add\" in self.switches:\n self.add_favor()\n elif 
\"remove\" in self.switches:\n self.remove_favor()\n else:\n raise CommandError(\"Invalid switch.\")\n except CommandError as err:\n self.msg(err)\n else:\n self.mark_command_used()", "def _execute_impl(self, commands):\n raise NotImplementedError(\"abstract method\")", "def run_command(self, command, joy_state):\n cmd = self.command_list[command]\n if cmd['type'] == 'topic':\n self.run_topic(command, joy_state)\n elif cmd['type'] == 'action':\n if cmd['action_name'] in self.offline_actions:\n self.get_logger().error('command {} was not played because the action '\n 'server was unavailable. Trying to reconnect...'\n .format(cmd['action_name']))\n self.register_action(command, self.command_list[command])\n else:\n if joy_state.buttons != self.old_buttons:\n self.run_action(command, joy_state)\n elif cmd['type'] == 'service':\n if cmd['service_name'] in self.offline_services:\n self.get_logger().error('command {} was not played because the service '\n 'server was unavailable. Trying to reconnect...'\n .format(cmd['service_name']))\n self.register_service(command, self.command_list[command])\n else:\n if joy_state.buttons != self.old_buttons:\n self.run_service(command, joy_state)\n else:\n raise JoyTeleopException(\n 'command {} is neither a topic publisher nor an action or service client'\n .format(command))", "def handle_command(self, command):\n\n\t\tif command:\n\t\t\tcmd = shlex.split(command)\n\t\t\tobj = {\"Type\": \"command\", \"Message\": {\"command\": cmd[0], \"arguments\": cmd[1:]}}\n\t\t\tobj = self.communicator.send_message(obj)\n\t\t\tself.console.handle_message(obj)", "def exec_commands(com):\n reply = ''\n if com is not None:\n if com == commands[0]:\n tables = db.create_tables(houses, from_)\n if tables == True:\n for j in range(len(c_responses[0]) - 1):\n# can use join and split functions to create softer code?? at least in future instances\n bot.send_message(c_responses[0][j], from_)\n else:\n reply = c_responses[0][(len(c_responses[0])-1)]\n elif com == commands[1]:\n house_info = db.house_info(from_)\n # Add feautures to find highest scoring house and return number of members\n reply = \"Houses:\\n\"\n for house in house_info:\n reply += house[1] + \"\\n\"\n if house[2] != None:\n reply += f\"Score: {house[2]}pts\\n\\n\"\n else:\n reply += f\"Score: 0pts\\n\\n\"\n elif com.startswith(commands[2]):\n instructions = com.split()\n id = 0\n info = user_query()\n user_id = info['user']['id']\n check = db.check_admin(from_, user_id)\n if check and check != 'not sorted':\n for house in houses:\n id += 1\n if house == instructions[1]:\n score = db.update_house_score(id, instructions[2], from_)\n reply = f\"{instructions[1]} new score is {score}\"\n else:\n reply = \"You have no power over me! PS:(if you are an admin use the /appoint me command to be recognised as such)\"\n\n\n elif com == commands[3]:\n username = item['message']['from']['username']\n user_id = item['message']['from']['id']\n num = db.add_member_info(username, from_, user_id)\n if num[1]:\n reply = f\"Better be... 
{houses[num[0]-1]}\"\n else:\n print(num[0][0])\n reply = f\"I stand by my decision, {houses[num[0][0]-1]} will help you on the way to greatness!\"\n elif com == commands[4]:\n m_list = db.member_info(from_)\n reply = str(m_list)\n elif com == commands[5]:\n info = user_query()\n username = info['user']['username']\n m_info = db.member_info(from_, username)\n reply = f\"\"\"\n Username: {m_info[2]}\\nHouse: {houses[m_info[3]]}\\nStatus: {m_info[4]}\\nScore: {m_info[5]}\\n\n \"\"\"\n elif com == commands[6]:\n info = user_query()\n username = info['user']['username']\n user_id = info['user']['id']\n status_info = info['status']\n if status_info == 'creator':\n verify = db.check_admin(from_, user_id)\n if not verify:\n db.update_member_status(from_, info['user']['id'], 'Headmaster')\n reply = f\"Rise Headmaster {username}\"\n elif verify == 'not sorted':\n reply = \"Don't be hasty! if tables have already been created use the '/sort me' command to get yourself sorted first\"\n else:\n reply = \"We've already done this Headmaster\"\n elif status_info == 'administrator':\n verify = db.check_admin(from_, user_id)\n if not verify:\n db.update_member_status(from_, info['user']['id'], 'Professor')\n reply = f\"Hence forth you shall be known as Professor {username}\"\n elif verify == 'not sorted':\n reply = \"Don't be hasty! if tables have already been created use the '/sort me' command to get yourself sorted first\"\n else:\n reply = \"We've already done this Professor\"\n else:\n reply = 'Desist pretender! Only the entitled may command me so!'\n elif com == commands[7]:\n for command in commands:\n reply += f'{command}\\n'\n print(reply)\n \n return reply", "def dispatch(self) -> None:\n while True:\n body = self.general_queue.pop()\n if \"CMD$\" in body:\n cmd = [part for part in body[body.find(\"$\") + 1:].split(\";\") if part]\n try:\n module, func = cmd[0], cmd[1]\n except IndexError:\n self.send_through_aprs(f\"CMDERR: Unable to parse Commnd {cmd}\")\n continue\n if self.validate_func(module, func):\n try:\n getattr(self.modules[module], func)()\n self.send_through_aprs(f\"CMDSUC: Command {cmd} executed successfully\")\n except Exception as e:\n self.send_through_aprs(f\"CMDERR: Command {cmd} failed with {e}\")", "def add_command_to_implementing_integrations_mapping(self):\n command_name_to_implemented_integration_map = (\n self.create_command_to_implemented_integration_map()\n )\n\n playbooks_list = self.id_set[\"playbooks\"]\n for playbook_dict in playbooks_list:\n playbook_name = list(playbook_dict.keys())[0]\n playbook_data = playbook_dict[playbook_name]\n commands_to_integration = playbook_data.get(\"command_to_integration\", {})\n for command in commands_to_integration:\n if commands_to_integration[command]:\n # only apply this logic when there is no specific brand\n continue\n is_command_implemented_in_integration = (\n command in command_name_to_implemented_integration_map\n )\n if (\n is_command_implemented_in_integration\n and command not in GENERIC_COMMANDS_NAMES\n ):\n implemented_integration = (\n command_name_to_implemented_integration_map[command]\n )\n commands_to_integration[command] = implemented_integration", "def handle_command_line():\n commands = scan_for_commands()\n parser = argparse.ArgumentParser(\n description=\"A set of utilities to ease the installation of Modoboa.\",\n epilog=\"\"\"Available commands:\n%s\n\"\"\" % \"\\n\".join([\"\\t%s\" % c for c in sorted(commands)]))\n parser.add_argument(\"--verbose\", action=\"store_true\",\n help=\"Activate verbose 
output\")\n parser.add_argument(\"command\", type=str,\n help=\"A valid command name\")\n (args, remaining) = parser.parse_known_args()\n\n if args.command not in commands:\n print(\"Unknown command '%s'\" % args.command, file=sys.stderr)\n sys.exit(1)\n\n commands[args.command](commands, verbose=args.verbose).run(remaining)", "def process_command(self, command, discord_id):\n\n try:\n character = self.known_characters[discord_id]\n except KeyError:\n print(\"Process_command got message from unregistered player, this should not happen\")\n return\n\n character.clear_log()\n self.current_character = character # this is for directing log messages to the appropriate log\n # it is reset at the start of every turn obviously\n\n splitted = command.split(\" \", maxsplit=1) # just take off the first verb for use as command\n if len(splitted) == 1:\n cmd = splitted[0]\n words = \"\"\n else:\n cmd, words = splitted\n if cmd not in self.command_dict.keys():\n character.log(\"Unrecognised command: {}\", cmd)\n return character.print_log() # return early because couldn't do anything\n else:\n executable_command = self.command_dict[cmd]\n # the name of the command as it appears in the object's __dict__\n\n if executable_command == \"on_status\":\n # special command with no target object, just prints player stats and return early\n character.report_status()\n return character.print_log()\n\n resolution_order = [character.equipped, character.items, character.visible_things] # reset everytim\n if executable_command == \"on_take\":\n resolution_order.reverse() # player wants to take visible things, not equipped things.\n\n args = []\n target = None\n\n for ls in resolution_order:\n # the order of these lists is important: items equipped or held by the player\n # must take precedence, otherwise if a player tries to unequip a worn item in a\n # room that contains an item with the same name, the command dispatcher might pick up\n # the room's version of the item first and fail to unequip it. These cases should be rare.\n for k in ls:\n # first check for exact words\n if k.__doc__ in words:\n if target is None:\n target = k # target first, then args, to cope with \"use x on y\"\n else:\n args.append(k)\n\n if len(args) == 0 and len(words) > 0:\n for ls in resolution_order:\n # then check for partially-typed words if nothing was found\n for k in ls:\n if words in k.__doc__:\n if target is None:\n target = k\n else:\n args.append(k)\n\n if executable_command == \"on_go\":\n for direction in [\"north\", \"south\", \"east\", \"west\"]:\n # all directions are permitted because if it's not valid it will be caught by\n # the room's on_go function\n if direction in words:\n args.append(direction)\n target = character.location\n\n if target is None:\n\n if len(words) > 0:\n character.log(\"Unrecognised target: {}.\", words)\n return character.print_log()\n\n if executable_command == \"on_attack\":\n # player might have mistyped a name or just attack with no monster, consistently pick the\n # first monster for them to attack, if present. If not, pass it on to self.location\n # which will of course fail\n if character.check_if_monsters():\n target = character.monsters_in_play[0]\n\n else:\n # either the player typed (\"look\"), which is just to look at the room,\n # or they typed any other no-argument command which is handled by\n # the MyItem class e.g. 
status, quit\n target = character.location\n\n try:\n to_run = target.__getattribute__(executable_command)\n # look up the command in target's dictionary\n\n except AttributeError:\n character.log(\"Can't {} this.\", cmd)\n return character.print_log()\n\n # THE IMPORTANT PART #\n to_run(*args) # evaluate the command we looked up, passing the arguments the player typed\n\n if not (executable_command in [\"on_go\", \"on_look\", \"on_attack\"]):\n # monsters only attack if the player is still, otherwise they'd attack every time the\n # player ran and running would be pointless\n # not really fair to have the look command trigger attacks either, but anything else\n # is fair game e.g. interacting with objects\n for mon in character.monsters_in_play:\n mon.attack_player()\n\n if not executable_command == \"on_look\":\n # only process heartbeats if the player command actually did something\n for item in self.registered_countdowns:\n item.heartbeat()\n\n return character.print_log()", "async def adding_command_list(self):\n command_aliases=['anime','fun','mod','nekogif'] #This includes the aliases and the cog names\n #NOTE: fun command added\n for i in self.bot.commands:\n self.commands.append(i.name)\n \n for i in command_aliases:\n self.commands.append(i)", "def run(self) -> None:\n while True:\n line = readline()\n if line == \"capabilities\":\n _write(\"option\")\n _write(\"push\")\n _write(\"fetch\")\n _write()\n elif line.startswith(\"option\"):\n self._do_option(line)\n elif line.startswith(\"list\"):\n self._do_list(line)\n elif line.startswith(\"push\"):\n self._do_push(line)\n elif line.startswith(\"fetch\"):\n self._do_fetch(line)\n elif line == \"\":\n break\n else:\n self._fatal(\"unsupported operation: %s\" % line)", "def generic(self, switches=[\"--help\"]):\n return self._command_template(switches)", "def _register_commands(self):\n cmds = []\n cmd_help = CommandParser(\"help\", \"Show help for a command.\")\n cmd_help.add_argument(\n \"command\",\n nargs=\"*\",\n help=\"The command to get help for. 
Specify multiple names to get help for subcommands.\",\n )\n cmd_help.add_argument(\"-m\", \"--module\", help=\"List all commands from the given module\")\n cmd_help.add_argument(\n \"-f\",\n \"--full\",\n action=\"store_true\",\n help='Include descriptions in the \"all\" help output.',\n )\n cmds.append(cmd_help)\n\n target_mod = CommandParser()\n target_mod.add_argument(\"module\", nargs=\"+\", help=\"Target module(s)\")\n target_mod.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=\"protocol\",\n default=\"feature\",\n dest=\"mtype\",\n help=\"Target is a protocol module\",\n )\n cmd_module = CommandParser(\"module\", \"Manage and query ZeroBot modules\")\n add_subcmd = cmd_module.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"load\", description=\"Load a module\", parents=[target_mod])\n add_subcmd(\"reload\", description=\"Reload a module\", parents=[target_mod])\n subcmd_list = add_subcmd(\"list\", description=\"List available modules\")\n subcmd_list.add_argument(\"-l\", \"--loaded\", action=\"store_true\", help=\"Only loaded modules\")\n list_group = subcmd_list.add_mutually_exclusive_group()\n default_categories = [\"protocol\", \"feature\"]\n list_group.add_argument(\n \"-f\",\n \"--feature\",\n action=\"store_const\",\n const=[\"feature\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only feature modules\",\n )\n list_group.add_argument(\n \"-p\",\n \"--protocol\",\n action=\"store_const\",\n const=[\"protocol\"],\n dest=\"category\",\n default=default_categories,\n help=\"Only protocol modules\",\n )\n add_subcmd(\"info\", description=\"Show module information\", parents=[target_mod])\n cmds.append(cmd_module)\n\n save_reload_args = CommandParser()\n save_reload_args.add_argument(\n \"config_file\",\n nargs=\"*\",\n help=\"Name of config file (without .toml extension). Omit to affect all loaded config files.\",\n )\n set_reset_args = CommandParser()\n set_reset_args.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n cmd_config = CommandParser(\"config\", \"Manage configuration\")\n add_subcmd = cmd_config.make_adder(metavar=\"OPERATION\", dest=\"subcmd\", required=True)\n add_subcmd(\"save\", description=\"Save config files to disk\", parents=[save_reload_args])\n subcmd_savenew = add_subcmd(\"savenew\", description=\"Save config file to a new path\")\n subcmd_savenew.add_argument(\"config_file\", help=\"Name of config file (without .toml extension)\")\n subcmd_savenew.add_argument(\"new_path\", help=\"The path to save the config file to\")\n add_subcmd(\n \"reload\",\n description=\"Reload config files from disk\",\n parents=[save_reload_args],\n )\n subcmd_set = add_subcmd(\"set\", description=\"Modify config settings\", parents=[set_reset_args])\n subcmd_set.add_argument(\n \"key_path\",\n help=\"The config key to set. Subkeys are separated by dots, e.g. 'Core.Backup.Filename'\",\n )\n subcmd_set.add_argument(\"value\", nargs=\"?\", help=\"The new value. Omit to show the current value.\")\n subcmd_reset = add_subcmd(\n \"reset\",\n description=\"Reset config settings to last loaded value\",\n parents=[set_reset_args],\n )\n subcmd_reset.add_argument(\n \"key_path\",\n nargs=\"?\",\n help=(\n \"The config key to set. Subkeys are separated by dots, \"\n \"e.g. 'Core.Backup.Filename'. 
If omitted, the entire \"\n \"config will be reset.\"\n ),\n )\n subcmd_reset.add_argument(\n \"-d\",\n \"--default\",\n action=\"store_true\",\n help=\"Set the key to its default value instead. Effectively unsets a config key.\",\n )\n cmds.append(cmd_config)\n\n cmd_version = CommandParser(\"version\", \"Show version information\")\n cmds.append(cmd_version)\n\n cmd_restart = CommandParser(\"restart\", \"Restart ZeroBot.\")\n cmd_restart.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_restart)\n\n cmd_quit = CommandParser(\"quit\", \"Shut down ZeroBot.\")\n cmd_quit.add_argument(\"msg\", nargs=\"*\", help=\"Message sent to protocol modules as a reason\")\n cmds.append(cmd_quit)\n\n cmd_wait = CommandParser(\"wait\", \"Execute a command after a delay\")\n cmd_wait.add_argument(\n \"delay\",\n help=\"Amount of time to delay. Accepts the following modifier suffixes: 'ms', 's' (default), 'm', 'h'.\",\n )\n cmd_wait.add_argument(\"command\", help=\"Command to delay\")\n cmd_wait.add_argument(\"args\", nargs=argparse.REMAINDER, help=\"Command arguments\")\n cmds.append(cmd_wait)\n\n cmd_cancel = CommandParser(\"cancel\", \"Cancel a waiting command\")\n cancel_group = cmd_cancel.add_mutually_exclusive_group()\n cancel_group.add_argument(\"id\", type=int, nargs=\"?\", help=\"The ID of a waiting command\")\n cancel_group.add_argument(\"-l\", \"--list\", action=\"store_true\", help=\"List currently waiting commands\")\n cmds.append(cmd_cancel)\n\n cmd_backup = CommandParser(\"backup\", \"Create a database backup\")\n cmd_backup.add_argument(\"name\", type=Path, help=\"Backup filename\")\n cmds.append(cmd_backup)\n\n self.command_register(\"core\", *cmds)", "def devices_command_generic(*, device_id, command=None, request_type, **kwargs):\n if command is None:\n command = [{\"id\": device_id, \"type\": request_type}]\n return devices_command(command, **kwargs)", "def get_command(self, kword: str):\n # Step Zero is to make sure that the name does not belong to a REAL command.\n zero, mod = super().get_command(kword)\n if zero:\n return zero, mod\n\n # Otherwise, first, ensure that the keyword does in fact exist in the custom list.\n command = self.config.commands.get(kword, None)\n if not command:\n return None, None\n response = command[\"com\"]\n\n # Build the function to return the response. Note that \"self\" exists already.\n async def cmd_custom(args, src, **_):\n if args:\n member = self.get_member(src, args[0].strip())\n tag = member.mention if member else None\n else:\n tag = None\n\n nsfw = command.get(\"nsfw\", False)\n if nsfw and src.channel.id not in self.config.get(\"nsfwChannels\"):\n return None\n\n # Replace tags where needed.\n try:\n output = response.format(\n self=src.author.name,\n myID=src.author.id,\n tag=tag or src.author.mention,\n )\n except KeyError:\n return None\n else:\n return output\n\n # Specify the docstring and name so that !help will work on this.\n short = response.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n if len(short) > 80:\n short = short[:77] + \"...\"\n cmd_custom.__doc__ = (\n \"__Custom command__: Return the following text: ```{}```\\n\\n\".format(short)\n + command.get(\n \"desc\",\n \"This is a custom command, so available help text is limited, but at the same time, the command is very simple. 
All it does is return a string, although the string may include formatting tags for invoker name, invoker ID, and a targeted mention.\",\n )\n + \"\\n\\nSyntax: `{p}\"\n + kword.lower()\n + (\" <user_ID>\" if \"{tag}\" in response else \"\")\n + \"`\"\n )\n cmd_custom.__name__ = \"cmd_\" + kword.lower()\n\n return cmd_custom, None", "def do_list_commands(plugin):\n if not plugin.supported_commands:\n click.secho(\n f\"Plugin '{plugin.name}' does not define any commands.\", fg=\"yellow\"\n )\n return\n\n descriptions = {\n f\"{plugin.name}:{cmd}\": props.description\n for cmd, props in plugin.all_commands.items()\n }\n column_len = max(len(name) for name in descriptions.keys()) + 2\n for name, desc in descriptions.items():\n click.secho(name.ljust(column_len, \" \"), fg=\"blue\", nl=False)\n click.echo(desc)", "def command_service(self, rawCommand):\n pack = [x.strip() for x in split('[,()]*', rawCommand.strip())]\n raw_cmd = pack[0]\n argDict = {key: literal_eval(value) for key, value in utils.grouper(pack[1:], 2)}\n cmd = self.mapInterface.commands[raw_cmd]\n ret = cmd(**argDict)\n logger.info(\"Command '{}' run with args {}\".format(raw_cmd, argDict))\n return raw_cmd, ret", "def commands(self, commands):\n\n self._commands = commands", "def silkscreen_commands(self):\n return self.pcb_layers[\"silkscreen\"].commands", "async def async_execute_command(self, command, notif):\n if command.startswith('MCU'):\n value = await self.async_call_linkplay_tcpuart(command)\n elif command == 'Reboot':\n value = await self.async_call_linkplay_httpapi(\"getStatus:ip:;reboot;\", None)\n elif command == 'PromptEnable':\n value = await self.async_call_linkplay_httpapi(\"PromptEnable\", None)\n elif command == 'PromptDisable':\n value = await self.async_call_linkplay_httpapi(\"PromptDisable\", None)\n elif command == 'RouterMultiroomEnable':\n value = await self.async_call_linkplay_httpapi(\"setMultiroomLogic:1\", None)\n elif command == 'SetRandomWifiKey':\n from random import choice\n from string import ascii_letters\n newkey = (''.join(choice(ascii_letters) for i in range(16)))\n value = await self.async_call_linkplay_httpapi(\"setNetwork:1:{0}\".format(newkey), None)\n if value == 'OK':\n value = value + \", key: \" + newkey\n else:\n value = \"key: \" + newkey\n elif command.startswith('SetApSSIDName:'):\n ssidnam = command.replace('SetApSSIDName:', '').strip()\n if ssidnam != '':\n value = await self.async_call_linkplay_httpapi(\"setSSID:{0}\".format(ssidnam), None)\n if value == 'OK':\n value = value + \", SoftAP SSID set to: \" + ssidnam\n else:\n value == \"SSID not specified correctly. You need 'SetApSSIDName: NewWifiName'\"\n elif command.startswith('WriteDeviceNameToUnit:'):\n devnam = command.replace('WriteDeviceNameToUnit:', '').strip()\n if devnam != '':\n value = await self.async_call_linkplay_httpapi(\"setDeviceName:{0}\".format(devnam), None)\n if value == 'OK':\n self._name = devnam\n value = value + \", name set to: \" + self._name\n else:\n value == \"Device name not specified correctly. 
You need 'WriteDeviceNameToUnit: My Device Name'\"\n elif command == 'TimeSync':\n import time\n tme = time.strftime('%Y%m%d%H%M%S')\n value = await self.async_call_linkplay_httpapi(\"timeSync:{0}\".format(tme), None)\n if value == 'OK':\n value = value + \", time: \" + tme\n elif command == 'Rescan':\n self._unav_throttle = False\n self._first_update = True\n # await self.async_schedule_update_ha_state(True)\n value = \"Scheduled to Rescan\"\n elif command == 'Update':\n # await self.async_schedule_update_ha_state(True)\n value = \"Scheduled to Update\"\n else:\n value = \"No such command implemented.\"\n _LOGGER.warning(\"Player %s command: %s, result: %s\", self.entity_id, command, value)\n\n _LOGGER.debug(\"Player %s executed command: %s, result: %s\", self.entity_id, command, value)\n\n if notif:\n self.hass.components.persistent_notification.async_create(\"<b>Executed command:</b><br>{0}<br><b>Result:</b><br>{1}\".format(command, value), title=self.entity_id)", "def loadOlcCommands(self, player):\n player.addCommand('newzone', self.commands['newzone']())\n player.addCommand('delzone', self.commands['delzone']())\n player.addCommand('listzone', self.commands['listzone']())\n player.addCommand('newroom', self.commands['newroom']())\n player.addCommand('redit', self.commands['redit']())\n player.addCommand('delroom', self.commands['delroom']())\n player.addCommand('newportal', self.commands['newportal']())\n player.addCommand('delportal', self.commands['delportal']())\n player.addCommand('zedit', self.commands['zedit']())\n player.addCommand('pedit', self.commands['pedit']())\n player.addCommand('newtemplate', self.commands['newtemplate']())", "def register_target_commands():\n for plugin_cls in env.plugins.values():\n # Add target group to root commands. E.g. create `nursery vbox`\n cli.add_command(plugin_cls.cli_entry_func, plugin_cls.cli_entry_func.name)\n\n for cmd in TargetPlugin.root_actions:\n if cmd in plugin_cls.root_command_map:\n # Find the corresponding group in this module to add the target's\n # command group to. E.g. Find `nursery up` and add `vbox` to it.\n globals()[f\"{cmd}_cmd\"].add_command(\n plugin_cls.root_command_map[cmd], plugin_cls.cli_entry_func.name\n )", "def setup_commands(bot):\n # Reset the bot's command setup\n bot.reset_commands()\n # Load enabled mods\n for mod in bot.enabled_mods:\n try:\n full = 'mod_%s' % mod\n m = getattr(__import__('mods.%s' % full), full)\n except Exception:\n bot.log(ERROR, 'Importing the %s mod failed!' 
% mod)\n sys.excepthook(*sys.exc_info())\n continue\n\n try:\n bot.installed_mods[mod] = m\n # Check for a 404 handler, and replace the current one if there is\n p404 = getattr(m, 'handle_404', None)\n if p404:\n bot.cb_404 = p404\n\n # Check for a setup function, and run it if there is\n setup = getattr(m, 'setup', None)\n if setup:\n setup(bot)\n\n # Required command bank\n for cmd in m.command_bank:\n # Get the actual function\n func = getattr(m, cmd)\n # Get the args for the command\n data = m.command_bank[cmd]\n # If data[0] is true, mod_help will recognize this command\n if data[0]:\n bot.help_db[data[1]] = parse_help(func)\n # Get the main name and aliases inserted\n for alias in data[1:]:\n bot.command_db[alias] = func\n\n # Helper function for optional nameless multiples\n def add_optional(olist, name):\n olist.extend(getattr(m, f) for f in getattr(m, name, ()))\n\n # Optional filters are loaded and added to the list\n add_optional(bot.filters, 'filters')\n\n # Ditto for time-cycle callbacks\n add_optional(bot.periodic_cbs, 'periodic')\n\n # Handlers are the same, but structured as a dict with\n # \"type\": \"single function-name\" items\n handlers = getattr(m, 'handlers', None)\n if handlers:\n for cbtype in handlers:\n bot.handlers[cbtype].append(getattr(m, handlers[cbtype]))\n\n # Register any requirements\n # NOTE: By putting this at the end, we avoid the possibility of\n # getting fake requires.\n reqs = getattr(m, 'requires', None)\n if reqs:\n bot.required_mods.update(reqs)\n except Exception:\n bot.log(ERROR, 'Unable to install the %s mod!' % mod)\n del bot.installed_mods[mod]\n sys.excepthook(*sys.exc_info())\n\n missing = bot.required_mods - set(bot.installed_mods)\n if missing:\n raise MissingRequirementsError(missing)\n\n # And now for the post-install triggers.\n for mod, m in bot.installed_mods.items():\n post = getattr(m, 'post_prepare', None)\n if post:\n try:\n post(bot)\n except Exception:\n bot.log(ERROR, 'Unable to post-prepare the %s mod!' 
% mod)\n sys.excepthook(*sys.exc_info())", "def _init_commands(self):\n\t\tself.commands = {}\n\t\tself.log.info(\"Initializing commands...\")\n\t\t# Get all the commands and iterate over them\n\t\tfor command in self.conf_commands:\n\t\t\t\n\t\t\t# Verify the necessary config elements exist at all\n\t\t\tdisabled = command.get('disabled', False) # Disabled is optional, defaults to False\n\t\t\tif(disabled == True):\n\t\t\t\tcontinue;\n\t\t\tcommand_name = command.get('name', \"unknown\").lower()\n\t\t\tdescription = command.get('description', \"\")\n\t\t\tpermission_str = command.get('permission', None)\n\t\t\taction = command.get('action', None)\n\t\t\tmin_votes = command.get('min_votes', None)\n\t\t\targs = command.get('args', None)\n\t\t\taliases = command.get('aliases', None)\n\t\t\tif(command_name is None \n\t\t\t\tor permission_str is None \n\t\t\t\tor action is None \n\t\t\t\tor min_votes is None \n\t\t\t\tor args is None):\n\t\t\t\tself.log.warn(\"Command '{}': Error, missing 'permission', 'action', 'min_votes', or 'args' elements for command \".format(command_name))\n\t\t\t\tcontinue\n\n\t\t\t# Verify the votes and permission string are valid\n\t\t\tif(min_votes < 0):\n\t\t\t\tself.log.warn(\"Command '{}': Error, min_votes cannot be less than zero for command {}\".format(command_name, min_votes))\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tself.log.debug(\"Command '{}': minimum votes is {}\".format(command_name, min_votes))\n\n\t\t\ttry:\n\t\t\t\tpermission = Permission[permission_str]\n\t\t\t\tself.log.debug(\"Command '{}': permission is {}\".format(command_name, permission))\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.warn(\"Command '{}': Error, permission string '{}' is invalid, must be one of: {}\".format(command_name, permission_str, Permission.__members__))\n\t\t\t\tcontinue\n\n\t\t\t# Try to get the corresponding action class\n\t\t\ttry:\n\t\t\t\tmodule = import_module(\"obs.actions.\"+action)\n\t\t\t\tclass_ = getattr(module, action)\n\t\t\t\tself.log.debug(\"Command {}: action is {}\".format(command_name, class_))\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.warn(\"Command '{}': Error, no such action {} is defined. 
Full error: {}\".format(command_name, action, e))\n\t\t\t\tcontinue\n\n\t\t\t# Try to instantiate the action class\n\t\t\ttry:\n\t\t\t\tself.log.debug(\"Command {}: args are: {}\".format(command_name, args))\n\t\t\t\tcommand_obj = class_(self, command_name, aliases, description, permission, min_votes, args)\n\t\t\texcept ValueError as e:\n\t\t\t\tself.log.warn(e)\n\t\t\t\tcontinue\n\n\t\t\t# Add command_obj to internal reference\n\t\t\tself.commands[command_name] = command_obj\n\n\t\t\t# If there are aliases, add them too\n\t\t\t\n\t\t\tif(not aliases is None and isinstance(aliases, (list,) )):\n\t\t\t\tself.log.debug(\"Command '{}': Found aliases {}\".format(command_name, aliases))\n\t\t\t\tfor alias in aliases:\n\t\t\t\t\tself.commands[alias] = command_obj\n\t\t\telse:\n\t\t\t\tself.log.debug(\"Command '{}': No aliases\".format(command_name, aliases))\n\n\t\t# Finally after all commands have been initialized then add the help command\n\t\t#self.commands['help'] = Help(self)\n\n\t\t# Done initializing\n\t\tself.log.info(\"...Commands initialized: {}\".format(\n\t\t\t\tlist( self.commands.keys()) \n\t\t\t)\n\t\t)", "def processCommandList():\n\n try:\n # Assume that maya.cmds.about and maya.cmds.internalVar are already registered\n #\n commandListPath = os.path.realpath( os.environ[ 'MAYA_LOCATION' ] )\n platform = maya.cmds.about( os=True )\n commandListPath = os.path.join( commandListPath, commandListLocations[platform], 'commandList' )\n\n file = open( commandListPath, 'r' )\n for line in file:\n commandName, library = line.split()\n if not commandName in maya.cmds.__dict__:\n maya.cmds.__dict__[commandName] = __makeStubFunc( commandName, library )\n except:\n sys.stderr.write(\"Unable to process commandList %s\" % commandListPath)\n raise", "def do_device(self, args):\n self.device_command.cmdloop(\"Enter to device mode\")", "def initialize_commands(config):\n service_id = config['fastly.service_id']\n api_key = config['fastly.api_key']\n api = fastly.API()\n api.authenticate_by_key(api_key)\n\n @make_command()\n def fastlyservice(args):\n \"\"\"Get the fastly service info for configured fastly.service_id.\"\"\"\n pprint(api.service(service_id).attrs)\n\n @make_command()\n def fastlyactiveversion(args):\n \"\"\"Get the currently active version of the fastly service.\"\"\"\n service = api.service(service_id).attrs\n for version in service['versions']:\n if version['active']:\n print version['number']\n\n @make_command()\n def fastlyversion(args):\n \"\"\"Get the fastly version info for provided version.\"\"\"\n pprint(api.version(service_id, args[0]).attrs)\n\n @make_command()\n def fastlydomain(args):\n \"\"\"Get the fastly domain info for provided version and domain.\"\"\"\n pprint(api.domain(service_id, args[0], args[1]).attrs)\n\n @make_command()\n def fastlypurgeurl(args):\n \"\"\"Purge the url at host path.\"\"\"\n if (api.purge_url(args[0], args[1])):\n return\n std_error_message('ERROR: unable to purge')\n sys.exit(1)\n\n @make_command()\n def fastlypurgeservice(args):\n \"\"\"Purge the full service.\"\"\"\n if (api.purge_service(service_id)):\n return\n std_error_message('ERROR: unable to purge')\n sys.exit(1)\n\n @make_command()\n def fastlypurgekey(args):\n \"\"\"Purge a key from the service.\"\"\"\n if (api.purge_key(service_id, args[0])):\n return\n std_error_message('ERROR: unable to purge')\n sys.exit(1)\n\n @make_command()\n def fastlycondition(args):\n \"\"\"View or create a condition: <version> <name> [<type> <statement>].\"\"\"\n version = args[0]\n name = args[1]\n 
try:\n condition_type = args[2]\n statement = args[3]\n except IndexError:\n statement = None\n try:\n condition = api.condition(service_id, version, name)\n except BadRequestError:\n condition = Condition()\n if statement:\n condition.attrs['name'] = name\n condition.attrs['statement'] = statement\n condition.attrs['service_id'] = service_id\n condition.attrs['version'] = version\n condition.attrs['type'] = condition_type\n condition.conn = api.conn\n condition.save()\n else:\n pprint(condition.attrs)\n\n @make_command()\n def fastlyheader(args):\n \"\"\"View or create a header request: <version> <name> <condition>.\"\"\"\n version = args[0]\n name = args[1]\n try:\n condition = args[2]\n except IndexError:\n condition = None\n try:\n header = api.header(service_id, version, name)\n pprint(header.attrs)\n if condition:\n header.attrs['request_condition'] = condition\n header.save()\n except BadRequestError:\n if condition:\n header = Header()\n header.attrs['name'] = name\n header.attrs['service_id'] = service_id\n header.attrs['version'] = version\n header.attrs['request_condition'] = condition\n header.conn = api.con\n header.save()\n #pprint(header.attrs)", "def run_setup_commands(self):\n if not hasattr(self, 'commands') or not self.commands:\n return\n print('{GREEN}Running setup commands...{NC}'.format(**colors))\n for c in self.commands:\n self.mqtt.connect(self.mqtt_host)\n command = \"{c_topic}/{cmnd}\".format(**self, cmnd=c['command'])\n payload = ''\n if 'concat' in c: #It's a set of rules; so do fancy shit\n payload = ' '.join(c['concat'])\n else: #payload is the correct thing\n payload=c['payload']\n print(\"Sending {c} {p}\".format(c=command, p=payload))\n self.mqtt.publish(command, payload)\n self.mqtt.disconnect()\n sleep(1)\n if \"restart\" in c and c['restart'] == 1:\n self.online_check()", "def get_available_commands(self, caller):\n # commands = [{\"name\":\"LOOK\", \"cmd\":\"look\", \"args\":self.dbref}]\n commands = [{\"name\":\"LOOT\", \"cmd\":\"loot\", \"args\":self.dbref}]\n return commands", "def suggested_commands(self):\n return self.commands()", "def _send_custom_commands_after_welcome(self, conn):\n for command in self.commands:\n conn.send_raw(command)", "def command_help(self, command):\n self.commands[command].command_help()", "def showcommands(command=None, showall=None):\n # pydoc.help(ixnetPyCli)\n if command == None:\n print('\\nCommand list:\\n')\n else:\n print('\\tHelp on command usage: {0}'.format(command))\n\n for name,obj in inspect.getmembers(sys.modules[__name__]):\n if name in ['completer', 'runixncfgconfig', 'runjsonconfig', 'getInput', 'configIxNetworkFromScratch']: continue\n #if inspect.isfunction(obj) and eval(name+'.__doc__') is not None:\n if inspect.isfunction(obj):\n parameters = inspect.getargspec(eval(name))\n\n if parameters[0] == []:\n parameters = ''\n if command is None:\n print('\\t{0}({1})'.format(name, parameters))\n else:\n parameters = ' '.join(parameters[0][0:])\n if command != None and name == command:\n print('\\n\\t{0}({1})'.format(name, parameters))\n if command == None:\n print('\\t{0} ({1})'.format(name, parameters))\n\n if showall is not None:\n print('\\t{0}'.format(eval(name+'.__doc__')))\n print()\n print()\n\n if command == None:\n print('\\n\\n Example:')\n print('\\tThe first thing you need to do is create a preference file in the /Preferences directory.')\n print('\\tMake a copy of the provided template.py and give it a meaningful name.')\n print('\\t Ex: joe.py')\n\n print('\\n\\t1> Enter: 
setpreferences(\"Your preference file\")')\n print('\\n\\t2> For Windows chassis connection, enter: connecttowindows()')\n print('\\t For Linux chassis connection, enter: connecttolinux()')\n print('\\t To connect to an existing Linx session ID: connecttolinux(resume=True, sessionId=<id>)')\n print() \n print('\\t3> To load a saved config file and use the chassisIp/ports saved in the config file:')\n print('\\t Enter: loadsavedconfig(\"ConfigFiles/<config file>\")')\n print()\n print('\\t To load a saved config file and optionally assign chassis and ports:')\n print('\\t Enter: loadsavedconfig(\"ConfigFiles/<config file>\", chassisIp=<ip>, ')\n print('\\t portList=[[ixChassisIp, \"1\", \"1\"], [ixChassisIp, \"2\", \"1\"]])')\n print()\n print('\\t To create a configuration from scratch:')\n print('\\t Enter: config(\"ConfigFiles/<params file>\")')\n print()", "def help(self, irc, msg, args, command):\n command = map(callbacks.canonicalName, command)\n (maxL, cbs) = irc.findCallbacksForArgs(command)\n if maxL == command:\n if len(cbs) > 1:\n names = sorted([cb.name() for cb in cbs])\n irc.error(format('That command exists in the %L plugins. '\n 'Please specify exactly which plugin command '\n 'you want help with.', names))\n else:\n assert cbs, 'Odd, maxL == command, but no cbs.'\n irc.reply(cbs[0].getCommandHelp(command, False))\n else:\n irc.error(format('There is no command %q.',\n callbacks.formatCommand(command)))", "def func(self):\n from evennia.utils.utils import string_suggestions, list_to_string\n\n msg = \"Command '%s' is not available.\" % self.raw\n cmdset = self.cmdset\n cmdset.make_unique(self.caller)\n all_cmds = [cmd for cmd in cmdset if cmd.auto_help and cmd.access(self.caller)]\n names = []\n for cmd in all_cmds:\n # noinspection PyProtectedMember\n names.extend(cmd._keyaliases)\n suggestions = string_suggestions(self.raw, set(names), cutoff=0.7)\n if suggestions:\n msg += \" Maybe you meant %s?\" % list_to_string(\n suggestions, \"or\", addquote=True\n )\n else:\n msg += ' Type \"help\" for help.'\n self.msg(msg)", "def get_commands(bot):\n new_commands = []\n\n new_commands.append(Command(\n 'mycommand', subcommands=[\n SubCommand(\n Opt('myoption'),\n doc='This is a simple command with a single required option.'),\n SubCommand(\n Opt('custom', optional=True),\n Opt('attached', optional=True, attached='attached argument'),\n doc='This has two different optional options, one without an attached '\n 'parameter, and the other requiring an attached parameter.'),\n SubCommand(\n Opt('trailing'),\n Arg('arg 1'),\n Arg('arg 2'),\n Arg('arg 3', argtype=ArgTypes.SPLIT, additional='more args'),\n doc='This command requires a lot of trailing arguments.'),\n SubCommand(\n Opt('grouped'),\n Arg('grouped arguments', argtype=ArgTypes.MERGED),\n doc='This will group all given arguments as a single string.'),\n SubCommand(\n Opt('complex', attached='attached'),\n Opt('other', optional=True, attached='also required'),\n Arg('arg 1'),\n Arg('arg 2', argtype=ArgTypes.SPLIT_OPTIONAL, additional='more args'),\n doc='The complex option has a required attached parameter, and the '\n '\\'other\\' option also has a required attached parameter if '\n '\\'other\\' is included. 
Additionally, there will be a requirement '\n 'of at least 1 trailing argument.'),\n SubCommand(\n Opt('marquee'),\n Arg('text', argtype=ArgTypes.MERGED,\n check=lambda b, m, v, *a: len(v) <= 100,\n check_error=\"Marquee message must be less than 100 characters long.\"),\n doc='Creates a marquee that loops 3 times.')],\n shortcuts=[\n Shortcut(\n 'complex', 'complex {attached} other {other} {arg 1} {arg 2}',\n Arg('attached'), Arg('other'), Arg('arg 1'),\n Arg('arg 2', argtype=ArgTypes.SPLIT_OPTIONAL)),\n Shortcut(\n 'marquee', 'marquee {text}', Arg('text', argtype=ArgTypes.MERGED))],\n description='Your command description here.',\n other='This text is optional - it just shows up after everything '\n 'else. Quick note, all of the commands here can only be used by '\n 'bot moderators or above, as indicated by elevated_level. A '\n 'level of 2 would mean only server owners or above can use the '\n 'command, and a level of 3 would restrict the command to only '\n 'the bot owners.',\n elevated_level=1, category='demo'))\n\n new_commands.append(Command(\n 'myothercommand', subcommands=[\n SubCommand(\n Arg('text', argtype=ArgTypes.MERGED_OPTIONAL),\n doc='This traps all further commands from being executed.'),\n SubCommand(\n Opt('order'), Opt('matters'),\n doc='It is impossible to access this command because the first '\n 'subcommand will always be satisfied first. Order of the '\n 'subcommand matters!'),\n SubCommand(\n Opt('sample'), Opt('foo'), Opt('bar'),\n doc='Also impossible to access. This subcommand just adds some '\n 'keywords to the command.')],\n description='Only bot owners can see this text!',\n other='Note that no shortcuts were defined. They, too, are optional. '\n 'Also, this command is hidden, which means that only the bot '\n 'owners can see this command listed from the help command. '\n 'However, unless the command is configured with an elevated '\n 'permissions level, any user can still execute the command. '\n 'Users still will not be able to see the specific help for this '\n 'command, though. Lastly, this command is disabled in DMs.',\n hidden=True, allow_direct=False, category='demo'))\n\n new_commands.append(Command(\n 'notify', subcommands=[\n SubCommand(\n Arg('text', argtype=ArgTypes.MERGED),\n doc='Notify the owners with some text!')],\n other='This command uses a custom function. It is called with the '\n 'same arguments as get_response. 
The command will show up to '\n 'all users in the help command, but can only be used by server '\n 'owners, as it is disallowed in direct messages.',\n elevated_level=2, allow_direct=False, function=custom_notify,\n category='demo'))\n\n new_commands.append(Command(\n 'wait', other='Use this command to demo the wait_for functionality', category='demo'))\n\n return new_commands", "def helper_commands():\n # Test HELP\n try:\n check = check50.run(run_command).stdin(\"HELP\")\n for help in help_statement:\n check.stdout(help)\n except check50.Failure as error:\n raise check50.Failure(f\"HELP did not print the expected message.\\n {error}\")\n\n # Test LOOK command\n try:\n check50.run(run_command).stdin(\"LOOK\").stdout(room_1_description)\n check50.run(run_command).stdin(\"look\").stdout(room_1_description)\n except check50.Failure as error:\n raise check50.Failure(f\"LOOK/look did not print the expected room description.\\n {error}\")\n\n # Test QUIT\n try:\n check50.run(run_command).stdin(\"QUIT\").stdout(\"Thanks for playing!\").exit(0)\n except check50.Failure as error:\n raise check50.Failure(f\"QUIT did not function as expected.\\n {error}\")", "def getCommands(self):", "def on_command(self, session, cmd_list):\n assert cmd_list\n\n cmd = cmd_list[0]\n if cmd in self._commands:\n return self._commands[cmd].function(session, cmd_list)\n else:\n self.reply_text(session, \"NG:Unknown command [%s]\" % cmd)\n return True", "def cmd(self):", "def register_command(func):\n supported_commands.append(func.__name__)\n return func", "def default_supported_commands(self, commands):\n self.state.default_supported_commands(commands)", "def help_command(update: Update, context: CallbackContext) -> None:\n commands = [\"/login <pwd>\\n\",\n \"/status\\n\",\n \"/heat\\n\",\n \"/temp\\n\",\n \"/off\\n\",\n \"/help\\n\",\n \"/set\\n\",\n \"/unset\\n\",\n \"/heatmore\\n\",\n \"/lighton\\n\",\n \"/lightoff\\n\"]\n\n cmd: str = \" \".join(commands)\n update.message.reply_text('commands are:\\n' + cmd)", "def airflow_commands():\n pass", "def do_command(self, args):\n chk_arg_count(args, 3)\n hostname, command, value = args\n args = (hostname, value)\n hostops = dbops.Hosts()\n if command == 'mem':\n hostops.memory(args)\n elif command == 'cores':\n hostops.cores(args)\n elif command == 'bits':\n hostops.bitness(args)\n else:\n raise ValueError('Unknown host configuration.')", "def execute_command(self, command):\n raise NotImplementedError", "def _setup_command(self):\r\n raise NotImplementedError", "def commands():\n # Check invalid command\n check50.run(run_command).stdin(\"cs50\").stdout(\"Invalid command.\")\n\n # Check for upper case abreviation\n try:\n check50.run(run_command).stdin(\"W\").stdout(room_2_description)\n except check50.Failure as error:\n raise check50.Failure(f\"Could not use abbreviation 'w' to move\")\n\n # Check for lower case abbreviation\n try:\n check50.run(run_command).stdin(\"w\").stdout(room_2_description)\n except check50.Failure as error:\n raise check50.Failure(f\"Could not use abbreviation 'w' to move\")", "def _addCommand(self, command):\n self.updater.dispatcher.add_handler(command)", "def initializeCommands(self):\n # ** Load topics from highest to lowest priority order **\n ignoredPlugins = HashSet(self.yaml.getIgnoredPlugins())\n # Don't load any automatic help topics if All is ignored\n if ignoredPlugins.contains(\"All\"):\n return\n # Initialize help topics from the server's command map\n for command in server.getCommandMap().getCommands():\n if 
commandInIgnoredPlugin(command, ignoredPlugins):\n continue \n # Register a topic\n for c in topicFactoryMap.keySet():\n if c.isAssignableFrom(command.__class__):\n t = self.topicFactoryMap.get(c).createTopic(command)\n if t != None:\n self.addTopic(t)\n continue \n if isinstance(command, (PluginCommand, )) and c.isAssignableFrom((command).getExecutor().__class__):\n t = self.topicFactoryMap.get(c).createTopic(command)\n if t != None:\n self.addTopic(t)\n continue \n self.addTopic(GenericCommandHelpTopic(command))\n for command in server.getCommandMap().getCommands():\n if commandInIgnoredPlugin(command, ignoredPlugins):\n continue \n for alias in command.getAliases():\n if self.server.getCommandMap().getCommand(alias) == command:\n self.addTopic(CommandAliasHelpTopic(\"/\" + alias, \"/\" + command.getLabel(), self))\n filteredTopics = Collections2.filter(self.helpTopics.values(), Predicates.instanceOf(CommandAliasHelpTopic.__class__))\n if not filteredTopics.isEmpty():\n self.addTopic(IndexHelpTopic(\"Aliases\", \"Lists command aliases\", None, filteredTopics))\n pluginIndexes = HashMap()\n fillPluginIndexes(pluginIndexes, self.server.getCommandMap().getCommands())\n for entry in pluginIndexes.entrySet():\n self.addTopic(IndexHelpTopic(entry.getKey(), \"All commands for \" + entry.getKey(), None, entry.getValue(), \"Below is a list of all \" + entry.getKey() + \" commands:\"))\n for amendment in yaml.getTopicAmendments():\n if self.helpTopics.containsKey(amendment.getTopicName()):\n self.helpTopics.get(amendment.getTopicName()).amendTopic(amendment.getShortText(), amendment.getFullText())\n if amendment.getPermission() != None:\n self.helpTopics.get(amendment.getTopicName()).amendCanSee(amendment.getPermission())", "def apply_action(self, cmd_name, *args):\n\n action = Action(self.tahoma_device.url)\n action.add_command(cmd_name, *args)\n self.controller.apply_actions(\"HomeAssistant\", [action])", "def process_cmd(config, cmd):\n # Separate command from arguments\n cmd_parts = cmd.split(' ', 1)\n head = cmd_parts[0]\n args = ''\n if len(cmd_parts) == 2:\n args = cmd_parts[1]\n\n # Call the command\n if not common.call_cmd(head, config, args):\n print(\"RabbitHole: Unknown command '{}'\".format(head))", "def execute_command(self):\n raise Exception(\"Not implemented\")", "def runCommand(command):\n None", "def cmd(self) -> List[str]:\n raise NotImplementedError(\"Must implement in frontend subclass.\")", "def register_commands(self):\n for module in copy.copy(sys.modules).values():\n for command in module_functionalities(module, 'MARA_CLICK_COMMANDS', click.Command):\n if 'callback' in command.__dict__ and command.__dict__['callback']:\n package = command.__dict__['callback'].__module__.rpartition('.')[0]\n if package != 'flask':\n register_command(self, command, package)", "def cmd(self, command):\n self._commands.append(command)", "async def hockey_commands(self, ctx: commands.Context) -> None:\n pass", "def plugh():", "def handle_command(command, channel):\n #Default respons is help text for the user\n default_response = \"This don't exist m8. 
Try *{}*.\".format(\"!price trx\")\n #Finds and executes the given command, filling in response\n response = None\n \n if command.lower() in name_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + name_id_map[command.lower()] + '/')\n coin = req.json()\n text =format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command.lower() in symbol_id_map:\n req = requests.get(url = 'https://api.coinmarketcap.com/v1/ticker/' + symbol_id_map[command.lower()] + '/')\n coin = req.json()\n text = format_coin_output(coin[0])\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!top':\n text = top_coins()\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!exit':\n text = \":wasssap3::wasssap3:ABANDON SHIP!!!:wasssap3::wasssap3:\\n :rotating_light:EXIT ALL MARKETS:rotating_light:\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n elif command == '!ping':\n text = \"Still scavaging the moon.\\n\"\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=text,\n )\n else:\n slack_client.api_call(\n \"chat.postMessage\",\n channel=channel,\n text=default_response,\n )", "def _commands(self) -> Dict[str, List[str]]:\r\n pass", "def cli():\n pass # do nothing here, it just defines the name for other subcommands", "def do_command(self, args = ()):\n if len(args) == 0:\n self.do_overview()\n elif len(args) != 1:\n raise ValueError('Wrong number of arguments.')\n elif args[0] in self.base.commands.keys():\n self.do_command_help(args[0])\n else:\n raise ValueError('No such command.')", "def run_protocol(self, device, command, *argv, **kwarg):\n if not IxnetworkIxiaClientImpl.ixnet:\n return 0, \"Ixia not connected\"\n ############# Implement me ################\n if command == \"start_protocols\":\n device.applog.info(\"Starting All Protocols\")\n IxnetworkIxiaClientImpl.ixnet.StartAllProtocols(Arg1=\"sync\")\n time.sleep(15)\n for ep in IxnetworkIxiaClientImpl.ip_eps:\n device.applog.info(\"Sending ARP on \" + ep.Name)\n ep.Start()\n ep.SendArp()\n time.sleep(5)\n device.applog.info(\"Generating Traffic\")\n for ti in IxnetworkIxiaClientImpl.tis:\n ti.Generate()\n device.applog.info(\"Applying Traffic\")\n IxnetworkIxiaClientImpl.ixnet.Traffic.Apply()\n elif command == \"stop_protocols\":\n device.applog.info(\"Stopping All Protocols\")\n IxnetworkIxiaClientImpl.ixnet.StopAllProtocols(Arg1=\"sync\")\n elif command == \"set_protocol\":\n params = kwarg[\"params\"]\n param = params[0]\n for ep in IxnetworkIxiaClientImpl.bgp_eps:\n if \"bgp_peer\" in param and param[\"bgp_peer\"] != ep.Name:\n continue\n enable = param[\"enable\"]\n IxnetworkIxiaClientImpl.bgp_eps\n ep.Active.Single(enable)\n IxnetworkIxiaClientImpl.ixnet.Globals.Topology.ApplyOnTheFly()\n return 0, \"\"", "async def command_interpreter(self, command: str) -> None:\n for cls in GlobalCommandRegistry:\n if not asyncio.iscoroutinefunction(GlobalCommandRegistry[cls].main):\n continue\n if command.startswith(tuple(GlobalCommandRegistry[cls].helper['name'])):\n result = await asyncio.gather(GlobalCommandRegistry[cls](command, self.print_queue).main())\n if result is False:\n print(\"Result is false?!\")\n raise KeyboardInterrupt" ]
[ "0.62071913", "0.6122376", "0.6122376", "0.6122376", "0.6122376", "0.6035483", "0.59634626", "0.5925076", "0.59209687", "0.58657867", "0.57914144", "0.57778853", "0.5737286", "0.5727475", "0.5710332", "0.5668034", "0.5654879", "0.56148833", "0.55979323", "0.55908215", "0.5581696", "0.55612314", "0.5553877", "0.5550502", "0.5548983", "0.554583", "0.5516002", "0.55010074", "0.5500212", "0.5490456", "0.5484954", "0.5478195", "0.54285055", "0.5415892", "0.54128486", "0.54123324", "0.5404342", "0.5376929", "0.5375797", "0.53693146", "0.5368098", "0.53672904", "0.53513527", "0.5348861", "0.5348053", "0.5346916", "0.5345955", "0.53435546", "0.53318846", "0.53199583", "0.53056943", "0.53017974", "0.5301251", "0.5296428", "0.528663", "0.5285606", "0.52844256", "0.52806544", "0.52757", "0.5263367", "0.52587366", "0.52576184", "0.5254237", "0.52501243", "0.52441573", "0.52419424", "0.52371705", "0.5233905", "0.52335143", "0.52310395", "0.5230243", "0.52257144", "0.5222054", "0.5218154", "0.52089334", "0.5207107", "0.519605", "0.51949745", "0.51944745", "0.5186877", "0.5174906", "0.5172977", "0.51606613", "0.516054", "0.5156174", "0.5147465", "0.51425123", "0.51398087", "0.5135533", "0.5134559", "0.513236", "0.5132014", "0.5129738", "0.51261175", "0.5125594", "0.51166826", "0.5113102", "0.51076305", "0.5106257", "0.51056176", "0.5104554" ]
0.0
-1
Turns the TPLink smartplug on or off depending on the state
def set_state(self, is_on: bool) -> None:
    json_data = self.perform_command(self.commands["on"] if is_on else self.commands["off"])
    if json_data["system"]["set_relay_state"]["err_code"] != 0:
        raise Exception("Error: Error from the smartplug: " + json.dumps(json_data))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_on(self, **kwargs):\n self.smartplug.turn_on()", "def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)", "def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)", "def on(self):\n self.state = \"ON\"\n logger.info(\"Turning on %s lamp at %s port %s plug %s\" % (self.name,\n self.host,\n self.port,\n self.plug))\n return self.send_cmd(\"pset %s 1\" % self.plug)", "def on(config: dict):\n switch_device(config, config[\"inching\"], \"on\")", "def turnLightingSystemOn():\n dislin.light('ON')", "def off(self):\n self.state = \"OFF\"\n logger.info(\"Turning off %s lamp at %s port %s plug %s\" % (self.name,\n self.host,\n self.port,\n self.plug))\n return self.send_cmd(\"pset %s 0\" % self.plug)", "def force_switch_on(self):\n self.turn_on_modem()", "def is_on(self) -> bool:\n return self.tuya_device.status.get(DPCODE_SWITCH, False)", "def turn_off(self, **kwargs):\n self.smartplug.turn_off()", "def state(config: dict):\n\n async def state_callback(device):\n if device.basic_info is not None:\n if device.available:\n print_device_details(device)\n\n device.shutdown_event_loop()\n\n logger.info(\"Initialising SonoffSwitch with host %s\" % config[\"host\"])\n SonoffSwitch(\n host=config[\"host\"],\n callback_after_update=state_callback,\n logger=logger,\n device_id=config[\"device_id\"],\n api_key=config[\"api_key\"],\n )", "def affection_status_switch_on(self):\n self._affection_status_switch = False", "def _doPowerState(self, state=False):\n if state:\n self._cmdPowerOn()\n else:\n self._cmdPowerOff()", "def turn_on(self):\n self._remote.power(1)", "def set_state(self, state: bool) -> None:\n payload = self._cfg.state_power_on if state else self._cfg.state_power_off\n command = f\"{COMMAND_POWER}{self._cfg.idx+1}\"\n self._mqtt_client.publish(\n self._cfg.command_topic + command,\n payload,\n )", "def turnLightingSystemOff():\n dislin.light('OFF')", "def turn_on(self):\n self._state = True\n self.write_state(bytes([9]))\n self.schedule_update_ha_state()", "def _turn_on_dev_mode(self):\n if self._device is not None:\n self._char_write(self._BLE_SERVICE_ANTI_DOS,\n [ord(c) for c in self._ANTI_DOS_MESSAGE])\n self._char_write(self._BLE_SERVICE_TX_POWER,\n [self._TX_POWER_VALUE])\n # Sending 0x01 to the wake service wakes the sphero.\n self._char_write(self._BLE_SERVICE_WAKE, [0x01])", "def turn_on(self, **kwargs):\n set_sonoff_state(self._host, \"on\")\n self._state = True", "def turn_on(self):\n self._lms.query(self._id, 'power', '1')\n self.update_ha_state()", "def setHotplug(self, hotplug):\n # type: (bool)->None\n\n msg = \"hotplug key will be renamed into allow-hotplug in 4.0\"\n warnings.warn(msg, DeprecationWarning)\n\n self._validator.validate_one(\n 'hotplug', VALID_OPTS['hotplug'], hotplug)\n 
self._ifAttributes['hotplug'] = hotplug", "async def async_turn_on(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"off\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"auto-on\")\n\n await self._ctrl.force_update()", "def onoff(boolean):\n if boolean:\n return \"on\"\n else:\n return \"off\"", "def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True", "def setOn(self, command):\r\n self.setDriver('ST', 1)", "def affection_status_switch_on(self):\n self._affection_status_switch = True", "def turn_off(self, **kwargs: Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)", "def setLightSwitch(self, _state=False):\n if _state == True:\n render.setLight(self.lightNP)\n elif _state == False:\n render.clearLight(self.lightNP)", "def on(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n self._hub.switch_power.power_on(self.port_number)\n if self.secondary_port_number is not None:\n self._hub.switch_power.power_on(self.secondary_port_number)\n if include_ethernet:\n self.ethernet_on()\n time.sleep(5) # Small delay to give time for 'dev/tty' to populate\n switchboard = self._get_switchboard_if_initialized()\n if switchboard:\n switchboard.open_all_transports()", "def pibooth_reset(cfg, hard):", "def t0_switch_config_helper(test_obj: 'T0TestBase'):\n configer = SwitchConfiger(test_obj)\n test_obj.dut.switch_id = configer.start_switch()", "def _switch_motors(self, state):\n # Relay control was moved to its own package\n if self.relayExists:\n if not self._SwitchingMotors: # Prevent overlapping runs\n self._SwitchingMotors = True\n # Switch \"on\" to \"off\" if not safe to operate,\n # then we can just pass state to arlobot_usbrelay\n if not self._SafeToOperate:\n state = False\n rospy.wait_for_service('/arlobot_usbrelay/toggle_relay')\n rospy.loginfo(\"Switching motors.\")\n try:\n toggle_relay = rospy.ServiceProxy('/arlobot_usbrelay/toggle_relay', ToggleRelay)\n left_relay_result = toggle_relay(self.usbLeftMotorRelayLabel, state)\n right_relay_result = toggle_relay(self.usbRightMotorRelayLabel, state)\n if left_relay_result.toggleSuccess and right_relay_result.toggleSuccess:\n self._motorsOn = True\n else:\n self._motorsOn = False\n except rospy.ServiceException as e:\n rospy.loginfo(\"Service call failed: %s\" % e)\n self._SwitchingMotors = False\n else: # If no automated motor control exists, just set the state blindly.\n self._motorsOn = state", "def _force_on(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'on')", "def toggle_lights(bridge):\n if check_any_light_on(bridge):\n turn_off_lights(bridge)\n else:\n turn_on_lights(bridge)", "def manualState(self, tfid, state):\n self.trafficLights.get(int(tfid)).setState(state)\n self.trafficLights.get(int(tfid)).updateState()", "def turn_off(self, **kwargs):\n _LOGGER.error(\"DALI TURN OFF\")\n self._state 
= False\n\n url = self.urlx + '/toggle'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = json_data['state']\n\n self._dimmer = 0\n\n self._state = state == 'on'", "def _force_allon(hass, entity_id=None, old_state=None, new_state=None):\n _call_service(hass, SCHEDULES[PERSIST['mode']][1], 'turn_on')\n _call_service(hass, 'switch.all_on', 'turn_off')", "async def test_on_off(hass: HomeAssistant, mock_litejet) -> None:\n\n await async_init_integration(hass, use_switch=True)\n\n assert hass.states.get(ENTITY_SWITCH).state == STATE_OFF\n assert hass.states.get(ENTITY_OTHER_SWITCH).state == STATE_OFF\n\n assert not switch.is_on(hass, ENTITY_SWITCH)\n\n await hass.services.async_call(\n switch.DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_SWITCH}, blocking=True\n )\n mock_litejet.press_switch.assert_called_with(ENTITY_SWITCH_NUMBER)\n\n await hass.services.async_call(\n switch.DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_SWITCH}, blocking=True\n )\n mock_litejet.release_switch.assert_called_with(ENTITY_SWITCH_NUMBER)", "async def async_turn_on(self, **kwargs):\n try:\n if await self._api.set_relay_state(self._dev_id, \"on\"):\n self._is_on = True\n self.async_write_ha_state()\n except Smile.PlugwiseError:\n _LOGGER.error(\"Error while communicating to device\")", "def switch_on(self, boot_timeout=None, settledown_duration=None,\n simple_switch_mode=False):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def off(config: dict):\n switch_device(config, config[\"inching\"], \"off\")", "def toggle(self):\n self._state.is_on = not self._state.is_on\n self.send_command(Command.TOGGLE, [])", "def handle_onoff_mode_received(\n msg: ReceiveMessage, template_name: str, attr: str\n ) -> None:\n payload = self.render_template(msg, template_name)\n payload_on: str = self._config[CONF_PAYLOAD_ON]\n payload_off: str = self._config[CONF_PAYLOAD_OFF]\n\n if payload == \"True\":\n payload = payload_on\n elif payload == \"False\":\n payload = payload_off\n\n if payload == payload_on:\n setattr(self, attr, True)\n elif payload == payload_off:\n setattr(self, attr, False)\n else:\n _LOGGER.error(\"Invalid %s mode: %s\", attr, payload)\n\n get_mqtt_data(self.hass).state_write_requests.write_state_request(self)", "def bcp_switch(self, name, state, **kwargs):\n if int(state):\n self.events.post('switch_' + name + '_active')\n else:\n self.events.post('switch_' + name + '_inactive')", "def toggle_connect( self ):\n # show waiting for communication\n self.lbl_status.setText( 'Waiting...' 
)\n self.lbl_statusLight.setPixmap( self.img_yellowLight )\n self.repaint()\n \n # create laser controller if doesn't already exist, connect\n if self.inst is None:\n try:\n self.inst = pac.Ammeter( self.port, timeout = 30 )\n self.inst.connect()\n \n except Exception as err:\n self.update_connected_ui( False )\n \n warning = QMessageBox()\n warning.setWindowTitle( 'Picoammeter Controller Error' )\n warning.setText( 'Could not connect\\n{}'.format( err ) )\n warning.exec()\n \n else:\n self.delete_controller()\n \n # update ui\n if self.inst is not None:\n self.update_connected_ui( self.inst.connected )\n \n else:\n self.update_connected_ui( False )", "def configure_switch(self, config):\n raise NotImplementedError", "def set_toggle_devices_enabled(self, track, xclip, ident, value = None):\n for device in track.devices:\n if(hasattr(device, 'parameters')):\n self._parent._device_actions.set_device_on_off(device, track, xclip, ident);", "def set_power(sid):\n # Resolve the passed parameters if any\n timer = None\n os = None\n if request.json:\n if timer in request.json:\n timer = request.json.get('timer')\n if os in request.json:\n os = request.json.get('os')\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n state = hosts.get(db, sid)['state']\n \n if state == 'on':\n # The host is on -- turn it off\n # TODO make a unix shell util file\n # TODO make a windows util file\n return\n elif state == 'off':\n # The host is off -- turn it on\n if timer is not None:\n sleep(timer)\n netutil.wake_on_lan(db, sid)\n ret = {'power': {'state': 'on'}}\n return jsonify(ret)\n # TODO find a keyboard driver and implement OS parameter", "def onSwitchActivate(self, button, state):\n\t\tcommand_string = 'id1 mav.waypoint_actuator switch_hold \\n'\n\t\tcomm.write(bytes(command_string, 'utf8'))", "async def async_set_wifi_led_on(self):\n return", "def pswitchon(chan) :\n s.phaseSwitching(True, chan)", "def change_setting(self, key, val):\n if isinstance(val, bool):\n payload = 'on' if val else 'off'\n else:\n payload = val\n return self._request('post',\n 'fifo_command.php?cmd={}%20{}'.format(key,\n payload))", "def cycle(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n self.off()\n time.sleep(2) # Small delay before calling power_on\n self.on()", "def r2_on_off():\n \n r2_cmd_packet = b'\\x04\\x14\\x02\\x00\\x00\\xe6\\x0f'\n ser_relay.write(r2_cmd_packet)", "async def test_device_mode_roller(hass, coap_wrapper, monkeypatch):\n assert coap_wrapper\n\n monkeypatch.setitem(coap_wrapper.device.settings, \"mode\", \"roller\")\n hass.async_create_task(\n hass.config_entries.async_forward_entry_setup(coap_wrapper.entry, SWITCH_DOMAIN)\n )\n await hass.async_block_till_done()\n assert hass.states.get(\"switch.test_name_channel_1\") is None", "def enable_irq(state: bool = True, /) -> None:", "def switch_on(self,name):\n self.circles[name].switch_on()\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=1 WHERE target=%s\"\"\", (name,))", "def wifi_on(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE01\")\n time.sleep(100e-3)", "async def async_turn_off(self, **kwargs):\n try:\n if await self._api.set_relay_state(self._dev_id, \"off\"):\n self._is_on = False\n self.async_write_ha_state()\n except Smile.PlugwiseError:\n _LOGGER.error(\"Error while communicating to device\")", "def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n 
\"external_lights\"\n ] = False", "def change_switching(\n self, device, config\n ): # Has to be a string command or a list of commands containing strings!!\n # Check if only a string is passed and not a list and convert into list if need be\n if isinstance(config, str):\n configs = [config]\n else:\n configs = config\n\n if device.get(\"Visa_Resource\", None): # Searches for the visa resource\n resource = device\n else:\n self.log.error(\n \"The VISA resource for device \"\n + str(device[\"Device_name\"])\n + \" could not be found. No switching possible.\"\n )\n return False\n\n if device.get(\"device_exclusive_switching\", False):\n self.log.debug(\"Device exclusive switching used...\")\n return self.device_exclusive_switching(device, configs)\n else:\n # Normal switching\n return self.manual_switching(device, configs, BBM=True)", "def toggle(self):\n s = self.status()\n if s == self.POWER_OFF:\n self.on()\n else:\n self.off()\n return self.status()", "def bool_to_on_off(boolean: bool):\n if boolean:\n return \"on\"\n return \"off\"", "def turn_off(self, **kwargs: Any) -> None:\n self._device.power_on = False\n _LOGGER.debug(\"Turn off light %s\", self._device.ip)", "def turn_on(self, **kwargs) -> None:\n self._device.writeCharacteristic(self._handle, b'\\x00', True)\n self._state = True\n self.schedule_update_ha_state()", "async def async_turn_on(self, **kwargs: Any) -> None:\n _LOGGER.debug(\"Tried to switch on %s\", self.name)\n try:\n await self.hass.async_add_executor_job(\n self.device.appliance.set_setting, BSH_POWER_STATE, BSH_POWER_ON\n )\n except HomeConnectError as err:\n _LOGGER.error(\"Error while trying to turn on device: %s\", err)\n self._state = False\n self.async_entity_update()", "def turn_on(self):\n GPIO.output(self.gpio, True) # turn on light", "def update(self):\n try:\n if not self._sysinfo:\n self._sysinfo = self.smartplug.sys_info\n self._mac = self.smartplug.mac\n self._model = self.smartplug.model\n if self.smartplug.context is None:\n self._alias = self.smartplug.alias\n self._device_id = self._mac\n else:\n self._alias = self._plug_from_context[\"alias\"]\n self._device_id = self.smartplug.context\n\n if self.smartplug.context is None:\n self._state = self.smartplug.state == self.smartplug.SWITCH_STATE_ON\n else:\n self._state = self._plug_from_context[\"state\"] == 1\n\n if self.smartplug.has_emeter:\n emeter_readings = self.smartplug.get_emeter_realtime()\n\n self._emeter_params[ATTR_CURRENT_POWER_W] = \"{:.2f}\".format(\n emeter_readings[\"power\"]\n )\n self._emeter_params[ATTR_TOTAL_ENERGY_KWH] = \"{:.3f}\".format(\n emeter_readings[\"total\"]\n )\n self._emeter_params[ATTR_VOLTAGE] = \"{:.1f}\".format(\n emeter_readings[\"voltage\"]\n )\n self._emeter_params[ATTR_CURRENT_A] = \"{:.2f}\".format(\n emeter_readings[\"current\"]\n )\n\n emeter_statics = self.smartplug.get_emeter_daily()\n try:\n self._emeter_params[ATTR_TODAY_ENERGY_KWH] = \"{:.3f}\".format(\n emeter_statics[int(time.strftime(\"%e\"))]\n )\n except KeyError:\n # Device returned no daily history\n pass\n\n self._available = True\n\n except (SmartDeviceException, OSError) as ex:\n if self._available:\n _LOGGER.warning(\n \"Could not read state for %s: %s\", self.smartplug.host, ex\n )\n self._available = False", "def r3_on_off():\n \n r3_cmd_packet = b'\\x04\\x14\\x04\\x00\\x00\\xe4\\x0f'\n ser_relay.write(r3_cmd_packet)", "def enable(self):\n if not self.tm_started:\n for name, tm in self.air_traffic_manager.items():\n logging.debug(\"Starting tm %s\" % name)\n tm.start()\n tm_started = 
True\n\n logging.debug(\"Enabling switch %s\" % self.name)\n self.disabled = False", "def turn_on(self, **kwargs):\n self._is_on = True", "def lighton(update: Update, context: CallbackContext) -> None:\n if __sauna.control.getPortValue(\"Light Sensor\") == 0:\n # TODO Mit Stromstossrelais ist dieser Code richtig\n # __sauna.control.togglePortValue(\"Light Switch\")\n update.message.reply_text(\"Light is on\")\n else:\n update.message.reply_text(\"Light was already on\")\n\n __sauna.control.setPortValue(\"Light Switch\")\n val = __sauna.control.getPortValue(\"Light Switch\")\n update.message.reply_text(\"Light Switch := \" + str(val))", "def switch(ind, status):\n print(\"Switching :\", ind, \">>\", status == 'on')\n GPIO.output(ind, status == 'on')", "def on(self):\n\t\trb0 = [0x00]\n\t\trb1 = [0x00, 0x00]\n\t\tattempts = 0\n\n\t\twhile self.state != ON and attempts < MAX_RETRIES:\n\t\t\tself.spi.transfer([0x03], rb0, 1)\t\t## Send the command byte; response will be written to rb0\n\t\t\ttime.sleep(9e-3) \t\t\t\t\t\t## Sleep for 9 ms\n\t\t\tself.spi.transfer([0x00, 0x01], rb1, 2)\t## Send the following 2 bytes; response will be written to rb1\n\t\t\ttime.sleep(0.1)\n\n\t\t\tif rb0[0] < 0: \t\t\t\t\t\t## Account for implicit unsigned-to-signed \n\t\t\t\trb0[0] += 256\t\t\t\t\t## conversion from the transfer operation\n\n\t\t\tattempts += 1\n\t\t\tprint(f\"[{self.__class__.__name__}::on]\", end=' ')\n\t\t\tif rb0[0] == 0xF3 and rb1[0] == 0x03: \t## Ensure response values are as expected\n\t\t\t\tself.state = ON \n\t\t\t\tprint(\"SUCCESS -- device powered on.\")\n\t\t\telse:\n\t\t\t\tif attempts != MAX_RETRIES:\n\t\t\t\t\tprint(f\"Attempt #{attempts} failed -- retrying after delay ...\")\n\t\t\t\t\ttime.sleep(RETRY_DELAY)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"ERROR -- command failed.\")\n\n\t\treturn self.state == ON", "def turn_on(self, **kwargs):\n setattr(self.resource, self.variable, True)", "async def async_turn_off(self) -> None:\n if CONF_POWER_COMMAND_TOPIC in self._config:\n mqtt_payload = self._command_templates[CONF_POWER_COMMAND_TEMPLATE](\n self._config[CONF_PAYLOAD_OFF]\n )\n await self._publish(CONF_POWER_COMMAND_TOPIC, mqtt_payload)\n if self._optimistic:\n self._attr_hvac_mode = HVACMode.OFF\n self.async_write_ha_state()\n return\n # Fall back to default behavior without power command topic\n await super().async_turn_off()", "def set_monitor_mode(controller_name):\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"down\"])\n subprocess.run([\"airmon-ng\", \"check\", \"kill\"])\n subprocess.run([\"iw\", wifi_name, \"set\", \"monitor\", \"none\"])\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"up\"])", "def rtsOn():\n pass", "def set_light_on(self):\r\n self._light = \"ON\"", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def ethernet_on(self):\n if not self.healthy:\n self.health_check()\n if not self._ethernet_switch:\n raise errors.CapabilityNotReadyError(\n device_name=self._device_name,\n msg=\"Not set up for ethernet switching.\")\n self._ethernet_switch.switch_power.power_on(self.ethernet_port_number)", "def turn_on(self, **kwargs):\n self._is_on = True\n self.schedule_update_ha_state()\n self.hass.data[ZIGATE_DOMAIN].action_onoff(self._device.addr,\n self._endpoint,\n 1)", "def turn_on(self):\n self._interrupt_flash()\n if not self.on:\n GPIO.output(self.pin, GPIO.HIGH)\n self.on = True", "def on(self):\n print(f\"RF {self.name} on\")\n self.status(True)", "def toggle_feedback(self):\n 
self.feedback = not self.feedback\n if self.feedback:\n self.dcm_pitch_tserver.write_attribute('StepBacklash',0)\n self._start_loop_feedback()\n else:\n # print('In toggle feedback')\n self.dcm_pitch_tserver.write_attribute('StepBacklash',self.dcm_step_backlash)\n self._stop_loop_feedback()", "async def async_turn_on(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.force_update()", "def switch(self, new_state, deviceid, outlet=None):\n\n # we're in the grace period, no state change\n if self._skipped_login:\n _LOGGER.info(\"Grace period, no state change\")\n return (not new_state)\n\n self._ws = self._get_ws()\n \n if not self._ws:\n _LOGGER.warning('invalid websocket, state cannot be changed')\n return (not new_state)\n\n # convert from True/False to on/off\n if isinstance(new_state, (bool)):\n new_state = 'on' if new_state else 'off'\n\n device = self.get_device(deviceid)\n\n if outlet is not None:\n _LOGGER.debug(\"Switching `%s - %s` on outlet %d to state: %s\", \\\n device['deviceid'], device['name'] , (outlet+1) , new_state)\n else:\n _LOGGER.debug(\"Switching `%s` to state: %s\", deviceid, new_state)\n\n if not device:\n _LOGGER.error('unknown device to be updated')\n return False\n\n # the payload rule is like this:\n # normal device (non-shared) \n # apikey = login apikey (= device apikey too)\n #\n # shared device\n # apikey = device apikey\n # selfApiKey = login apikey (yes, it's typed corectly selfApikey and not selfApiKey :|)\n\n if outlet is not None:\n params = { 'switches' : device['params']['switches'] }\n params['switches'][outlet]['switch'] = new_state\n\n else:\n params = { 'switch' : new_state }\n\n payload = {\n 'action' : 'update',\n 'userAgent' : 'app',\n 'params' : params,\n 'apikey' : device['apikey'],\n 'deviceid' : str(deviceid),\n 'sequence' : str(time.time()).replace('.',''),\n 'controlType' : device['params']['controlType'] if 'controlType' in device['params'] else 4,\n 'ts' : 0\n }\n\n # this key is needed for a shared device\n if device['apikey'] != self.get_user_apikey():\n payload['selfApikey'] = self.get_user_apikey()\n\n self._ws.send(json.dumps(payload))\n wsresp = self._ws.recv()\n # _LOGGER.debug(\"switch socket: %s\", wsresp)\n \n self._ws.close() # no need to keep websocket open (for now)\n self._ws = None\n\n # set also te pseudo-internal state of the device until the real refresh kicks in\n for idx, device in enumerate(self._devices):\n if device['deviceid'] == deviceid:\n if outlet is not None:\n self._devices[idx]['params']['switches'][outlet]['switch'] = new_state\n else:\n self._devices[idx]['params']['switch'] = new_state\n\n\n # @TODO add some sort of validation here, maybe call the devices status \n # only IF MAIN STATUS is done over websocket exclusively\n\n return new_state", "def get_switch(self, conf, dpid):\n\t\tpass", "async def async_turn_off(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"auto-on\":\n path = \"/interface/ethernet\"\n 
self._ctrl.set_value(path, param, value, \"poe-out\", \"off\")\n\n await self._ctrl.async_update()", "def on(self):\n self._set_state(on=True)", "def _on_switch_turned_off(\n self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict\n ) -> None:\n self.set_value(self.entity_ids[CONF_TIMER_SLIDER], 0)", "def enable_sensor_power():\n sen = digital.SensorPower(\"senpwr\") \n sen.set()", "def connectAdapter(self):\n self.canusb = pycanusb.CanUSB(bitrate='500')\n print('CanUSB: ',self.canusb)\n Msg = Switch_to_Operational_State_Msg()\n QTimer.singleShot(50,lambda msg = Msg : self.initialization(Msg))", "def turn_on(self) -> None:\n self._monoprice.set_power(self._zone_id, True)", "async def unlight(self) -> None:\n self.lit = False\n await self.run_command(\"miner fault_light off\")\n print(\"unlight\" + self.ip)", "def lightning_turnon(self):\n self.turnOn()", "def _force_off(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'off')", "def on(self):\n status = self.dev.ctrl_transfer(0x40, 0x01, 0x0001, 0xa0, [])\n if status == 0:\n self.ev.set()\n return (status == 0)", "def r7_on_off():\n \n r7_cmd_packet = b'\\x04\\x14\\x40\\x00\\x00\\xa8\\x0f'\n ser_relay.write(r7_cmd_packet)", "def misystem_smartvpn_switch(self, enable: models.BasicStatus, mode: models.SmartVPNMode) -> models.SmartVPNInfoResponse:\n return apply_model(\n models.SmartVPNInfoResponse,\n self.do_get_request(f\"/misystem/smartvpn_switch?enable={enable.value}&mode={mode.value}\")\n )" ]
[ "0.7354772", "0.6991087", "0.68995047", "0.6771655", "0.645624", "0.639057", "0.6386869", "0.63163376", "0.6220862", "0.6093367", "0.60827076", "0.5999483", "0.5967314", "0.5949829", "0.59453136", "0.593405", "0.59292585", "0.5903088", "0.5894138", "0.58599454", "0.583519", "0.5815652", "0.5797081", "0.5793762", "0.5792561", "0.57848555", "0.5779927", "0.57765055", "0.5768244", "0.5764093", "0.5758357", "0.57518864", "0.5719004", "0.5696194", "0.56949717", "0.5682778", "0.56820333", "0.5680006", "0.5674951", "0.5651676", "0.56486464", "0.5644265", "0.5639877", "0.56346536", "0.5621225", "0.56203103", "0.5586467", "0.55852056", "0.55835426", "0.5566078", "0.5562292", "0.55618024", "0.55432683", "0.55378896", "0.5536732", "0.5528829", "0.55207795", "0.5513411", "0.5510938", "0.551055", "0.5502019", "0.54942155", "0.5493915", "0.54928744", "0.5491355", "0.5489722", "0.5489448", "0.54866886", "0.54833174", "0.548051", "0.5474535", "0.5464804", "0.5461878", "0.5457709", "0.54513735", "0.5444533", "0.54163986", "0.5413185", "0.5408868", "0.54067117", "0.53880244", "0.5385894", "0.5382329", "0.53740907", "0.53664255", "0.53643656", "0.5358437", "0.53509176", "0.5348157", "0.53462315", "0.5344527", "0.5339027", "0.5334729", "0.53314924", "0.5319993", "0.5319456", "0.5316447", "0.5316082", "0.5313053", "0.53060305" ]
0.58260435
21
default values of parameters from Ciresan et al.
def rotate_scale(X, min_angle=-15, max_angle=15, min_scale=0.85, max_scale=1.15, random_state=None, n_jobs=1):
    if random_state is None:
        rng = np.random
    else:
        rng = np.random.RandomState(random_state)
    angles = rng.uniform(min_angle, max_angle, size=X.shape[0])
    scales = rng.uniform(min_scale, max_scale, size=X.shape[0])
    X_rot = Parallel(n_jobs=n_jobs)(delayed(rotate_scale_one)(X[i], angles[i], scales[i]) for i in range(X.shape[0]))
    return np.array(X_rot, dtype='float32')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_default_parameters(self):\n super().set_default_parameters()", "def default_params():\n params = {}\n params['dataset'] = 'adult'\n params['engines'] = ['MD','RDA']\n params['iters'] = 10000\n params['epsilon'] = 1.0\n params['delta'] = 0.0\n params['bounded'] = True\n params['frequency'] = 1\n params['seed'] = 0\n params['save'] = None\n params['load'] = None\n params['plot'] = None\n\n return params", "def default_hparams():\n raise NotImplementedError('Not implemented')", "def test_defaults(self):\n fparam = FParameter(POSITIONAL_ONLY)\n assert fparam.kind == POSITIONAL_ONLY\n for k, v in FPARAM_DEFAULTS.items():\n assert getattr(fparam, k) == v", "def _default_parameters():\n\n return {\n 'opt': 'adadelta',\n 'activation_function': 'softmax',\n 'lr': 0.0001,\n 'decay': 1e-6,\n 'loss': 'categorical_crossentropy',\n 'batch_size': 32,\n 'nb_epoch': 20,\n 'shuffle': True,\n 'momentum': 0.9,\n 'nesterov': True,\n 'rho': 0.95,\n 'epsilon': 1e-08,\n 'beta_1': 0.9,\n 'beta_2': 0.999,\n 'horizontal_flip': False,\n 'im_size': 240,#256,\n 'dense_layer': 1024,\n 'nb_classes': 10,\n 'nb_channels': 3,\n 'dropout': 0.5,\n 'metrics': ['accuracy'],\n 'volume': None,\n 'input_size': 25,\n 'temporal': False,\n 'input_dim': 512,\n 'nb_frames': 60,\n 'stride': 16,\n 'nb_hidden':512,\n 'lstm': False\n\n }", "def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}", "def default_parameters():\n return BackendNSParameters()", "def getDefaultParams():\n defpar = [\n # coordinate system\n ['crd_sys', \"'sph'\", 'Coordinate system'],\n ['nx', '[60, 40, 30]', 'Number of grid points in the first dimension'],\n ['xbound', '[0.1*au, 30.*au, 110.*au, 250.*au]', 'Number of radial grid points'],\n ['ny', '[10,30, 30, 10]',\n 'Number of grid points in the second dimension'],\n ['ybound', '[0.1, pi/6., pi/2., 5.*pi/6., 3.04]',\n 'Number of radial grid points'],\n ['nz', '[361]', 'Number of grid points in the third dimension'],\n ['zbound', '[0., 2.0*pi]', 'Number of radial grid points'],\n # star related\n ['tstar', '[3900.0]', 'Temperature of star'],\n ['mstar', '[1.0*ms]', 'Mass of the star(s)'],\n ['rstar', '[2.5*rs]', 'Radius of star'],\n # gas density \n ['Rin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['Rin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['Rout', '[30*au, 120*au]', 'outer bounding edge'],\n ['Rout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['sigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['sig0', '[1e2, 1e1]', 'surface density at Rin in g/cm^2'], \n ['ring_r', '[50*au]', 'location of gaussian ring'], \n ['ring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['ring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['ring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'], \n ['cutgdens', '1e-30', 'cut for density'], \n ['Rt', '100*au', 'radius for scale height'], \n ['Ht', '10*au', 'scale height'], \n ['qheight', '1.25', 'height power-law'], \n # gas species\n ['gasspec_mol_name', \"['12co']\", 'name of molecule'],\n ['gasspec_mol_abun', '[5e-5]', 'mass abundance '],\n ['gasspec_mol_dbase_type', \"['leiden']\", ''],\n 
['gasspec_mol_freezeout_dfact', '[1e-3]',\n 'Factor by which the molecular abundance should be decreased in the freeze-out zone'],\n ['mol_freeze_Ht', '[24*au]', 'Height at Rt, with index=qheight, for freeze out to happen'],\n ['mol_freeze_del_hfrac', '0.2', 'Gaussian taper for freeze-out. del H = h * hfrac'],\n ['mol_snowR', '[20*au]', 'Radius when freeze out begins to happen'],\n # dust density\n # flat power-law parts\n ['dRin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['dRin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['dRout', '[30*au, 120*au]', 'outer bounding edge'],\n ['dRout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['dsigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['dsig0', '[1e2, 1e1]', 'surface density at Rin'],\n # Lynden-Bell parts\n ['dLB_Rin', '[0.1*au]', 'inner bounding radius'], \n ['dLB_Rsig', '[30*au]', 'charcteristic radius'],\n ['dLB_sigp', '[-1.0]', 'power-law exponent. Careful, the sign is different from the usual function by a negative sign for consistency with flat power-law'], \n ['dLB_sig0', '[1e2]', 'surface density'], \n # ring parts\n ['dring_r', '[50*au]', 'location of gaussian ring'],\n ['dring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['dring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['dring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'],\n ['cutddens', '1e-30', 'cut for dust density'],\n ['dRt', '[100*au]', 'radius for scale height for each grain size'], \n ['dHt', '[10*au]', 'scale height for each grain size'], \n ['dqheight', '[1.25]', 'scale height power-law for dust'], \n # temperature\n ['T0mid', '50', 'mid plane temperature at Rt'],\n ['T0atm', '50', 'atmosphere temperature at Rt'],\n ['zqratio', '3', 'factor of Ht of where temperature transition occurs'],\n ['qmid', '-0.5', 'midplane temperature exponent'],\n ['qatm', '-0.5', 'atmosphere temperature exponent'],\n ['hdel', '2', 'temperature transition exponent '],\n ['cuttemp', '10', 'temperature cut'], \n # alignment\n ['altype', \"'toroidal'\", 'alignment type']\n ]\n\n return defpar", "def default_parameters(name):\n prm = Parameters(name)\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def test_defaults(self):\n params = DefaultsInterface()\n # make sure from_param_server can be called repeatedly\n params.from_param_server()\n\n self.assertEqual(params.verbosity_param_w_default, 'info')\n\n self.assertEqual(params.int_param_w_default, 1)\n self.assertAlmostEqual(params.double_param_w_default, 1.1)\n self.assertEqual(params.str_param_w_default, \"Hello World\")\n self.assertEqual(params.bool_param_w_default, True)\n self.assertEqual(params.long_param_w_default_int, 1)\n self.assertEqual(params.long_param_w_default_int_str, -1)\n self.assertEqual(params.long_param_w_default_long_string, 9223372036854775807)\n\n self.assertEqual(params.vector_int_param_w_default, [1, 2, 3])\n self.assertEqual(params.vector_double_param_w_default, [1.1, 1.2, 1.3])\n self.assertEqual(params.vector_string_param_w_default, [\"Hello\", \"World\"])\n\n self.assertEqual(params.map_param_w_default, {\"Hello\": \"World\"})\n self.assertEqual(params.enum_int_param_w_default, 1)\n self.assertEqual(params.enum_str_param_w_default, \"One\")", "def 
default_parameters(name):\n prm = Parameters(name)\n\n prm.add('total_volume', 5000.0) # Not important for non-closed loop. Included for compatibility.\n\n prm.add('venous_pressure', float())\n\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n return prm", "def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__", "def _default_parameters(cls) -> Options:\n params = super()._default_parameters()\n params.main_axes = None\n params.i_means = None\n params.q_means = None\n params.scales = None\n\n return params", "def define_parameters(self):", "def _default_params(self) -> dict[str, Any]:\n return {\n \"max_tokens\": self.max_tokens,\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"logprobs\": self.logprobs,\n \"echo\": self.echo,\n \"stop_sequences\": self.stop_sequences,\n \"repeat_penalty\": self.repeat_penalty,\n \"top_k\": self.top_k,\n \"n_threads\": self.n_threads,\n \"n_ctx\": self.n_ctx,\n \"n_gpu_layers\": self.n_gpu_layers,\n \"n_gqa\": self.n_gqa if self.n_gqa else None,\n \"n_parts\": self.n_parts,\n \"seed\": self.seed,\n \"f16_kv\": self.f16_kv,\n \"logits_all\": self.logits_all,\n \"vocab_only\": self.vocab_only,\n \"use_mlock\": self.use_mlock,\n \"n_batch\": self.n_batch,\n \"last_n_tokens_size\": self.last_n_tokens_size,\n \"streaming\": self.streaming,\n }", "def parameters(self):\n return self._default_params", "def default_params():\n params = {}\n params['load'] = None\n params['style'] = 'ggplot'\n params['show'] = True\n params['save'] = None\n return params", "def _use_default_params(self):\n self.params = {\n # Desktop window params\n 'pos': (100, 100),\n 'lock_pos': False,\n # Font params\n 'default_font': 'Sans 9',\n # Lessons colors\n 'lecture_color': '#009566660000',\n 'laboratory_color': '#987600000000',\n 'practice_color': '#188820eda89b',\n 'non_color': '#0000849acdf4',\n 'day_color': '#000000000000',\n # Window style\n 'full_transparent': True,\n 'window_color': '#5ad65ad65ad6',\n 'transparent_percent': 50.0,\n # View schedule settings\n 'view_sch': [True, True, True, True, True]\n }\n self.save_params()", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def getDefault():", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params", "def default_parameters():\n prm = Parameters('windkessel_model')\n\n prm.add('total_volume', float())\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def defaults() -> dict:\n pass", "def _apply_defaults(self):\n # Applies normal parameter defaults\n for scalar_parameter, value in self._DEFAULT_PARAMETER_SCALARS.items():\n if scalar_parameter not in self.parameters:\n self.parameters[scalar_parameter] = copy.copy(value)\n\n # Applies defaults to all ramp parameters\n for table_parameter, table 
in self._DEFAULT_PARAMETER_TABLES.items():\n self.parameters[table_parameter] = [list(tup) for tup in table]\n self.parameters['_' + table_parameter] = zip(*self.parameters[table_parameter])", "def _params(self, **kwargs):\n defaults = {'display_name': 'Test User',\n 'locale': 'en-us',\n 'country': 'us'}\n defaults.update(kwargs)\n\n return defaults", "def defaults():\n return {}", "def _get_default_parameters(new_values):\n no_default = [\"BEAM\", \"TYPE\", \"ERRORDEF\", \"CORRECTIONS\"]\n\n not_found = [nf for nf in no_default if nf not in new_values]\n if any(not_found):\n raise ValueError(\"Required parameters '{}' not found.\".format(not_found))\n\n # Some defaults\n default = {\n # Beam Parameters\n \"QX\": \"62.31\",\n \"QY\": \"60.32\",\n \"CHROMX\": \"3\",\n \"CHROMY\": \"3\",\n # Settings\n \"USETHIN\": \"1\",\n \"ARCERRORS\": \"0\",\n \"CALCCORRECTIONS\": \"1\",\n # Outputs\n \"NOMINALMACHINE\": \"\",\n \"ARCAPPLIED\": \"\",\n \"MQXAPPLIED\": \"\",\n \"MBIPAPPLIED\": \"\",\n \"ALLAPPLIED\": \"\",\n \"CORRECTED\": \"\",\n }\n\n # crossing angles and separation bumps\n for idx in [1,2,5,8]:\n for prefix in [\"XING\", \"SEP\", \"PHI\"]:\n default[\"{:s}{:d}\".format(prefix, idx)] = \"0\"\n\n # applied errors\n for idx in range(1, 12):\n for orientation in [\"A\", \"B\"]:\n default[\"{:s}{:d}\".format(orientation, idx)] = \"0\"\n\n # return dictionary filled with defaults and new values\n default.update(new_values)\n return default", "def default():", "def test_defaults(self):\n varp = VarPositional()\n fparam = self.assert_iterable_and_get_fparam(varp)\n assert fparam.name == 'args'\n assert fparam.type == empty\n assert not fparam.converter\n assert not fparam.validator\n assert not fparam.metadata", "def getDefaultParameterValues(self):\r\n dct = {}\r\n self.initializeRoadRunnerModel()\r\n self.roadrunnerModel.reset()\r\n for parameterName in self.parametersToFit:\r\n dct[parameterName] = self.roadrunnerModel.model[parameterName]\r\n return dct", "def default_hparams():\n return {\n \"value\": 0.,\n \"name\": \"constant_connector\"\n }", "def default_parameters():\n prm = Parameters('lvad_model')\n\n prm.add('lvad_volume', 66.0)\n\n prm.add('alpha_slope', 0.0091)\n prm.add('alpha_intercept', 1.4)\n\n prm.add('beta_slope', -0.19)\n prm.add('beta_intercept', -1.9)\n\n prm.add('frequency', float())\n\n return prm", "def init(self, cr):\n param_obj = self.pool.get('ir.config_parameter')\n for key, func in _default_parameters.iteritems():\n ids = param_obj.search(cr, 1, [('key', '=', key)])\n if not ids:\n param_obj.set_param(cr, 1, key, func())", "def without_defaults(self):\n ...", "def default_parameters(self) -> List[Parameter]:\n return self.settings.job_default_parameters", "def optional_parameters(self):\n return ['seed', 'param_card', 'apmass', 'map', 'mpid', 'mrhod']", "def test_defaults(self):\n vark = VarKeyword()\n name, fparam = self.assert_mapping_and_get_fparam(vark)\n assert name == 'kwargs'\n assert fparam.type == empty\n assert not fparam.converter\n assert not fparam.validator\n assert not fparam.metadata", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={**OptimizationParameters.parameters, **defaults}, data=data\n )", "def set_default_params(self):\n print('------------------')\n print('Setting default parameters with file ', self.input_file_name)\n if 'ssephem' not in self.__dict__:\n self.__dict__['ssephem'] = 'DE436'\n print('Setting default Solar System Ephemeris: DE436')\n if 'clock' not in self.__dict__:\n self.__dict__['clock'] 
= None\n print('Setting a default Enterprise clock convention (check the code)')\n if 'setupsamp' not in self.__dict__:\n self.__dict__['setupsamp'] = False\n if 'psrlist' in self.__dict__:\n self.psrlist = np.loadtxt(self.psrlist, dtype=np.unicode_)\n print('Only using pulsars from psrlist')\n else:\n self.__dict__['psrlist'] = []\n print('Using all available pulsars from .par/.tim directory')\n if 'psrcachefile' not in self.__dict__:\n self.psrcachefile = None\n if 'tm' not in self.__dict__:\n self.tm = 'default'\n print('Setting a default linear timing model')\n if 'inc_events' not in self.__dict__:\n self.inc_events = True\n print('Including transient events to specific pulsar models')\n if 'fref' not in self.__dict__:\n self.fref = 1400 # MHz\n print('Setting reference radio frequency to 1400 MHz')\n if 'mcmc_covm_csv' in self.__dict__ and os.path.isfile(self.mcmc_covm_csv):\n print('MCMC jump covariance matrix is available')\n self.__dict__['mcmc_covm'] = pd.read_csv(self.mcmc_covm_csv, index_col=0)\n else:\n self.__dict__['mcmc_covm'] = None\n # Copying default priors from StandardModels/CustomModels object\n # Priors are chosen not to be model-specific because HyperModel\n # (which is the only reason to have multiple models) does not support\n # different priors for different models\n for prior_key, prior_default in self.noise_model_obj().priors.items():\n if prior_key not in self.__dict__.keys():\n self.__dict__[prior_key] = prior_default\n\n # Model-dependent parameters\n for mkey in self.models:\n\n self.models[mkey].modeldict = dict()\n\n print('------------------')", "def init_params(self):\n blah", "def __init__(self, params):\n defaults = {}\n super(Regralizer, self).__init__(params, defaults)", "def parameters(self):", "def _set_params(self, params, defaults):\n new_params = OrderedDict(\n zip(params, [x if isinstance(x, Parameter) else Parameter() for x in defaults])\n )\n for key, value in self._src.items():\n if key in new_params:\n new_params[key] = value\n\n self._src = new_params", "def defaults(self):\n\n return None", "def defaults(self):\n\n return None", "def get_parameters(self):\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n return params", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"replace_existing_files\" in vars(self):\n self.replace_existing_files = False\n if not \"num_files_per_point\" in vars(self):\n self.num_files_per_point = -1\n if not \"input_location_type\" in vars(self):\n self.input_location_type = \"local\"\n if not \"output_location_type\" in vars(self):\n self.output_location_type = \"local\"", "def get_default_params() -> Dict:\n default_params = {\n \"n_estimators\": {\n \"default_value\": 100,\n \"description\": \"Number of gradient boosted trees. \"\n \"Equivalent to number of boosting rounds.\",\n \"type\": \"int\"\n },\n \"max_depth\": {\n \"default_value\": 6,\n \"description\": \"Maximum tree depth for base learners.\",\n \"type\": \"int\"\n },\n \"learning_rate\": {\n \"default_value\": 0.3,\n \"description\": \"Boosting learning rate (xgb's 'eta')\",\n \"type\": \"float\"\n },\n \"verbosity\": {\n \"default_value\": 1,\n \"description\": \"The degree of verbosity. 
Valid values are 0 (silent) - 3 (debug).\",\n \"type\": [0, 1, 2, 3]\n },\n \"booster\": {\n \"default_value\": \"gbtree\",\n \"description\": \"Specify which booster to use: gbtree, gblinear or dart.\",\n \"type\": ['gbtree', 'gblinear', 'dart']\n },\n \"tree_method\": {\n \"default_value\": \"auto\",\n \"description\":\n '''\n Specify which tree method to use. Default to auto. If this parameter\n is set to default, XGBoost will choose the most conservative option\n available. It's recommended to study this option from parameters\n document.\n ''',\n \"type\": [\"auto\", \"exact\", \"approx\", \"hist\", \"gpu_hist\"]\n },\n \"n_jobs\": {\n \"default_value\": 1,\n \"description\": '''\n Number of parallel threads used to run xgboost. When used with other Scikit-Learn\n algorithms like grid search, you may choose which algorithm to parallelize and\n balance the threads. Creating thread contention will significantly slow dowm both\n algorithms.\n ''',\n \"type\": \"int\"\n },\n \"gamma\": {\n \"default_value\": 0.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"min_child_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"max_delta_step\": {\n \"default_value\": 0.0,\n \"description\": \"Maximum delta step we allow each tree's weight estimation to be.\",\n \"type\": \"float\"\n },\n \"subsample\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of the training instance.\",\n \"type\": \"float\"\n },\n \"colsample_bytree\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns when constructing each tree.\",\n \"type\": \"float\"\n },\n \"colsample_bylevel\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each level.\",\n \"type\": \"float\"\n },\n \"colsample_bynode\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each split.\",\n \"type\": \"float\"\n },\n \"reg_alpha\": {\n \"default_value\": 0.0,\n \"description\": \"L1 regularization term on weights\",\n \"type\": \"float\"\n },\n \"reg_lambda\": {\n \"default_value\": 0.0,\n \"description\": \"L2 regularization term on weights\",\n \"type\": \"float\"\n },\n \"scale_pos_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Balancing of positive and negative weights.\",\n \"type\": \"float\"\n },\n \"random_state\": {\n \"default_value\": 0,\n \"description\": \"Random number seed.\",\n \"type\": \"int\"\n },\n \"base_score\": {\n \"default_value\": 0.5,\n \"description\": \"The initial prediction score of all instances, global bias.\",\n \"type\": \"float\"\n },\n # \"missing\": {\n # \"default_value\": None,\n # \"description\": \"Value in the data which needs to be present as a missing value.\",\n # \"type\": \"float\"\n # },\n \"num_parallel_tree\": {\n \"default_value\": 1,\n \"description\": \"Used for boosting random forest.\",\n \"type\": \"int\"\n },\n # \"monotone_constraints\": {\n # \"default_value\": \"(0,0)\",\n # \"description\": \" Constraint of variable monotonicity. \"\n # \"See tutorial for more information.\",\n # \"type\": \"str\"\n # },\n # \"interaction_constraints\": {\n # \"default_value\": None,\n # \"description\": '''\n # Constraints for interaction representing permitted interactions. The\n # constraints must be specified in the form of a nest list, e.g. 
[[0, 1],\n # [2, 3, 4]], where each inner list is a group of indices of features\n # that are allowed to interact with each other. See tutorial for more\n # information\n # ''',\n # \"type\": \"str\"\n # },\n \"importance_type\": {\n \"default_value\": \"gain\",\n \"description\": '''\n The feature importance type for the feature_importances. property:\n either \"gain\", \"weight\", \"cover\", \"total_gain\" or \"total_cover\".\n ''',\n \"type\": [\"gain\", \"weight\", \"cover\", \"total_gain\", \"total_cover\"]\n }\n }\n\n return default_params", "def f_default(self, default = 1) :\n pass", "def default_parameters(self, ptype):\n ptype = str(ptype).lower()\n if ptype == 'photometry':\n defaults = {'model': 'moffat',\n 'window': 50.0,\n 'window_units': 'pixels',\n 'fwhm': 3.0,\n 'fwhm_units': 'arcsec',\n 'psf_radius': 12.0,\n 'aperture_units': 'pixels',\n 'bg_inner': 15.0,\n 'bg_width': 10.0,\n 'show_plots': False}\n elif ptype == 'display':\n defaults = {'extension': 'first',\n 'lock_image': 'wcs',\n 'lock_slice': 'image',\n 'scale': 'zscale',\n 'cmap': 'none',\n 'zoom_fit': True,\n 'tile': True,\n 's2n_range': None,\n 'eye_viewer': True,\n 'overplots': True,\n 'ds9_viewer': True,\n 'ds9_viewer_pipeline': False,\n 'ds9_viewer_qad': True}\n elif ptype == 'plot':\n defaults = {'window': 50.0,\n 'window_units': 'pixels',\n 'color': 'tab10',\n 'share_axes': 'none',\n 'separate_plots': True,\n 'bin': 'fd',\n 'hist_limits': None,\n 'p2p_reference': 1,\n 'summary_stat': 'clipped median'}\n else:\n defaults = {}\n\n return defaults", "def print_defaults():\n print 'area_bounds :', default_area_bounds\n print 'area_bounds_format :', default_area_bounds_format\n print 'area_bounds_range :', default_area_bounds_range\n print 'years_bounds :', default_years_are_bounds\n print 'dates_are_bounds :', default_dates_are_bounds\n print 'init_date_str_format :', default_init_date_str_format\n print 'member_name :', default_member_name\n print 'period_name :', default_period_name\n print 'initialistion_time_name :', default_initialistion_time_name", "def __init__(self,default=(0,0),length=None,**params):\n if length is None:\n self.length = len(default)\n else:\n self.length = length\n \n self._check(default)\n Parameter.__init__(self,default=default,**params)", "def _invocation_params(self) -> Dict[str, Any]:\n return self._default_params", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"region_size\" in vars(self):\n self.region_size = 0.08\n if not \"RGB_bands\" in vars(self):\n self.RGB_bands = [\"B4\",\"B3\",\"B2\"]\n if not \"split_RGB_images\" in vars(self):\n self.split_RGB_images = True\n # in PROCESSED dir we expect RGB. 
NDVI, BWNDVI\n self.num_files_per_point = 3", "def parameters(self):\n pass", "def initializeParameters(self):\n\n self.params[2].value = False\n self.params[3].enabled = False\n self.params[7].value = True\n self.params[7].enabled = False\n self.params[8].value = None\n self.params[8].enabled = False", "def test_kw_args_with_defaults():\n assert arguments.fun_opt_kw_params() == ('blue', 'red', 'yellow', 'orange')", "def params():\n raise NotImplementedError", "def test_default_parameters() -> None:\n mapie = MapieClassifier()\n assert mapie.estimator is None\n assert mapie.method == \"score\"\n assert mapie.cv == \"prefit\"\n assert mapie.verbose == 0\n assert mapie.n_jobs is None", "def defaults():\n\n #dummy = FieldTemplate.dummy\n\n return None", "def default_hparams():\n hparams = DatasetBase.default_hparams()\n hparams.update({\n \"transforms\": None,\n \"processed_csv\": None,\n \"mode\": None,\n \"batch_size\": 1,\n \"shuffle\": False,\n \"shuffle_buffer_size\": 32,\n \"input_channel\": \"RGB\"\n })\n return hparams", "def test_default_parameters() -> None:\n mapie = MapieRegressor()\n assert mapie.estimator is None\n assert mapie.method == \"plus\"\n assert mapie.cv is None\n assert not mapie.ensemble\n assert mapie.verbose == 0\n assert mapie.n_jobs is None", "def rec_default(self):\n pass", "def show_defaults(context: CreateCommandsContext):\n logger.info(\"Default parameters when creating jobs:\")\n for parameter in context.settings.job_default_parameters:\n logger.info(parameter.describe())", "def defaults(self):\n self.lib.iperf_defaults(self._test)", "def setupParameters(self, **pars):\n \n seldict = {}\n for k,v in pars.items():\n if v != None and v != \"\":\n seldict[k] = v\n \n return seldict", "def _add_parameter_default(self, msg_param):\n default_types = msg_param.default_types\n while default_types: # iterate over each bit\n def_type = default_types & (~default_types+1)\n default_types ^= def_type\n def_type -= 1\n if def_type not in self._default_parameters:\n self._default_parameters[def_type] = {}\n self._default_parameters[def_type][msg_param.key] = msg_param.value", "def test_call_default_params(self):\r\n\r\n exp = {'0': ('R27DLI_4812', 'CTGGGCCGTATCTC'),\r\n 'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),\r\n '2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),\r\n 'ref0': ('ref0', 'CCCAAAAAAATTTTTT'),\r\n }\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath,\r\n self.tmp_otu_filepath,\r\n self.ref_seq_filepath)\r\n self.assertEqual(obs, exp)", "def default_slc_dict():\n par = _par.ParameterFile.from_file(_os.path.dirname(__file__) + '/default_slc_par.par')\n return par", "def default_training_params():\n N_EPOCHS = 100\n BATCH_SIZE = 64\n EPSILON = 0.0001\n return N_EPOCHS, BATCH_SIZE, EPSILON", "def initialize_params(self, params):\n pass", "def post_init(cr, registry):\n from ecore import SUPERUSER_ID\n from ecore.addons.base.ir.ir_config_parameter import _default_parameters\n ICP = registry['ir.config_parameter']\n for k, func in _default_parameters.items():\n v = ICP.get_param(cr, SUPERUSER_ID, k)\n _, g = func()\n ICP.set_param(cr, SUPERUSER_ID, k, v, g)", "def set_params(self):\r\n pass", "def defaults():\r\n return fmin([], [])", "def __init__(self, default=None):\n # this will hold the list of current parameters,\n # corresponding to the reduction recipe\n self.current = []\n self.stepnames = []\n\n # this holds initial set values for each step\n self.default = {}\n if default is not None:\n for 
step in default:\n pdict = ParameterSet()\n for param in default[step]:\n pdict.set_param(**param)\n self.default[step] = pdict", "def __init__( self, parameters={} ):\n self.params = {}", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={**ThermodynamicsParameters.parameters, **defaults}, data=data\n )", "def test_default_encoder_parameters(self):\n cparams = openjp2.set_default_encoder_parameters()\n\n self.assertEqual(cparams.res_spec, 0)\n self.assertEqual(cparams.cblockw_init, 64)\n self.assertEqual(cparams.cblockh_init, 64)\n self.assertEqual(cparams.numresolution, 6)\n self.assertEqual(cparams.subsampling_dx, 1)\n self.assertEqual(cparams.subsampling_dy, 1)\n self.assertEqual(cparams.mode, 0)\n self.assertEqual(cparams.prog_order, glymur.core.LRCP)\n self.assertEqual(cparams.roi_shift, 0)\n self.assertEqual(cparams.cp_tx0, 0)\n self.assertEqual(cparams.cp_ty0, 0)\n\n self.assertEqual(cparams.irreversible, 0)", "def get_default_args(**kw):\n default_args_exp = {\n \"output_file\": \"ml_demo.c\",\n \"function_name\": \"ml_demo\",\n \"precision\": ML_Binary32,\n \"accuracy\": ML_Faithful,\n \"target\": GenericProcessor.get_target_instance()\n }\n default_args_exp.update(kw)\n return DefaultArgTemplate(**default_args_exp)", "def SetDefaultParams(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_SetDefaultParams(self, *args)", "def print_parameter(args, parser):\n print(\"Default parameter\")\n\n for para in vars(args):\n text = str(para) + \" : \"\n print(text + str(parser.get_default(para)))", "def get_base_parameters(cls):\n return {\n \"cutoff\": None,\n \"method\": None\n }", "def defaults():\n global __preset_staging\n \n t = TreeDict('Default_Parameter_Tree', __defaultpresettree__ = True)\n __preset_staging[id(t)] = t\n return t", "def f_onearg_and_default(self, arg1, default = 1) :\n pass", "def _autoInitPars(self):\n for p in self._pars:\n setattr(self,p,self.defaultparval)", "def def_paramt():\n Zeff = 1.0\n amu = 2.0\n mf = mp*amu\n return Zeff, amu,mf", "def default_raw_par():\n par = _par.ParameterFile.from_file(_os.path.dirname(__file__) + '/default_raw_parameter.raw_par')\n return par", "def get_default_configuration():\n # Pre-configured default values for various parameters:\n default_config = {\n \"name\":\"Transient\",\n \"auto\":True,\n \"ra\":0.0,\n \"dec\":0.0,\n \"radius\":10.0,\n \"resolution\":1.8,\n \"energy\":70.0,\n \"pixsize\": 16,\n \"respcode\":\"czti_Aepix.out\",\n \"txycode\":\"radec2txty.out\",\n \"resppath\":\"pixarea\",\n \"plotfile\":\"plots/localize.pdf\",\n\t \"lc_bin\":5.0,\n\t \"typ\":\"band\",\n\t \"comp_bin\":20,\t\n \"verbose\":True,\n \"do_fit\":True\n }\n required_config = {\n 'l2file':\"_level2.evt\",\n 'infile':\"file.evt\",\n 'mkffile':\"file.mkf\",\n 'trigtime':0.00,\n 'transtart':0.00,\n 'tranend':0.00,\n 'bkg1start':0.00,\n 'bkg1end':0.00,\n 'bkg2start':0.00,\n 'bkg2end':0.00,\n\t 'alpha':0.00,\n\t 'beta':0.00,\n\t 'E0':0.00,\n\t 'A':0.00\n }\n return default_config, required_config", "def get_default_arg():\n\n arg = 'cog:C_cog_space_GRP world:parts_GRP trueWorld:noXform_GRP '\n return arg", "def _set_default_parameters(self, parameters):\n parameters_dict = parameters.get_dict()\n from aiida_crystal_dft.io.f9 import Fort9\n with self.inputs.wavefunction.open(mode='rb') as f:\n file_name = f.name\n wf = Fort9(file_name)\n if 'band' in parameters_dict:\n\n # automatic generation of k-point path\n if 'bands' not in parameters_dict['band']:\n self.logger.info('Proceeding with automatic generation of 
k-points path')\n structure = wf.get_structure()\n shrink, points, path = get_shrink_kpoints_path(structure)\n parameters_dict['band']['shrink'] = shrink\n parameters_dict['band']['bands'] = path\n\n # automatic generation of first and last band\n if 'first' not in parameters_dict['band']:\n parameters_dict['band']['first'] = 1\n if 'last' not in parameters_dict['band']:\n parameters_dict['band']['last'] = wf.get_ao_number()\n\n if 'dos' in parameters_dict:\n # automatic generation of projections in case no projections are given\n # TODO: explicit asking for automatic projections\n if ('projections_atoms' not in parameters_dict['dos'] and\n 'projections_orbitals' not in parameters_dict['dos']):\n self.logger.info('Proceeding with automatic generation of dos atomic projections')\n parameters_dict['dos']['projections_atoms'] = get_dos_projections_atoms(wf.get_atomic_numbers())\n\n # automatic generation of first and last band\n if 'first' not in parameters_dict['dos']:\n parameters_dict['dos']['first'] = 1\n if 'last' not in parameters_dict['dos']:\n parameters_dict['dos']['last'] = wf.get_ao_number()\n return get_data_class('dict')(dict=parameters_dict)", "def help_default_values():\n click.echo_via_pager(docgen.generate_default_value_help())", "def get_default_kwargs(self, **defaults):\n return super().get_default_kwargs(**{\"port\": 22})", "def get_default_args(**kw):\n default_args_log = {\n \"output_file\": \"POLY.c\",\n \"function_name\": \"POLY\",\n \"precision\": ML_Binary64,\n \"target\": GenericProcessor.get_target_instance(),\n \"function\": None,\n \"interval\": None,\n \"epsilon\": None\n }\n default_args_log.update(kw)\n return DefaultArgTemplate(**default_args_log)", "def set_hyperparams(use_defaults):\n if use_defaults:\n n_neurons, n_hidden, n_steps, k_prob = default_hyperparams()\n return n_neurons, n_hidden, n_steps, k_prob\n\n print (\"Select number of neurons in recurrent layer (default \" +\n \"100):\")\n n_neurons = int(input())\n print (\"Select number of hidden neurons in fully connected \" +\n \"layer (default 100):\")\n n_hidden = int(input())\n print (\"Select n_steps; the max number of words to be read \" +\n \"from each abstract (default 50):\")\n n_steps = int(input())\n print (\"Select k_prob; the dropout probability (default 0.5):\")\n k_prob = float(input())\n\n return n_neurons, n_hidden, n_steps, k_prob", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={\n **EnergyParameters.parameters,\n **EnergyParameters.output,\n **defaults,\n },\n data=data,\n )", "def resolve_kwdefaults(sign: inspect.Signature) -> Dict[str, Any]:\n kwdefaults = dict() # type: Dict[str, Any]\n\n # Add to the defaults all the values that are needed by the contracts.\n for param in sign.parameters.values():\n if param.default != inspect.Parameter.empty:\n kwdefaults[param.name] = param.default\n\n return kwdefaults", "def init(self, parameters):\n pass", "def Parameters():\n\n raise NotImplementedError()", "def defaults():\n\n return {\"cr_shelter_flag_id\": S3ReusableField.dummy(\"flag_id\"),\n }" ]
[ "0.77844316", "0.74832076", "0.7425697", "0.74207234", "0.7282676", "0.72558606", "0.7254189", "0.72380584", "0.72216916", "0.7146505", "0.71138066", "0.7112786", "0.70974725", "0.7068888", "0.70080286", "0.70061696", "0.69936126", "0.698711", "0.69810283", "0.6930297", "0.69134754", "0.69134754", "0.69070345", "0.68848974", "0.6878302", "0.68423134", "0.6819727", "0.6784084", "0.67483693", "0.6735755", "0.6700215", "0.669843", "0.66959447", "0.66348827", "0.66342676", "0.6624801", "0.66139746", "0.6613575", "0.65981215", "0.6582314", "0.6581358", "0.65751165", "0.6569136", "0.6565779", "0.6560363", "0.65596586", "0.65596586", "0.65534204", "0.6539173", "0.6532545", "0.6530885", "0.6527605", "0.6518618", "0.651081", "0.6502026", "0.64815235", "0.64781475", "0.64285314", "0.64247864", "0.6424158", "0.6412892", "0.640914", "0.6387645", "0.63606554", "0.6353139", "0.6350292", "0.63441104", "0.6323749", "0.6317641", "0.63176405", "0.6310943", "0.6301455", "0.6301352", "0.6296234", "0.6293949", "0.6292805", "0.6290569", "0.62761307", "0.62556016", "0.6244174", "0.6224553", "0.6222832", "0.6218038", "0.62154543", "0.6203418", "0.6192367", "0.6183282", "0.6167447", "0.61612606", "0.6155906", "0.61510396", "0.61416703", "0.61406773", "0.6140558", "0.6133432", "0.61321396", "0.6129624", "0.61222786", "0.61186594", "0.61131585", "0.61097795" ]
0.0
-1
default values of parameters from Ciresan et al.
import numpy as np
from joblib import Parallel, delayed  # assumed provider of Parallel/delayed

def elastic_transform(X, min_alpha=36, max_alpha=38, min_sigma=5, max_sigma=6,
                      random_state=None, n_jobs=1):
    if random_state is None:
        rng = np.random
    else:
        rng = np.random.RandomState(random_state)
    # Draw an independent (alpha, sigma) pair for every sample in the batch.
    alphas = rng.uniform(min_alpha, max_alpha, size=X.shape[0])
    sigmas = rng.uniform(min_sigma, max_sigma, size=X.shape[0])
    # elastic_transform_one (defined elsewhere in the source) warps one sample.
    X_elas = Parallel(n_jobs=n_jobs)(
        delayed(elastic_transform_one)(X[i], alphas[i], sigmas[i])
        for i in range(X.shape[0]))
    return np.array(X_elas, dtype='float32')
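These defaults match the per-sample elastic-distortion ranges used by Ciresan et al. for MNIST: alpha in [36, 38] scales the random displacement field and sigma in [5, 6] is the width of the Gaussian that smooths it, following Simard et al. (2003). A minimal usage sketch, assuming elastic_transform_one is importable alongside the function above; the batch shape and seed below are illustrative:

import numpy as np

# Hypothetical batch of ten 28x28 grayscale digits in [0, 1].
X = np.random.rand(10, 28, 28).astype('float32')

# Each sample gets its own randomly drawn (alpha, sigma); fixing
# random_state makes the augmentation reproducible across runs.
X_aug = elastic_transform(X, random_state=0, n_jobs=1)
assert X_aug.shape == X.shape and X_aug.dtype == np.float32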
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_default_parameters(self):\n super().set_default_parameters()", "def default_params():\n params = {}\n params['dataset'] = 'adult'\n params['engines'] = ['MD','RDA']\n params['iters'] = 10000\n params['epsilon'] = 1.0\n params['delta'] = 0.0\n params['bounded'] = True\n params['frequency'] = 1\n params['seed'] = 0\n params['save'] = None\n params['load'] = None\n params['plot'] = None\n\n return params", "def default_hparams():\n raise NotImplementedError('Not implemented')", "def test_defaults(self):\n fparam = FParameter(POSITIONAL_ONLY)\n assert fparam.kind == POSITIONAL_ONLY\n for k, v in FPARAM_DEFAULTS.items():\n assert getattr(fparam, k) == v", "def _default_parameters():\n\n return {\n 'opt': 'adadelta',\n 'activation_function': 'softmax',\n 'lr': 0.0001,\n 'decay': 1e-6,\n 'loss': 'categorical_crossentropy',\n 'batch_size': 32,\n 'nb_epoch': 20,\n 'shuffle': True,\n 'momentum': 0.9,\n 'nesterov': True,\n 'rho': 0.95,\n 'epsilon': 1e-08,\n 'beta_1': 0.9,\n 'beta_2': 0.999,\n 'horizontal_flip': False,\n 'im_size': 240,#256,\n 'dense_layer': 1024,\n 'nb_classes': 10,\n 'nb_channels': 3,\n 'dropout': 0.5,\n 'metrics': ['accuracy'],\n 'volume': None,\n 'input_size': 25,\n 'temporal': False,\n 'input_dim': 512,\n 'nb_frames': 60,\n 'stride': 16,\n 'nb_hidden':512,\n 'lstm': False\n\n }", "def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}", "def default_parameters():\n return BackendNSParameters()", "def getDefaultParams():\n defpar = [\n # coordinate system\n ['crd_sys', \"'sph'\", 'Coordinate system'],\n ['nx', '[60, 40, 30]', 'Number of grid points in the first dimension'],\n ['xbound', '[0.1*au, 30.*au, 110.*au, 250.*au]', 'Number of radial grid points'],\n ['ny', '[10,30, 30, 10]',\n 'Number of grid points in the second dimension'],\n ['ybound', '[0.1, pi/6., pi/2., 5.*pi/6., 3.04]',\n 'Number of radial grid points'],\n ['nz', '[361]', 'Number of grid points in the third dimension'],\n ['zbound', '[0., 2.0*pi]', 'Number of radial grid points'],\n # star related\n ['tstar', '[3900.0]', 'Temperature of star'],\n ['mstar', '[1.0*ms]', 'Mass of the star(s)'],\n ['rstar', '[2.5*rs]', 'Radius of star'],\n # gas density \n ['Rin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['Rin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['Rout', '[30*au, 120*au]', 'outer bounding edge'],\n ['Rout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['sigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['sig0', '[1e2, 1e1]', 'surface density at Rin in g/cm^2'], \n ['ring_r', '[50*au]', 'location of gaussian ring'], \n ['ring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['ring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['ring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'], \n ['cutgdens', '1e-30', 'cut for density'], \n ['Rt', '100*au', 'radius for scale height'], \n ['Ht', '10*au', 'scale height'], \n ['qheight', '1.25', 'height power-law'], \n # gas species\n ['gasspec_mol_name', \"['12co']\", 'name of molecule'],\n ['gasspec_mol_abun', '[5e-5]', 'mass abundance '],\n ['gasspec_mol_dbase_type', \"['leiden']\", ''],\n 
['gasspec_mol_freezeout_dfact', '[1e-3]',\n 'Factor by which the molecular abundance should be decreased in the freeze-out zone'],\n ['mol_freeze_Ht', '[24*au]', 'Height at Rt, with index=qheight, for freeze out to happen'],\n ['mol_freeze_del_hfrac', '0.2', 'Gaussian taper for freeze-out. del H = h * hfrac'],\n ['mol_snowR', '[20*au]', 'Radius when freeze out begins to happen'],\n # dust density\n # flat power-law parts\n ['dRin', '[0.1*au, 80*au]', 'inner bounding edge'],\n ['dRin_w', '[0, 1*au]', 'gaussian taper before inner edge'], \n ['dRout', '[30*au, 120*au]', 'outer bounding edge'],\n ['dRout_w', '[1*au, 1*au]', 'gaussian taper after outer edge'], \n ['dsigp', '[-1.0, -1.5]', 'power-law surface density'],\n ['dsig0', '[1e2, 1e1]', 'surface density at Rin'],\n # Lynden-Bell parts\n ['dLB_Rin', '[0.1*au]', 'inner bounding radius'], \n ['dLB_Rsig', '[30*au]', 'charcteristic radius'],\n ['dLB_sigp', '[-1.0]', 'power-law exponent. Careful, the sign is different from the usual function by a negative sign for consistency with flat power-law'], \n ['dLB_sig0', '[1e2]', 'surface density'], \n # ring parts\n ['dring_r', '[50*au]', 'location of gaussian ring'],\n ['dring_win', '[5*au]', 'width of gaussian ring in inner radius'],\n ['dring_wout', '[5*au]', 'width of gaussian ring in outer radius'], \n ['dring_a', '[1e2]', 'surface density at center of ring in g/cm^2]'],\n ['cutddens', '1e-30', 'cut for dust density'],\n ['dRt', '[100*au]', 'radius for scale height for each grain size'], \n ['dHt', '[10*au]', 'scale height for each grain size'], \n ['dqheight', '[1.25]', 'scale height power-law for dust'], \n # temperature\n ['T0mid', '50', 'mid plane temperature at Rt'],\n ['T0atm', '50', 'atmosphere temperature at Rt'],\n ['zqratio', '3', 'factor of Ht of where temperature transition occurs'],\n ['qmid', '-0.5', 'midplane temperature exponent'],\n ['qatm', '-0.5', 'atmosphere temperature exponent'],\n ['hdel', '2', 'temperature transition exponent '],\n ['cuttemp', '10', 'temperature cut'], \n # alignment\n ['altype', \"'toroidal'\", 'alignment type']\n ]\n\n return defpar", "def default_parameters(name):\n prm = Parameters(name)\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def test_defaults(self):\n params = DefaultsInterface()\n # make sure from_param_server can be called repeatedly\n params.from_param_server()\n\n self.assertEqual(params.verbosity_param_w_default, 'info')\n\n self.assertEqual(params.int_param_w_default, 1)\n self.assertAlmostEqual(params.double_param_w_default, 1.1)\n self.assertEqual(params.str_param_w_default, \"Hello World\")\n self.assertEqual(params.bool_param_w_default, True)\n self.assertEqual(params.long_param_w_default_int, 1)\n self.assertEqual(params.long_param_w_default_int_str, -1)\n self.assertEqual(params.long_param_w_default_long_string, 9223372036854775807)\n\n self.assertEqual(params.vector_int_param_w_default, [1, 2, 3])\n self.assertEqual(params.vector_double_param_w_default, [1.1, 1.2, 1.3])\n self.assertEqual(params.vector_string_param_w_default, [\"Hello\", \"World\"])\n\n self.assertEqual(params.map_param_w_default, {\"Hello\": \"World\"})\n self.assertEqual(params.enum_int_param_w_default, 1)\n self.assertEqual(params.enum_str_param_w_default, \"One\")", "def 
default_parameters(name):\n prm = Parameters(name)\n\n prm.add('total_volume', 5000.0) # Not important for non-closed loop. Included for compatibility.\n\n prm.add('venous_pressure', float())\n\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n return prm", "def parameters_default(cls):\n return cls._Parameters.__new__.__defaults__", "def _default_parameters(cls) -> Options:\n params = super()._default_parameters()\n params.main_axes = None\n params.i_means = None\n params.q_means = None\n params.scales = None\n\n return params", "def define_parameters(self):", "def _default_params(self) -> dict[str, Any]:\n return {\n \"max_tokens\": self.max_tokens,\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"logprobs\": self.logprobs,\n \"echo\": self.echo,\n \"stop_sequences\": self.stop_sequences,\n \"repeat_penalty\": self.repeat_penalty,\n \"top_k\": self.top_k,\n \"n_threads\": self.n_threads,\n \"n_ctx\": self.n_ctx,\n \"n_gpu_layers\": self.n_gpu_layers,\n \"n_gqa\": self.n_gqa if self.n_gqa else None,\n \"n_parts\": self.n_parts,\n \"seed\": self.seed,\n \"f16_kv\": self.f16_kv,\n \"logits_all\": self.logits_all,\n \"vocab_only\": self.vocab_only,\n \"use_mlock\": self.use_mlock,\n \"n_batch\": self.n_batch,\n \"last_n_tokens_size\": self.last_n_tokens_size,\n \"streaming\": self.streaming,\n }", "def parameters(self):\n return self._default_params", "def default_params():\n params = {}\n params['load'] = None\n params['style'] = 'ggplot'\n params['show'] = True\n params['save'] = None\n return params", "def _use_default_params(self):\n self.params = {\n # Desktop window params\n 'pos': (100, 100),\n 'lock_pos': False,\n # Font params\n 'default_font': 'Sans 9',\n # Lessons colors\n 'lecture_color': '#009566660000',\n 'laboratory_color': '#987600000000',\n 'practice_color': '#188820eda89b',\n 'non_color': '#0000849acdf4',\n 'day_color': '#000000000000',\n # Window style\n 'full_transparent': True,\n 'window_color': '#5ad65ad65ad6',\n 'transparent_percent': 50.0,\n # View schedule settings\n 'view_sch': [True, True, True, True, True]\n }\n self.save_params()", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def getDefault():", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def _initialize_defaults(self):\n for key, value in defaults.items():\n if key not in self.source_params:\n self.source_params[key] = value", "def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params", "def default_parameters():\n prm = Parameters('windkessel_model')\n\n prm.add('total_volume', float())\n\n prm.add('venous_compliance', float())\n prm.add('arterial_compliance', float())\n\n prm.add('venous_resistance', float())\n prm.add('arterial_resistance', float())\n prm.add('peripheral_resistance', float())\n\n prm.add('venous_resting_volume', float())\n prm.add('arterial_resting_volume', float())\n\n return prm", "def defaults() -> dict:\n pass", "def _apply_defaults(self):\n # Applies normal parameter defaults\n for scalar_parameter, value in self._DEFAULT_PARAMETER_SCALARS.items():\n if scalar_parameter not in self.parameters:\n self.parameters[scalar_parameter] = copy.copy(value)\n\n # Applies defaults to all ramp parameters\n for table_parameter, table 
in self._DEFAULT_PARAMETER_TABLES.items():\n self.parameters[table_parameter] = [list(tup) for tup in table]\n self.parameters['_' + table_parameter] = zip(*self.parameters[table_parameter])", "def _params(self, **kwargs):\n defaults = {'display_name': 'Test User',\n 'locale': 'en-us',\n 'country': 'us'}\n defaults.update(kwargs)\n\n return defaults", "def defaults():\n return {}", "def _get_default_parameters(new_values):\n no_default = [\"BEAM\", \"TYPE\", \"ERRORDEF\", \"CORRECTIONS\"]\n\n not_found = [nf for nf in no_default if nf not in new_values]\n if any(not_found):\n raise ValueError(\"Required parameters '{}' not found.\".format(not_found))\n\n # Some defaults\n default = {\n # Beam Parameters\n \"QX\": \"62.31\",\n \"QY\": \"60.32\",\n \"CHROMX\": \"3\",\n \"CHROMY\": \"3\",\n # Settings\n \"USETHIN\": \"1\",\n \"ARCERRORS\": \"0\",\n \"CALCCORRECTIONS\": \"1\",\n # Outputs\n \"NOMINALMACHINE\": \"\",\n \"ARCAPPLIED\": \"\",\n \"MQXAPPLIED\": \"\",\n \"MBIPAPPLIED\": \"\",\n \"ALLAPPLIED\": \"\",\n \"CORRECTED\": \"\",\n }\n\n # crossing angles and separation bumps\n for idx in [1,2,5,8]:\n for prefix in [\"XING\", \"SEP\", \"PHI\"]:\n default[\"{:s}{:d}\".format(prefix, idx)] = \"0\"\n\n # applied errors\n for idx in range(1, 12):\n for orientation in [\"A\", \"B\"]:\n default[\"{:s}{:d}\".format(orientation, idx)] = \"0\"\n\n # return dictionary filled with defaults and new values\n default.update(new_values)\n return default", "def default():", "def test_defaults(self):\n varp = VarPositional()\n fparam = self.assert_iterable_and_get_fparam(varp)\n assert fparam.name == 'args'\n assert fparam.type == empty\n assert not fparam.converter\n assert not fparam.validator\n assert not fparam.metadata", "def getDefaultParameterValues(self):\r\n dct = {}\r\n self.initializeRoadRunnerModel()\r\n self.roadrunnerModel.reset()\r\n for parameterName in self.parametersToFit:\r\n dct[parameterName] = self.roadrunnerModel.model[parameterName]\r\n return dct", "def default_hparams():\n return {\n \"value\": 0.,\n \"name\": \"constant_connector\"\n }", "def default_parameters():\n prm = Parameters('lvad_model')\n\n prm.add('lvad_volume', 66.0)\n\n prm.add('alpha_slope', 0.0091)\n prm.add('alpha_intercept', 1.4)\n\n prm.add('beta_slope', -0.19)\n prm.add('beta_intercept', -1.9)\n\n prm.add('frequency', float())\n\n return prm", "def init(self, cr):\n param_obj = self.pool.get('ir.config_parameter')\n for key, func in _default_parameters.iteritems():\n ids = param_obj.search(cr, 1, [('key', '=', key)])\n if not ids:\n param_obj.set_param(cr, 1, key, func())", "def without_defaults(self):\n ...", "def default_parameters(self) -> List[Parameter]:\n return self.settings.job_default_parameters", "def optional_parameters(self):\n return ['seed', 'param_card', 'apmass', 'map', 'mpid', 'mrhod']", "def test_defaults(self):\n vark = VarKeyword()\n name, fparam = self.assert_mapping_and_get_fparam(vark)\n assert name == 'kwargs'\n assert fparam.type == empty\n assert not fparam.converter\n assert not fparam.validator\n assert not fparam.metadata", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={**OptimizationParameters.parameters, **defaults}, data=data\n )", "def set_default_params(self):\n print('------------------')\n print('Setting default parameters with file ', self.input_file_name)\n if 'ssephem' not in self.__dict__:\n self.__dict__['ssephem'] = 'DE436'\n print('Setting default Solar System Ephemeris: DE436')\n if 'clock' not in self.__dict__:\n self.__dict__['clock'] 
= None\n print('Setting a default Enterprise clock convention (check the code)')\n if 'setupsamp' not in self.__dict__:\n self.__dict__['setupsamp'] = False\n if 'psrlist' in self.__dict__:\n self.psrlist = np.loadtxt(self.psrlist, dtype=np.unicode_)\n print('Only using pulsars from psrlist')\n else:\n self.__dict__['psrlist'] = []\n print('Using all available pulsars from .par/.tim directory')\n if 'psrcachefile' not in self.__dict__:\n self.psrcachefile = None\n if 'tm' not in self.__dict__:\n self.tm = 'default'\n print('Setting a default linear timing model')\n if 'inc_events' not in self.__dict__:\n self.inc_events = True\n print('Including transient events to specific pulsar models')\n if 'fref' not in self.__dict__:\n self.fref = 1400 # MHz\n print('Setting reference radio frequency to 1400 MHz')\n if 'mcmc_covm_csv' in self.__dict__ and os.path.isfile(self.mcmc_covm_csv):\n print('MCMC jump covariance matrix is available')\n self.__dict__['mcmc_covm'] = pd.read_csv(self.mcmc_covm_csv, index_col=0)\n else:\n self.__dict__['mcmc_covm'] = None\n # Copying default priors from StandardModels/CustomModels object\n # Priors are chosen not to be model-specific because HyperModel\n # (which is the only reason to have multiple models) does not support\n # different priors for different models\n for prior_key, prior_default in self.noise_model_obj().priors.items():\n if prior_key not in self.__dict__.keys():\n self.__dict__[prior_key] = prior_default\n\n # Model-dependent parameters\n for mkey in self.models:\n\n self.models[mkey].modeldict = dict()\n\n print('------------------')", "def init_params(self):\n blah", "def __init__(self, params):\n defaults = {}\n super(Regralizer, self).__init__(params, defaults)", "def parameters(self):", "def _set_params(self, params, defaults):\n new_params = OrderedDict(\n zip(params, [x if isinstance(x, Parameter) else Parameter() for x in defaults])\n )\n for key, value in self._src.items():\n if key in new_params:\n new_params[key] = value\n\n self._src = new_params", "def defaults(self):\n\n return None", "def defaults(self):\n\n return None", "def get_parameters(self):\n params = {}\n for p in self.DEFAULT_VALUES.keys():\n params[p] = getattr(self, p)\n return params", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"replace_existing_files\" in vars(self):\n self.replace_existing_files = False\n if not \"num_files_per_point\" in vars(self):\n self.num_files_per_point = -1\n if not \"input_location_type\" in vars(self):\n self.input_location_type = \"local\"\n if not \"output_location_type\" in vars(self):\n self.output_location_type = \"local\"", "def get_default_params() -> Dict:\n default_params = {\n \"n_estimators\": {\n \"default_value\": 100,\n \"description\": \"Number of gradient boosted trees. \"\n \"Equivalent to number of boosting rounds.\",\n \"type\": \"int\"\n },\n \"max_depth\": {\n \"default_value\": 6,\n \"description\": \"Maximum tree depth for base learners.\",\n \"type\": \"int\"\n },\n \"learning_rate\": {\n \"default_value\": 0.3,\n \"description\": \"Boosting learning rate (xgb's 'eta')\",\n \"type\": \"float\"\n },\n \"verbosity\": {\n \"default_value\": 1,\n \"description\": \"The degree of verbosity. 
Valid values are 0 (silent) - 3 (debug).\",\n \"type\": [0, 1, 2, 3]\n },\n \"booster\": {\n \"default_value\": \"gbtree\",\n \"description\": \"Specify which booster to use: gbtree, gblinear or dart.\",\n \"type\": ['gbtree', 'gblinear', 'dart']\n },\n \"tree_method\": {\n \"default_value\": \"auto\",\n \"description\":\n '''\n Specify which tree method to use. Default to auto. If this parameter\n is set to default, XGBoost will choose the most conservative option\n available. It's recommended to study this option from parameters\n document.\n ''',\n \"type\": [\"auto\", \"exact\", \"approx\", \"hist\", \"gpu_hist\"]\n },\n \"n_jobs\": {\n \"default_value\": 1,\n \"description\": '''\n Number of parallel threads used to run xgboost. When used with other Scikit-Learn\n algorithms like grid search, you may choose which algorithm to parallelize and\n balance the threads. Creating thread contention will significantly slow dowm both\n algorithms.\n ''',\n \"type\": \"int\"\n },\n \"gamma\": {\n \"default_value\": 0.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"min_child_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Minimum loss reduction required to make a further \"\n \"partition on a leaf node of the tree.\",\n \"type\": \"float\"\n },\n \"max_delta_step\": {\n \"default_value\": 0.0,\n \"description\": \"Maximum delta step we allow each tree's weight estimation to be.\",\n \"type\": \"float\"\n },\n \"subsample\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of the training instance.\",\n \"type\": \"float\"\n },\n \"colsample_bytree\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns when constructing each tree.\",\n \"type\": \"float\"\n },\n \"colsample_bylevel\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each level.\",\n \"type\": \"float\"\n },\n \"colsample_bynode\": {\n \"default_value\": 1.0,\n \"description\": \"Subsample ratio of columns for each split.\",\n \"type\": \"float\"\n },\n \"reg_alpha\": {\n \"default_value\": 0.0,\n \"description\": \"L1 regularization term on weights\",\n \"type\": \"float\"\n },\n \"reg_lambda\": {\n \"default_value\": 0.0,\n \"description\": \"L2 regularization term on weights\",\n \"type\": \"float\"\n },\n \"scale_pos_weight\": {\n \"default_value\": 1.0,\n \"description\": \"Balancing of positive and negative weights.\",\n \"type\": \"float\"\n },\n \"random_state\": {\n \"default_value\": 0,\n \"description\": \"Random number seed.\",\n \"type\": \"int\"\n },\n \"base_score\": {\n \"default_value\": 0.5,\n \"description\": \"The initial prediction score of all instances, global bias.\",\n \"type\": \"float\"\n },\n # \"missing\": {\n # \"default_value\": None,\n # \"description\": \"Value in the data which needs to be present as a missing value.\",\n # \"type\": \"float\"\n # },\n \"num_parallel_tree\": {\n \"default_value\": 1,\n \"description\": \"Used for boosting random forest.\",\n \"type\": \"int\"\n },\n # \"monotone_constraints\": {\n # \"default_value\": \"(0,0)\",\n # \"description\": \" Constraint of variable monotonicity. \"\n # \"See tutorial for more information.\",\n # \"type\": \"str\"\n # },\n # \"interaction_constraints\": {\n # \"default_value\": None,\n # \"description\": '''\n # Constraints for interaction representing permitted interactions. The\n # constraints must be specified in the form of a nest list, e.g. 
[[0, 1],\n # [2, 3, 4]], where each inner list is a group of indices of features\n # that are allowed to interact with each other. See tutorial for more\n # information\n # ''',\n # \"type\": \"str\"\n # },\n \"importance_type\": {\n \"default_value\": \"gain\",\n \"description\": '''\n The feature importance type for the feature_importances. property:\n either \"gain\", \"weight\", \"cover\", \"total_gain\" or \"total_cover\".\n ''',\n \"type\": [\"gain\", \"weight\", \"cover\", \"total_gain\", \"total_cover\"]\n }\n }\n\n return default_params", "def f_default(self, default = 1) :\n pass", "def default_parameters(self, ptype):\n ptype = str(ptype).lower()\n if ptype == 'photometry':\n defaults = {'model': 'moffat',\n 'window': 50.0,\n 'window_units': 'pixels',\n 'fwhm': 3.0,\n 'fwhm_units': 'arcsec',\n 'psf_radius': 12.0,\n 'aperture_units': 'pixels',\n 'bg_inner': 15.0,\n 'bg_width': 10.0,\n 'show_plots': False}\n elif ptype == 'display':\n defaults = {'extension': 'first',\n 'lock_image': 'wcs',\n 'lock_slice': 'image',\n 'scale': 'zscale',\n 'cmap': 'none',\n 'zoom_fit': True,\n 'tile': True,\n 's2n_range': None,\n 'eye_viewer': True,\n 'overplots': True,\n 'ds9_viewer': True,\n 'ds9_viewer_pipeline': False,\n 'ds9_viewer_qad': True}\n elif ptype == 'plot':\n defaults = {'window': 50.0,\n 'window_units': 'pixels',\n 'color': 'tab10',\n 'share_axes': 'none',\n 'separate_plots': True,\n 'bin': 'fd',\n 'hist_limits': None,\n 'p2p_reference': 1,\n 'summary_stat': 'clipped median'}\n else:\n defaults = {}\n\n return defaults", "def print_defaults():\n print 'area_bounds :', default_area_bounds\n print 'area_bounds_format :', default_area_bounds_format\n print 'area_bounds_range :', default_area_bounds_range\n print 'years_bounds :', default_years_are_bounds\n print 'dates_are_bounds :', default_dates_are_bounds\n print 'init_date_str_format :', default_init_date_str_format\n print 'member_name :', default_member_name\n print 'period_name :', default_period_name\n print 'initialistion_time_name :', default_initialistion_time_name", "def __init__(self,default=(0,0),length=None,**params):\n if length is None:\n self.length = len(default)\n else:\n self.length = length\n \n self._check(default)\n Parameter.__init__(self,default=default,**params)", "def _invocation_params(self) -> Dict[str, Any]:\n return self._default_params", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"region_size\" in vars(self):\n self.region_size = 0.08\n if not \"RGB_bands\" in vars(self):\n self.RGB_bands = [\"B4\",\"B3\",\"B2\"]\n if not \"split_RGB_images\" in vars(self):\n self.split_RGB_images = True\n # in PROCESSED dir we expect RGB. 
NDVI, BWNDVI\n self.num_files_per_point = 3", "def parameters(self):\n pass", "def initializeParameters(self):\n\n self.params[2].value = False\n self.params[3].enabled = False\n self.params[7].value = True\n self.params[7].enabled = False\n self.params[8].value = None\n self.params[8].enabled = False", "def test_kw_args_with_defaults():\n assert arguments.fun_opt_kw_params() == ('blue', 'red', 'yellow', 'orange')", "def params():\n raise NotImplementedError", "def test_default_parameters() -> None:\n mapie = MapieClassifier()\n assert mapie.estimator is None\n assert mapie.method == \"score\"\n assert mapie.cv == \"prefit\"\n assert mapie.verbose == 0\n assert mapie.n_jobs is None", "def defaults():\n\n #dummy = FieldTemplate.dummy\n\n return None", "def default_hparams():\n hparams = DatasetBase.default_hparams()\n hparams.update({\n \"transforms\": None,\n \"processed_csv\": None,\n \"mode\": None,\n \"batch_size\": 1,\n \"shuffle\": False,\n \"shuffle_buffer_size\": 32,\n \"input_channel\": \"RGB\"\n })\n return hparams", "def test_default_parameters() -> None:\n mapie = MapieRegressor()\n assert mapie.estimator is None\n assert mapie.method == \"plus\"\n assert mapie.cv is None\n assert not mapie.ensemble\n assert mapie.verbose == 0\n assert mapie.n_jobs is None", "def rec_default(self):\n pass", "def show_defaults(context: CreateCommandsContext):\n logger.info(\"Default parameters when creating jobs:\")\n for parameter in context.settings.job_default_parameters:\n logger.info(parameter.describe())", "def defaults(self):\n self.lib.iperf_defaults(self._test)", "def setupParameters(self, **pars):\n \n seldict = {}\n for k,v in pars.items():\n if v != None and v != \"\":\n seldict[k] = v\n \n return seldict", "def _add_parameter_default(self, msg_param):\n default_types = msg_param.default_types\n while default_types: # iterate over each bit\n def_type = default_types & (~default_types+1)\n default_types ^= def_type\n def_type -= 1\n if def_type not in self._default_parameters:\n self._default_parameters[def_type] = {}\n self._default_parameters[def_type][msg_param.key] = msg_param.value", "def test_call_default_params(self):\r\n\r\n exp = {'0': ('R27DLI_4812', 'CTGGGCCGTATCTC'),\r\n 'ref1': ('ref1', 'GGGGGGGAAAAAAAAAAAAA'),\r\n '2': ('W3Cecum_4858', 'TTGGGCCGTGTCTCAGT'),\r\n 'ref0': ('ref0', 'CCCAAAAAAATTTTTT'),\r\n }\r\n app = ReferenceRepSetPicker(params={'Algorithm': 'first',\r\n 'ChoiceF': first_id})\r\n obs = app(self.tmp_seq_filepath,\r\n self.tmp_otu_filepath,\r\n self.ref_seq_filepath)\r\n self.assertEqual(obs, exp)", "def default_slc_dict():\n par = _par.ParameterFile.from_file(_os.path.dirname(__file__) + '/default_slc_par.par')\n return par", "def default_training_params():\n N_EPOCHS = 100\n BATCH_SIZE = 64\n EPSILON = 0.0001\n return N_EPOCHS, BATCH_SIZE, EPSILON", "def initialize_params(self, params):\n pass", "def post_init(cr, registry):\n from ecore import SUPERUSER_ID\n from ecore.addons.base.ir.ir_config_parameter import _default_parameters\n ICP = registry['ir.config_parameter']\n for k, func in _default_parameters.items():\n v = ICP.get_param(cr, SUPERUSER_ID, k)\n _, g = func()\n ICP.set_param(cr, SUPERUSER_ID, k, v, g)", "def set_params(self):\r\n pass", "def defaults():\r\n return fmin([], [])", "def __init__(self, default=None):\n # this will hold the list of current parameters,\n # corresponding to the reduction recipe\n self.current = []\n self.stepnames = []\n\n # this holds initial set values for each step\n self.default = {}\n if default is not None:\n for 
step in default:\n pdict = ParameterSet()\n for param in default[step]:\n pdict.set_param(**param)\n self.default[step] = pdict", "def __init__( self, parameters={} ):\n self.params = {}", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={**ThermodynamicsParameters.parameters, **defaults}, data=data\n )", "def test_default_encoder_parameters(self):\n cparams = openjp2.set_default_encoder_parameters()\n\n self.assertEqual(cparams.res_spec, 0)\n self.assertEqual(cparams.cblockw_init, 64)\n self.assertEqual(cparams.cblockh_init, 64)\n self.assertEqual(cparams.numresolution, 6)\n self.assertEqual(cparams.subsampling_dx, 1)\n self.assertEqual(cparams.subsampling_dy, 1)\n self.assertEqual(cparams.mode, 0)\n self.assertEqual(cparams.prog_order, glymur.core.LRCP)\n self.assertEqual(cparams.roi_shift, 0)\n self.assertEqual(cparams.cp_tx0, 0)\n self.assertEqual(cparams.cp_ty0, 0)\n\n self.assertEqual(cparams.irreversible, 0)", "def get_default_args(**kw):\n default_args_exp = {\n \"output_file\": \"ml_demo.c\",\n \"function_name\": \"ml_demo\",\n \"precision\": ML_Binary32,\n \"accuracy\": ML_Faithful,\n \"target\": GenericProcessor.get_target_instance()\n }\n default_args_exp.update(kw)\n return DefaultArgTemplate(**default_args_exp)", "def SetDefaultParams(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_SetDefaultParams(self, *args)", "def print_parameter(args, parser):\n print(\"Default parameter\")\n\n for para in vars(args):\n text = str(para) + \" : \"\n print(text + str(parser.get_default(para)))", "def get_base_parameters(cls):\n return {\n \"cutoff\": None,\n \"method\": None\n }", "def defaults():\n global __preset_staging\n \n t = TreeDict('Default_Parameter_Tree', __defaultpresettree__ = True)\n __preset_staging[id(t)] = t\n return t", "def f_onearg_and_default(self, arg1, default = 1) :\n pass", "def _autoInitPars(self):\n for p in self._pars:\n setattr(self,p,self.defaultparval)", "def def_paramt():\n Zeff = 1.0\n amu = 2.0\n mf = mp*amu\n return Zeff, amu,mf", "def default_raw_par():\n par = _par.ParameterFile.from_file(_os.path.dirname(__file__) + '/default_raw_parameter.raw_par')\n return par", "def get_default_configuration():\n # Pre-configured default values for various parameters:\n default_config = {\n \"name\":\"Transient\",\n \"auto\":True,\n \"ra\":0.0,\n \"dec\":0.0,\n \"radius\":10.0,\n \"resolution\":1.8,\n \"energy\":70.0,\n \"pixsize\": 16,\n \"respcode\":\"czti_Aepix.out\",\n \"txycode\":\"radec2txty.out\",\n \"resppath\":\"pixarea\",\n \"plotfile\":\"plots/localize.pdf\",\n\t \"lc_bin\":5.0,\n\t \"typ\":\"band\",\n\t \"comp_bin\":20,\t\n \"verbose\":True,\n \"do_fit\":True\n }\n required_config = {\n 'l2file':\"_level2.evt\",\n 'infile':\"file.evt\",\n 'mkffile':\"file.mkf\",\n 'trigtime':0.00,\n 'transtart':0.00,\n 'tranend':0.00,\n 'bkg1start':0.00,\n 'bkg1end':0.00,\n 'bkg2start':0.00,\n 'bkg2end':0.00,\n\t 'alpha':0.00,\n\t 'beta':0.00,\n\t 'E0':0.00,\n\t 'A':0.00\n }\n return default_config, required_config", "def get_default_arg():\n\n arg = 'cog:C_cog_space_GRP world:parts_GRP trueWorld:noXform_GRP '\n return arg", "def _set_default_parameters(self, parameters):\n parameters_dict = parameters.get_dict()\n from aiida_crystal_dft.io.f9 import Fort9\n with self.inputs.wavefunction.open(mode='rb') as f:\n file_name = f.name\n wf = Fort9(file_name)\n if 'band' in parameters_dict:\n\n # automatic generation of k-point path\n if 'bands' not in parameters_dict['band']:\n self.logger.info('Proceeding with automatic generation of 
k-points path')\n structure = wf.get_structure()\n shrink, points, path = get_shrink_kpoints_path(structure)\n parameters_dict['band']['shrink'] = shrink\n parameters_dict['band']['bands'] = path\n\n # automatic generation of first and last band\n if 'first' not in parameters_dict['band']:\n parameters_dict['band']['first'] = 1\n if 'last' not in parameters_dict['band']:\n parameters_dict['band']['last'] = wf.get_ao_number()\n\n if 'dos' in parameters_dict:\n # automatic generation of projections in case no projections are given\n # TODO: explicit asking for automatic projections\n if ('projections_atoms' not in parameters_dict['dos'] and\n 'projections_orbitals' not in parameters_dict['dos']):\n self.logger.info('Proceeding with automatic generation of dos atomic projections')\n parameters_dict['dos']['projections_atoms'] = get_dos_projections_atoms(wf.get_atomic_numbers())\n\n # automatic generation of first and last band\n if 'first' not in parameters_dict['dos']:\n parameters_dict['dos']['first'] = 1\n if 'last' not in parameters_dict['dos']:\n parameters_dict['dos']['last'] = wf.get_ao_number()\n return get_data_class('dict')(dict=parameters_dict)", "def help_default_values():\n click.echo_via_pager(docgen.generate_default_value_help())", "def get_default_kwargs(self, **defaults):\n return super().get_default_kwargs(**{\"port\": 22})", "def get_default_args(**kw):\n default_args_log = {\n \"output_file\": \"POLY.c\",\n \"function_name\": \"POLY\",\n \"precision\": ML_Binary64,\n \"target\": GenericProcessor.get_target_instance(),\n \"function\": None,\n \"interval\": None,\n \"epsilon\": None\n }\n default_args_log.update(kw)\n return DefaultArgTemplate(**default_args_log)", "def set_hyperparams(use_defaults):\n if use_defaults:\n n_neurons, n_hidden, n_steps, k_prob = default_hyperparams()\n return n_neurons, n_hidden, n_steps, k_prob\n\n print (\"Select number of neurons in recurrent layer (default \" +\n \"100):\")\n n_neurons = int(input())\n print (\"Select number of hidden neurons in fully connected \" +\n \"layer (default 100):\")\n n_hidden = int(input())\n print (\"Select n_steps; the max number of words to be read \" +\n \"from each abstract (default 50):\")\n n_steps = int(input())\n print (\"Select k_prob; the dropout probability (default 0.5):\")\n k_prob = float(input())\n\n return n_neurons, n_hidden, n_steps, k_prob", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={\n **EnergyParameters.parameters,\n **EnergyParameters.output,\n **defaults,\n },\n data=data,\n )", "def resolve_kwdefaults(sign: inspect.Signature) -> Dict[str, Any]:\n kwdefaults = dict() # type: Dict[str, Any]\n\n # Add to the defaults all the values that are needed by the contracts.\n for param in sign.parameters.values():\n if param.default != inspect.Parameter.empty:\n kwdefaults[param.name] = param.default\n\n return kwdefaults", "def init(self, parameters):\n pass", "def Parameters():\n\n raise NotImplementedError()", "def defaults():\n\n return {\"cr_shelter_flag_id\": S3ReusableField.dummy(\"flag_id\"),\n }" ]
[ "0.77844316", "0.74832076", "0.7425697", "0.74207234", "0.7282676", "0.72558606", "0.7254189", "0.72380584", "0.72216916", "0.7146505", "0.71138066", "0.7112786", "0.70974725", "0.7068888", "0.70080286", "0.70061696", "0.69936126", "0.698711", "0.69810283", "0.6930297", "0.69134754", "0.69134754", "0.69070345", "0.68848974", "0.6878302", "0.68423134", "0.6819727", "0.6784084", "0.67483693", "0.6735755", "0.6700215", "0.669843", "0.66959447", "0.66348827", "0.66342676", "0.6624801", "0.66139746", "0.6613575", "0.65981215", "0.6582314", "0.6581358", "0.65751165", "0.6569136", "0.6565779", "0.6560363", "0.65596586", "0.65596586", "0.65534204", "0.6539173", "0.6532545", "0.6530885", "0.6527605", "0.6518618", "0.651081", "0.6502026", "0.64815235", "0.64781475", "0.64285314", "0.64247864", "0.6424158", "0.6412892", "0.640914", "0.6387645", "0.63606554", "0.6353139", "0.6350292", "0.63441104", "0.6323749", "0.6317641", "0.63176405", "0.6310943", "0.6301455", "0.6301352", "0.6296234", "0.6293949", "0.6292805", "0.6290569", "0.62761307", "0.62556016", "0.6244174", "0.6224553", "0.6222832", "0.6218038", "0.62154543", "0.6203418", "0.6192367", "0.6183282", "0.6167447", "0.61612606", "0.6155906", "0.61510396", "0.61416703", "0.61406773", "0.6140558", "0.6133432", "0.61321396", "0.6129624", "0.61222786", "0.61186594", "0.61131585", "0.61097795" ]
0.0
-1
Elastic deformation of images as described in [Simard2003]_.

.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
   Convolutional Neural Networks applied to Visual Document Analysis", in
   Proc. of the International Conference on Document Analysis and
   Recognition, 2003.
import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates


def elastic_transform_one(image, alpha, sigma, random_state=None):
    if random_state is None:
        random_state = np.random.randint(0, 999999)
    rng = np.random.RandomState(random_state)

    shape = image.shape
    # Smooth random displacement fields: alpha scales the displacement
    # strength, sigma is the width of the Gaussian smoothing kernel.
    dx = gaussian_filter((rng.rand(*shape) * 2 - 1), sigma,
                         mode="constant", cval=0) * alpha
    dy = gaussian_filter((rng.rand(*shape) * 2 - 1), sigma,
                         mode="constant", cval=0) * alpha

    # 'ij' indexing keeps the coordinate grids aligned with the (row, col)
    # image axes, so the transform also works for non-square images.
    x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing="ij")
    indices = np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))
    return map_coordinates(image, indices, order=1).reshape(shape)
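# A minimal usage sketch, assuming a 2-D grayscale image; the parameter
# values alpha=34 and sigma=4 are illustrative choices, not prescribed by
# [Simard2003]_.
if __name__ == "__main__":
    img = np.random.rand(64, 96)
    warped = elastic_transform_one(img, alpha=34.0, sigma=4.0, random_state=42)
    assert warped.shape == img.shape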
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_random_elastic_deformation(dummy_input):\n # Test the 2D image: H, W, C\n image, label = dummy_input(image_size=(512, 512, 3),\n label_size=(512, 512, 1))\n transform = RandomElasticDeformation()\n _image, _label = transform(image, label, elastic_deformation_orders=[3, 0])\n assert _image.shape == image.shape\n assert _image.dtype == image.dtype\n assert _label.shape == label.shape\n assert _label.dtype == label.dtype\n\n # Test the 3D image: H, W, D, C\n image, label = dummy_input(image_size=(512, 512, 20, 3),\n label_size=(512, 512, 20, 1))\n transform = RandomElasticDeformation()\n _image, _label = transform(image, label, elastic_deformation_orders=[3, 0])\n assert _image.shape == image.shape\n assert _image.dtype == image.dtype\n assert _label.shape == label.shape\n assert _label.dtype == label.dtype", "def create_elastic_indices():\n # initial values\n alpha, alpha2, sigma = 10, 10, 50\n shape = (96, 288) # same as shape of input images\n x_mesh, y_mesh = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))\n\n # below is used once per epoch for the elastic deformation\n g_1d = signal.gaussian(300, sigma)\n kernel_deform = np.outer(g_1d, g_1d)\n dx = signal.fftconvolve(np.random.rand(*shape) * 2 - 1, kernel_deform, mode='same')\n dy = signal.fftconvolve(np.random.rand(*shape) * 2 - 1, kernel_deform, mode='same')\n dx = alpha * (dx - np.mean(dx)) / np.std(dx)\n dy = alpha2 * (dy - np.mean(dy)) / np.std(dy)\n indices_x, indices_y = x_mesh + dx, y_mesh + dy\n indices_x_clipped = np.clip(indices_x, a_min=0, a_max=shape[1] - 1)\n indices_y_clipped = np.clip(indices_y, a_min=0, a_max=shape[0] - 1)\n return indices_x_clipped, indices_y_clipped", "def pre_processing_image(img):\n\n #print(img.shape)\n # apply gamma correction and show the images\n #adjusted = adjust_gamma(img, gamma=0.65)\n\n adjusted = exposure.adjust_gamma(img, gamma=1.65)\n #print(adjusted.shape)\n\n # log transform of image\n\n logarithmic_corrected = exposure.adjust_log(adjusted, 1)\n #print(logarithmic_corrected.shape)\n\n # denoising\n #dst2 = cv2.fastNlMeansDenoisingColored(logarithmic_corrected, None, 10, 10, 7, 21)\n #print(dst2.shape)\n dst2 = logarithmic_corrected\n return dst2", "def elastic_transform(image, alpha, sigma, random_state=None):\n\n print('image : ', id(image))\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n # print(random_state)\n shape = image.shape\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) * alpha\n dz = np.zeros_like(dx)\n\n x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))\n\n indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))\n distored_image = map_coordinates(image, indices, order=1, mode='nearest') # wrap,reflect, nearest\n return distored_image.reshape(image.shape)", "def elastic_transform(image, alpha_param, sigma_param, alpha_affine_param, random_state=None):\n image = np.asarray(image)\n #im_np = im_np.transpose(1,0,2) # when we go from pil to numpy array the W and L dimensions are swaped\n\n if len(image.shape) < 3: # if there is less than 3 channels (black&white) we triplicate the image\n image = np.concatenate((image[:,:, np.newaxis], image[:,:, np.newaxis], image[:,:, np.newaxis]), axis = 2)\n\n alpha = image.shape[1] * alpha_param\n sigma = image.shape[1] * sigma_param\n alpha_affine = image.shape[1] * 
alpha_affine_param\n\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n shape_size = shape[:2]\n\n # Random affine\n center_square = np.float32(shape_size) // 2\n square_size = min(shape_size) // 3\n pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])\n pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)\n M = cv2.getAffineTransform(pts1, pts2)\n image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)\n\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\n dz = np.zeros_like(dx)\n\n x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))\n indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))\n\n im_elastic = map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)\n\n return Image.fromarray(np.uint8(im_elastic))", "def deep_dream_of_extreme_control(FLAGS,model,input_images=[],num_iterations=10,step_size=0.1):\n if len(input_images) == 0:\n # use predefined images\n img_dir='/esat/opal/kkelchte/docker_home/pilot_data/visualization_images'\n input_images=sorted([img_dir+'/'+f for f in os.listdir(img_dir)])\n\n print(\"[tools.py]: extracting deep dream maps of {0} in {1}\".format([os.path.basename(i) for i in input_images], os.path.dirname(input_images[0])))\n \n # experts=np.asarray([[k]*(FLAGS.action_quantity if FLAGS.discrete else 1) for v in sorted(model.factor_offsets.values()) for k in model.factor_offsets.keys() if model.factor_offsets[k]==v]).flatten()\n\n inputs = load_images(input_images, model.input_size[1:])\n \n # collect gradients for output endpoint of evaluation model\n grads={}\n with tf.device('/cpu:0'):\n output_tensor = model.endpoints['eval']['outputs']\n for i in range(output_tensor.shape[1].value):\n layer_loss = output_tensor[:,i]\n gradients = tf.gradients(layer_loss, model.inputs)[0]\n gradients /= (tf.sqrt(tf.reduce_mean(tf.square(gradients))) + 1e-5)\n grads[output_tensor.name+'_'+str(i)]=gradients\n\n\n # apply gradient ascent for all outputs and each input image\n # if number of outputs ==1 apply gradient descent for contrast\n if len(grads.keys())== 1:\n opposite_results={}\n else:\n opposite_results=None\n\n import copy\n results = {}\n for gk in grads.keys(): \n results[gk]=copy.deepcopy(inputs)\n if isinstance(opposite_results,dict): opposite_results[gk]=copy.deepcopy(inputs)\n\n for step in range(num_iterations):\n if step%10==0: print \"{0} step: {1}\".format(time.ctime(), step)\n for i,gk in enumerate(sorted(grads.keys())):\n results[gk] += step_size * model.sess.run(grads[gk], {model.inputs: results[gk]})\n if isinstance(opposite_results,dict):\n opposite_results[gk] -= step_size * model.sess.run(grads[gk], {model.inputs: opposite_results[gk]})\n\n # Normalize results within 0:1 range\n clean_results={}\n for gk in results.keys():\n clean_results[gk]=[]\n for i in range(results[gk].shape[0]):\n clean_results[gk].append(deprocess_image(results[gk][i], one_channel=True))\n # results[gk][i]=deprocess_image(results[gk][i], one_channel=True)\n if isinstance(opposite_results,dict):\n opposite_results[gk][i]=deprocess_image(opposite_results[gk][i])\n\n # combine adjust input images in one overview image\n # one column for each input image\n # one row with each extreme control for 
separate and difference images\n num_rows=1+len(results.keys())\n fig, axes = plt.subplots(num_rows ,min(len(input_images),5),figsize=(23, 4*(len(grads.keys())+1)))\n # fig, axes = plt.subplots(num_rows ,min(len(input_images),5),figsize=(23, 4*(len(grads.keys())+1)))\n # add original images in first row\n for i in range(axes.shape[1]):\n axes[0, i].set_title(os.path.basename(input_images[i]).split('.')[0])\n axes[0, i].imshow(matplotlibprove(inputs[i]), cmap='inferno')\n axes[0, i].axis('off')\n\n # add for each filter the modified input\n row_index=1\n for gk in sorted(results.keys()):\n for i in range(axes.shape[1]):\n # print gk\n # axes[row_index, i].set_title('Grad Asc: '+gk.split('/')[1]+'/'+gk[-1]) \n axes[row_index, i].set_title('Grad Asc: '+gk)\n # axes[row_index, i].set_title(experts[row_index-1])\n\n axes[row_index, i].imshow(np.concatenate((inputs[i],np.expand_dims(clean_results[gk][i],axis=2)), axis=2), cmap='inferno')\n # axes[row_index, i].imshow(matplotlibprove(results[gk][i]), cmap='inferno')\n axes[row_index, i].axis('off')\n row_index+=1\n # In cas of continouos controls: visualize the gradient descent and difference\n # if isinstance(opposite_results,dict):\n # for gk in opposite_results.keys():\n # for i in range(axes.shape[1]):\n # # axes[row_index, i].set_title('Grad Desc: '+gk.split('/')[1]) \n # axes[row_index, i].set_title('Grad Desc: '+gk) \n # axes[row_index, i].imshow(matplotlibprove(opposite_results[gk][i]), cmap='inferno')\n # axes[row_index, i].axis('off')\n # row_index+=1\n \n # # add difference\n # for gk in opposite_results.keys():\n # for i in range(axes.shape[1]):\n # # axes[row_index, i].set_title('Diff: '+gk.split('/')[1]) \n # axes[row_index, i].set_title('Diff: '+gk) \n # axes[row_index, i].imshow(matplotlibprove(deprocess_image((opposite_results[gk][i]-results[gk][i])**2)), cmap='inferno')\n # axes[row_index, i].axis('off')\n # row_index+=1\n # else:\n # # add difference between 2 exteme actions\n # gk_left=sorted(results.keys())[0]\n # gk_right=sorted(results.keys())[-1]\n # for i in range(axes.shape[1]):\n # # axes[row_index, i].set_title('Diff : '+gk.split('/')[1]) \n # axes[row_index, i].set_title('Diff : '+gk) \n # axes[row_index, i].imshow(matplotlibprove(deprocess_image((results[gk_left][i]-results[gk_right][i])**2)), cmap='inferno')\n # axes[row_index, i].axis('off')\n # row_index+=1\n \n \n plt.savefig(FLAGS.summary_dir+FLAGS.log_tag+'/control_dream_maps.jpg',bbox_inches='tight')\n # plt.show()", "def elastic_transform(image, label):\n # Params taken from https://arxiv.org/pdf/1705.03820.pdf\n dx = dxs[np.random.randint(0, len(dxs))]\n dy = dys[np.random.randint(0, len(dys))]\n\n indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))\n\n distored_image = map_coordinates(image, indices, order=1, mode='reflect')\n distored_label = map_coordinates(np.expand_dims(label, -1), indices, order=1, mode='reflect')\n\n img, lab = distored_image.reshape(image.shape), distored_label.reshape(image.shape)[:, :, 0]\n return img, lab", "def motionDeflicker(imgs):\n b = [x[:,:,0] for x in imgs] \n g = [x[:,:,1] for x in imgs] \n r = [x[:,:,2] for x in imgs] \n b_corrected = single_deflicker(b)\n g_corrected = single_deflicker(g)\n r_corrected = single_deflicker(r)\n return cv2.merge((np.uint8(b_corrected),np.uint8(g_corrected),np.uint8(r_corrected)))", "def normalise(image):", "def mold_image(images, config):\n return images.astype(np.float32) - config.MEAN_PIXEL", "def mold_image(images, config):\n return 
images.astype(np.float32) - config.MEAN_PIXEL", "def elasticTransform_legacy(image, mask, sigma, alpha_affine, random_seed=None):\n\t\n\trandom_state = np.random.RandomState(random_seed)\n\t\n\tif len(image.shape)<3:\n\t\timage = np.expand_dims(image,-1)\n\tif len(mask.shape)<3:\n\t\tmask = np.expand_dims(mask,-1)\n\t\n\tshape = image.shape\n\tshape_size = shape[:2]\n\n\t# Random affine\n\tcenter_square = np.float32(shape_size) // 2\n\tsquare_size = min(shape_size) // 3\n\tpts1 = np.float32([center_square + square_size, [center_square[0] + square_size,center_square[1] - square_size], center_square - square_size])\n\tpts2 = pts1 + random_state.uniform(-alpha_affine,alpha_affine, size=pts1.shape).astype(np.float32)\n\t\n\tM = cv2.getAffineTransform(pts1, pts2)\n\t\n\timage_w = np.zeros_like(image)\n\tfor i in range(image.shape[-1]):\n\t\timage_w[...,i] = cv2.warpAffine(image[...,i], M, shape_size[::-1], borderMode=cv2.BORDER_CONSTANT, borderValue=int(np.min(image[...,i])))\n\t\n\tmask_w = np.zeros_like(mask)\n\tfor i in range(mask.shape[-1]):\n\t\tmask_w[...,i] = cv2.warpAffine(mask[...,i] , M, shape_size[::-1], borderMode=cv2.BORDER_REPLICATE)\n\n\tblur_size = int(2*sigma) | 1\n\tdx = cv2.GaussianBlur((random_state.rand(*shape) * 2 - 1), ksize=(blur_size, blur_size), sigmaX=sigma)\n\tdy = cv2.GaussianBlur((random_state.rand(*shape) * 2 - 1), ksize=(blur_size, blur_size), sigmaX=sigma)\n\t\n\tif len(dx.shape) < 3:\n\t\tdx = np.expand_dims(dx,-1)\n\t\tdy = np.expand_dims(dy,-1)\n\n\tgx, gy = np.meshgrid(np.arange(shape_size[1]), np.arange(shape_size[0]))\n\tgx = np.expand_dims(gx,-1)\n\tgy = np.expand_dims(gy,-1)\n\n\tgx = np.repeat(gx,dx.shape[-1], -1)\n\tgy = np.repeat(gy,dy.shape[-1], -1)\n\n\tgx = (gx + dx).astype(np.float32)\n\tgy = (gy + dy).astype(np.float32)\n\n\timage_d = np.zeros_like(image_w)\n\tmask_d = np.zeros_like(mask_w)\n\n\tfor i in range(image.shape[-1]):\n\t\timage_d[...,i] = cv2.remap(image_w[...,i], gx[...,i], gy[...,i], interpolation=cv2.INTER_LINEAR)\n\t\n\tradix = gx.shape[-1]\n\tfor i in range(mask.shape[-1]):\n\t\tmask_d[...,i] = cv2.remap(mask_w[...,i], gx[...,i%radix], gy[...,i%radix], interpolation=cv2.INTER_LINEAR)\n\t\n\treturn image_d, mask_d", "def forward(self, images):\n # assuming that the precomputed features are not already l2-normalized\n #x = l2norm(images.view( images.size(0), -1))\n #print(images.shape, self.fc )\n resnet_feat=torch.empty(len(images),2048)\n vse_feat=torch.empty(len(images),256)\n\n for idx, feat_concat in enumerate(images):\n #print(\"check\", feat_concat[:2048].shape, feat_concat[2048:].shape)\n resnet_feat[idx,:] = feat_concat[:2048]\n vse_feat[idx,:] = feat_concat[2048:]\n x1 = self.fc1(resnet_feat.cuda())\n\n x2 = self.relu(self.fc2(vse_feat.cuda()))\n x2 = self.relu(self.fc3(x2))\n x2 = self.scale(self.fc4(x2))\n #print(x2.shape, x1.shape, x1[:,:5000].shape)\n x = x1[:,:5000]+x2\n \n\n return x", "def colorize_images(self, img):\n self.load_model()\n self.mdn.eval()\n self.vae.eval()\n n, _, _ = img.shape\n img = img.astype(np.float32) / 255\n img = torch.tensor(img, dtype=torch.float, device=self.device).unsqueeze(1)\n with torch.no_grad():\n z = self.mdn(img)\n ab_out = self.vae.decode(z)\n lab_out = torch.cat((img, ab_out), dim=1)\n lab_out = self.unnormalize(lab_out).cpu().numpy()\n lab_out = np.transpose(lab_out, (0, 2, 3, 1)).astype(np.uint8)\n for i in range(n):\n color_out = cv2.cvtColor(lab_out[i], cv2.COLOR_LAB2BGR)\n color_out = cv2.resize(color_out, (96, 96), interpolation=cv2.INTER_AREA)\n 
cv2.imwrite(\"../datasets/stl10/divcolor/{}.png\".format(str(i)), color_out)\n return", "def elastic_transform(\n img: np.ndarray,\n alpha: float,\n sigma: float,\n alpha_affine: float,\n interpolation: int = cv2.INTER_LINEAR,\n border_mode: int = cv2.BORDER_REFLECT_101,\n value: Optional[ImageColorType] = None,\n random_state: Optional[np.random.RandomState] = None,\n approximate: bool = False,\n same_dxdy: bool = False,\n):\n height, width = img.shape[:2]\n\n # Random affine\n center_square = np.array((height, width), dtype=np.float32) // 2\n square_size = min((height, width)) // 3\n alpha = float(alpha)\n sigma = float(sigma)\n alpha_affine = float(alpha_affine)\n\n pts1 = np.array(\n [\n center_square + square_size,\n [center_square[0] + square_size, center_square[1] - square_size],\n center_square - square_size,\n ],\n dtype=np.float32,\n )\n pts2 = pts1 + random_utils.uniform(-alpha_affine, alpha_affine, size=pts1.shape, random_state=random_state).astype(\n np.float32\n )\n matrix = cv2.getAffineTransform(pts1, pts2)\n\n warp_fn = _maybe_process_in_chunks(\n cv2.warpAffine, M=matrix, dsize=(width, height), flags=interpolation, borderMode=border_mode, borderValue=value\n )\n img = warp_fn(img)\n\n if approximate:\n # Approximate computation smooth displacement map with a large enough kernel.\n # On large images (512+) this is approximately 2X times faster\n dx = random_utils.rand(height, width, random_state=random_state).astype(np.float32) * 2 - 1\n cv2.GaussianBlur(dx, (17, 17), sigma, dst=dx)\n dx *= alpha\n if same_dxdy:\n # Speed up even more\n dy = dx\n else:\n dy = random_utils.rand(height, width, random_state=random_state).astype(np.float32) * 2 - 1\n cv2.GaussianBlur(dy, (17, 17), sigma, dst=dy)\n dy *= alpha\n else:\n dx = np.float32(\n gaussian_filter((random_utils.rand(height, width, random_state=random_state) * 2 - 1), sigma) * alpha\n )\n if same_dxdy:\n # Speed up\n dy = dx\n else:\n dy = np.float32(\n gaussian_filter((random_utils.rand(height, width, random_state=random_state) * 2 - 1), sigma) * alpha\n )\n\n x, y = np.meshgrid(np.arange(width), np.arange(height))\n\n map_x = np.float32(x + dx)\n map_y = np.float32(y + dy)\n\n remap_fn = _maybe_process_in_chunks(\n cv2.remap, map1=map_x, map2=map_y, interpolation=interpolation, borderMode=border_mode, borderValue=value\n )\n return remap_fn(img)", "def make_e(self):\n self.img[1, 2:-1] = 1\n self.img[self.l_i / 2, 2:-1] = 1\n self.img[-2, 2:-1] = 1\n self.img[1:-1, 2] = 1\n self.img_name = 'E'", "def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output", "def forward(self, images):\n # assuming that the precomputed features are not already l2-normalized\n #x = l2norm(images.view( images.size(0), -1))\n #print(images.shape, self.fc )\n resnet_feat=torch.empty(len(images),2048)\n vse_feat=torch.empty(len(images),256)\n\n for idx, feat_concat in enumerate(images):\n #print(\"check\", feat_concat[:2048].shape, feat_concat[2048:].shape)\n #resnet_feat[idx,:] = feat_concat[:2048]\n vse_feat[idx,:] = feat_concat[2048:]\n x = self.relu(self.fc1(vse_feat.cuda()))\n x = self.relu(self.fc2(x))\n x = self.fc3(x)\n\n return x", "def erode(img, kernel = (5,5), iterations = 1):\n\ttmp = grayscale(img)\n\tk = np.ones(kernel, np.uint8)\n\terosion = cv2.erode(tmp, k, iterations= iterations)\n\treturn erosion", "def mold_image(image, config=None):\n if np.max(image) <= 1 and np.min(image) >= 0:\n image[:,:,:3] = image[:,:,:3]*2.0 - 1.0\n elif np.min(image) >= 
0:\n image[:, :, :3] = image[:, :, :3] * (1.0/127.5) - 1.0\n return image.astype(np.float32)", "def forward(self, images):\n # assuming that the precomputed features are not already l2-normalized\n x = l2norm(images.view( images.size(0), -1))\n #print(images.shape, self.fc )\n x = self.relu(self.fc1(x))\n x = self.fc2(x)\n #x = F.log_softmax(x) #no need of log softmax here if we use cross entropy as loss\n #x = self.softmax(x)\n # normalize in the joint embedding space\n \n\n return x", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def preprocess_image(img):\n return (img.astype(np.float32)/255.0 - FACENET_MEAN) / FACENET_STD", "def _image_deviation(params):\n # generate the droplet\n data_flat[free] = params\n droplet.data = unstructured_to_structured(data_flat, dtype=dtype)\n droplet.check_data()\n img = droplet._get_phase_field(phase_field.grid)[mask]\n return img - data_mask", "def energy(image):\n #create sobel operators for gradient calculation\n sobel_x = np.array([\n [-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1]])\n sobel_y = np.array([\n [-1, -2, -1],\n [0, 0, 0],\n [1, 2, 1]])\n #calculate gradients\n gradients_x = derive(image, sobel_x)\n gradients_y = derive(image, sobel_y)\n #calculate energy as defined in Avidan et al.\n energy_result = np.abs(gradients_x) + np.abs(gradients_y)\n return energy_result", "def elastic_transform(image, alpha, sigma, random_state=None):\n assert len(image.shape)==2\n\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) * alpha\n\n x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')\n indices = np.reshape(x+dx, (-1, 1)), np.reshape(y+dy, (-1, 1))\n \n return map_coordinates(image, indices, order=1).reshape(shape)", "def nature_cnn(unscaled_images):\n scaled_images = tf.cast(unscaled_images, tf.float32) / 255.\n activ = tf.nn.relu\n h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2)))\n h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2)))\n h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2)))\n h3 = conv_to_fc(h3)\n return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))", "def network_process(images, exposures, gains):\n\t\t\n\t# Getting the images\n\timage1 = images[2]\n\timage2 = images[1]\n\timage3 = images[0]\n\t\n\t# Resizing\n\timage1 = cv2.resize(image1, (image_width, image_height))\n\timage2 = cv2.resize(image2, (image_width, image_height))\n\timage3 = cv2.resize(image3, (image_width, image_height))\n\n\t# Convert images to tensors for pytorch\n\timage1 = tensor(image1)\n\timage2 = tensor(image2)\n\timage3 = tensor(image3)\n\n\t# print(\"Image 1: \", image1)\n\n\t# Get exposure values\n\texposure1 = exposures[2]\n\texposure2 = exposures[1]\n\texposure3 = exposures[0]\n\n\tgain1 = gains[2]\n\tgain2 = gains[1]\n\tgain3 = gains[0]\n\n\t# Setting 75us and 30ms as the lower and upper limits of exposure respectively\n\tadjusted_exp1 = ((exposure1-75)/(30000-75))*(1)\n\tadjusted_exp2 = ((exposure2-75)/(30000-75))*(1)\n\tadjusted_exp3 = ((exposure3-75)/(30000-75))*(1)\n\n\t# Setting 0 dB and 30 dB 
as the lower and upper limits of gain respectively\n\tadjusted_gain1 = (gain1/30)*(1)\n\tadjusted_gain2 = (gain2/30)*(1)\n\tadjusted_gain3 = (gain3/30)*(1)\n\n\t# Create a single channel for the exposure and gain values\n\tones = torch.ones((1, image_height, image_width))\n\texposure_1 = ones*adjusted_exp1\n\texposure_2 = ones*adjusted_exp2\n\texposure_3 = ones*adjusted_exp3\n\n\tgain_1 = ones*adjusted_gain1\n\tgain_2 = ones*adjusted_gain2\n\tgain_3 = ones*adjusted_gain3\n\n\tparameter_1 = torch.cat((exposure_1, gain_1), 0)\n\tparameter_2 = torch.cat((exposure_2, gain_2), 0)\n\tparameter_3 = torch.cat((exposure_3, gain_3), 0)\n\n\tnew_img1 = torch.cat((image1, parameter_1), 0)\n\tnew_img2 = torch.cat((image2, parameter_2), 0)\n\tnew_img3 = torch.cat((image3, parameter_3), 0)\n\n\tim1_im2 = torch.cat((new_img1, new_img2), 0)\n\tcombined_images = torch.cat((im1_im2, new_img3),0)\n\tfinal_images = combined_images.unsqueeze(0).to(device)\n\n\toutput = model.forward(final_images)\n\t\n\toutput_exposure = int(((output[:,0]-0)*29925)/1 + 75)\n\toutput_gain = float(((output[:,1]-0)*30)/1 + 0)\n\n\treturn output_exposure, output_gain", "def explain(self):\n # build the 2 versions of the model\n model = self.build_model()\n last_conv_model = self.build_cut_model()\n\n for i, label_name in enumerate(self.label_names):\n # This is the algorithm for the last convolution layer's tensor image\n # Get the index of the image that was classified correctly with the most confidence for the class\n predicted_col_proba = np.array(self.predicted_labels)[0][:, i]\n predicted_col_argsort = predicted_col_proba.argsort()[::-1]\n predicted_col = (predicted_col_proba > 0.2).astype(int)\n true_col = self.true_labels[:, 0]\n\n representative_image_index = None\n for most_probable_arg_index in predicted_col_argsort:\n if predicted_col[most_probable_arg_index] == true_col[most_probable_arg_index]:\n representative_image_index = most_probable_arg_index\n break\n\n # Resize the image to fit the neural network and keep the original resized image\n original_img = io.imread('{}/{}/{}'.format(path_to_img_directory, self.ex_format, np.array(self.image_names)[representative_image_index]))\n original_img = cv2.normalize(original_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n original_img = cv2.resize(original_img, dsize=(self.ex_input_size, self.ex_input_size), interpolation=cv2.INTER_CUBIC)\n img = np.expand_dims(original_img, axis=0)\n original_img = original_img[:, :, :3]\n\n # Get the output of the neural network for this image as a tensor\n model.predict(np.array(img))\n class_output = model.output[:, i]\n last_conv_layer = model.get_layer(self.ex_last_conv_layer_name1).output\n # if self.model_name == 'vit':\n # last_conv_layer = tf.nn.relu(tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024)))\n\n # Get the output for the cut model\n cut_img = last_conv_model.predict(np.array(img))[0]\n if self.model_name == 'vit':\n cut_img = np.reshape(cut_img[:256, :], (16, 16, 1024))\n cut_img = np.mean(cut_img, axis=-1)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if self.model_name == 'vit':\n cut_img[0, 0] = np.mean(cut_img)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n cut_img = cv2.resize(cut_img, (self.ex_input_size, self.ex_input_size))\n\n # This is the algorithm of the Grad-CAM model\n # Refine the output of the last convolutional layer according to the class output\n grads = 
K.gradients(class_output, last_conv_layer)[0]\n if self.model_name == 'vit':\n last_conv_layer = tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024))\n last_conv_layer = last_conv_layer / tf.norm(last_conv_layer)\n\n grads = tf.reshape(grads[:, :256, :], (-1, 16, 16, 1024))\n grads = grads / tf.norm(grads)\n\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n iterate = K.function([model.input], [pooled_grads, last_conv_layer[0]])\n pooled_grads_value, conv_layer_output_value = iterate([img])\n for j in range(self.ex_last_conv_layer_filter_number):\n conv_layer_output_value[:, :, j] *= pooled_grads_value[j]\n\n # Create a 16x16 heatmap and scale it to the same size as the original image\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (self.ex_input_size, self.ex_input_size))\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = cv2.normalize(heatmap, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n superimposed_img = cv2.addWeighted(original_img, 0.7, heatmap, 0.4, 0)\n\n # save the original image\n plt.matshow(original_img)\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'original', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the cut image\n plt.matshow(cut_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'cut', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the superimposed gradcam image\n plt.matshow(superimposed_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'gradcam', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)", "def forward(self, images, text=None, mask=None):\n b, c, fh, fw = images.shape\n images = self.patch_embedding(images) # b,d,gh,gw\n image_patchified = images.flatten(2).transpose(1, 2) # b,gh*gw,d\n #image_patchified = einops.rearrange(x, 'b d gh gw -> b (gh gw) d')\n \n if hasattr(self, 'class_token'):\n x = torch.cat((self.class_token.expand(b, -1, -1), image_patchified), dim=1) # b,gh*gw+1,d\n if hasattr(self, 'positional_embedding'): \n x = self.positional_embedding(x) # b,gh*gw+1,d\n\n # concatenate text to images\n if hasattr(self, 'text_embeddings'):\n text = self.text_embeddings(text) #b, max_text_seq_len > b, max_text_seq_len, d\n x = torch.cat((x, text), dim=1) #b, gh*gw+1+max_text_seq_len,d\n \n if self.ret_interm_repr and self.ret_attn_scores:\n x, interm_repr, scores = self.transformer(x, mask)\n elif self.ret_interm_repr:\n x, interm_repr = self.transformer(x, mask)\n elif self.ret_attn_scores:\n x, scores = self.transformer(x, mask) # b,gh*gw+1,d\n else:\n x = self.transformer(x, mask)\n \n if hasattr(self, 'pre_logits'):\n x = self.pre_logits(x) # b,d\n x = torch.tanh(x) # b,d\n \n if hasattr(self, 'fc'):\n x = self.norm(x)[:, 0] # b,d\n x = self.fc(x) # b,num_classes\n \n if self.ret_image_patchified and self.ret_interm_repr and self.ret_attn_scores:\n return x, interm_repr, scores, image_patchified\n \n elif self.ret_interm_repr and self.ret_attn_scores:\n 
return x, interm_repr, scores\n elif self.ret_interm_repr and self.ret_image_patchified:\n return x, interm_repr, image_patchified\n elif self.ret_image_patchified and self.ret_attn_scores:\n return x, scores, image_patchified\n \n elif self.ret_interm_repr:\n return x, interm_repr\n elif self.ret_image_patchified:\n return x, image_patchified\n elif self.ret_attn_scores:\n return x, scores\n \n else:\n return x", "def erode(image, kernel_size=(5, 5)):\n kernel = np.ones(kernel_size, np.uint8)\n image = cv2.erode(image, kernel)\n return image", "def erode(image, kernel_size=(5, 5)):\n kernel = np.ones(kernel_size, np.uint8)\n image = cv2.erode(image, kernel)\n return image", "def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n shape_size = shape[:2]\n \n # Random affine\n center_square = np.float32(shape_size) // 2\n square_size = min(shape_size) // 3\n pts1 = np.float32([center_square + square_size, [center_square[0]+square_size, center_square[1]-square_size], center_square - square_size])\n pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)\n M = cv2.getAffineTransform(pts1, pts2)\n image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)\n\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\n dz = np.zeros_like(dx)\n\n x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))\n indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1)), np.reshape(z, (-1, 1))\n\n return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)", "def denormalize(img, means, stds, resize_to_original=False):\n\n img = np.moveaxis(img, 0, 2)\n img = img*stds + means\n img = np.clip(img, 0, 255).astype('uint8')\n\n if resize_to_original:\n # revert def preprocess_image()\n img = img[:,(img_w//4): (img_w - img_w//4),:]\n img = cv2.copyMakeBorder( img, img.shape[0], 0,0,0, cv2.BORDER_CONSTANT) #, borderType)\n img = cv2.resize(img, (img_orig_w, img_orig_h))\n \n return img", "def elastic_transform(image, alpha, sigma, random_state=None):\n assert len(image.shape)==2\n\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) * alpha\n\n x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')\n indices = np.reshape(x+dx, (-1, 1)), np.reshape(y+dy, (-1, 1))\n\n return map_coordinates(image, indices, order=1).reshape(shape)", "def reconstruct_image(self, net, images, name):\n target_img_grid = torchvision.utils.make_grid(images)\n images = images.to(self.device)\n output = net(images)\n img_grid = torchvision.utils.make_grid(output.cpu().data)\n if self.writer is not None:\n self.writer.add_image(f'{name} Reconstruct Image', target_img_grid, self.ae_epoch)\n self.writer.add_image('Target Image', img_grid, self.ae_epoch)", "def elastic_transform(image, alpha, sigma, random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) * alpha\n dy = 
gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=\"constant\", cval=0) * alpha\n\n x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]))\n indices = np.reshape(y+dy, (-1, 1)), np.reshape(x+dx, (-1, 1))\n\n return map_coordinates(image, indices, order=1).reshape(shape)", "def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs", "def processImage(imgs):\r\n imgs = imgs.astype(np.float32)\r\n for i, img in enumerate(imgs):\r\n m = img.mean()\r\n s = img.std()\r\n imgs[i] = (img - m) / s\r\n return imgs", "def get_image(images,n,normproc=(\"normalize.edgemean\",{})):\n\tif isinstance(images[0],EMData) : ret=images[n].copy()\n\telif n>len(images)-2 : raise Exception, \"get_image() outside range\"\n\telse: ret=EMData(images[0],images[n+1])\n\n\tif normproc!=None : ret.process_inplace(normproc[0],normproc[1])\n\n\treturn ret", "def demo(image, name):\n\n # Log.set_log_max_depth(5)\n\n image = normalise(image.astype(np.float32))\n # noisy = add_noise(image, intensity=None, variance=0.1, sap=0, clip=False)\n noisy = random_noise(image, mode=\"gaussian\", var=0.1, seed=0, clip=False)\n noisier = random_noise(noisy, mode=\"gaussian\", var=0.1, seed=100, clip=False)\n\n generator = StandardFeatureGenerator(\n include_corner_features=True,\n include_scale_one=True,\n include_fine_features=True,\n include_spatial_features=True,\n )\n regressor = CBRegressor(\n patience=16, loss='l1', learning_rate=0.002, max_num_estimators=4096\n )\n\n it = ImageTranslatorFGR(feature_generator=generator, regressor=regressor)\n\n it.train(noisy, noisy, jinv=True)\n n2s_denoised = it.translate(noisy)\n\n it.exclude_center_feature = False\n it.train(noisier, noisy, jinv=False)\n denoised = it.translate(noisy)\n denoised_corrected = 2 * denoised - noisy\n\n # denoised2 = it.translate(it.translate(it.translate(denoised)))\n denoised2 = it.translate(denoised)\n\n image = numpy.clip(image, 0, 1)\n noisy = numpy.clip(noisy, 0, 1)\n n2s_denoised = numpy.clip(n2s_denoised, 0, 1)\n denoised_corrected = numpy.clip(denoised_corrected, 0, 1)\n denoised2 = numpy.clip(denoised2, 0, 1)\n\n psnr_noisy = psnr(image, noisy)\n ssim_noisy = ssim(image, noisy)\n\n psnr_n2s_denoised = psnr(image, n2s_denoised)\n ssim_n2s_denoised = ssim(image, n2s_denoised)\n\n psnr_denoised = psnr(image, denoised)\n ssim_denoised = ssim(image, denoised)\n\n psnr_denoised_corrected = psnr(image, denoised_corrected)\n ssim_denoised_corrected = ssim(image, denoised_corrected)\n\n psnr_denoised2 = psnr(image, denoised2)\n ssim_denoised2 = ssim(image, denoised2)\n\n print(\"noisy :\", psnr_noisy, ssim_noisy)\n print(\n \"denoised (classic_denoisers) :\",\n psnr_n2s_denoised,\n ssim_n2s_denoised,\n )\n print(\"denoised (noiser2noise) :\", psnr_denoised, ssim_denoised)\n print(\n \"denoised (noiser2noise corrected) :\",\n psnr_denoised_corrected,\n ssim_denoised_corrected,\n )\n print(\"denoised (x2) :\", psnr_denoised2, ssim_denoised2)\n\n Log.enable_output = False\n denoised_images = []\n for i in range(1, 32):\n psnr_denoised = psnr(image, numpy.clip(denoised, 0, 1))\n ssim_denoised = ssim(image, numpy.clip(denoised, 0, 1))\n psnr_sslos = psnr(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n ssim_sslos = ssim(numpy.clip(n2s_denoised, 0, 1), numpy.clip(denoised, 0, 1))\n print(f\"x{i} :\", psnr_sslos, ssim_sslos, psnr_denoised, ssim_denoised)\n\n denoised_images.append(numpy.clip(denoised, 0, 1))\n denoised = it.translate(denoised)\n\n import napari\n\n 
with napari.gui_qt():\n viewer = napari.Viewer()\n viewer.add_image(image, name='image')\n viewer.add_image(noisy, name='noisy')\n viewer.add_image(noisier, name='noisier')\n viewer.add_image(n2s_denoised, name='denoised (classic_denoisers)')\n viewer.add_image(denoised, name='denoised (noiser3noise)')\n viewer.add_image(denoised_corrected, name='denoised (noiser3noise corrected)')\n viewer.add_image(numpy.stack(denoised_images), name=f'denoised images')", "def EST_NOISE(images):\n num = images.shape[0]\n m_e_bar = sum(images)/num\n m_sigma = np.sqrt(sum((images - m_e_bar)**2)/(num - 1))\n \n return m_sigma", "def instance_norm(images, epsilon=1e-5, name='instance_norm'):\n means = tf.reduce_mean(images, axis=[1, 2], keep_dims=True)\n stddevs = tf.sqrt(tf.reduce_mean(tf.square(images - means), axis=[1, 2], keep_dims=True))\n results = (images - means) / (stddevs + epsilon)\n with tf.variable_scope(None, default_name=name):\n biases = tf.get_variable('biases', shape=images.get_shape()[-1].value, dtype=images.dtype,\n initializer=tf.zeros_initializer())\n scales = tf.get_variable('scales', shape=images.get_shape()[-1].value, dtype=images.dtype,\n initializer=tf.ones_initializer())\n return results*scales + biases", "def torchxrayvision_normalize(img, maxval=255, reshape=False):\n \n if img.max() > maxval:\n raise Exception(\"max image value ({}) higher than expected bound ({}).\".format(img.max(), maxval))\n \n img = (2 * (img.astype(np.float32) / maxval) - 1.) * 1024\n\n if reshape:\n # Check that images are 2D arrays\n if len(img.shape) > 2:\n img = img[:, :, 0]\n if len(img.shape) < 2:\n print(\"error, dimension lower than 2 for image\")\n\n # add color channel\n img = img[None, :, :] \n \n return img", "def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale", "def _inexact_alm_l1(imgflt_stack,options):\n # Get basic image information and reshape input\n img_width = imgflt_stack.shape[0]\n img_height = imgflt_stack.shape[1]\n img_size = img_width* img_height\n img_3d = imgflt_stack.shape[2]\n imgflt_stack = np.reshape(imgflt_stack,(img_size, img_3d))\n options['weight'] = np.reshape(options['weight'],imgflt_stack.shape)\n\n # Matrix normalization factor\n temp = np.linalg.svd(imgflt_stack,full_matrices=False,compute_uv=False)\n norm_two = np.float64(temp[0])\n del temp\n\n # A is a low rank matrix that is being solved for\n A = np.zeros(imgflt_stack.shape,dtype=np.float64)\n A_coeff = np.ones((1, img_3d),dtype=np.float64) # per image scaling coefficient, accounts for things like photobleaching\n A_offset = np.zeros((img_size,1),dtype=np.float64) # offset per pixel across all images\n\n # E1 is the additive error. 
Since the goal is determining the background signal, this is the real signal at each pixel\n E1 = np.zeros(imgflt_stack.shape,dtype=np.float64)\n\n # Normalization factors\n ent1 = np.float64(1) # flatfield normalization\n ent2 = np.float64(10) # darkfield normalization\n\n # Weights\n weight_upd = _dct2(np.mean(np.reshape(A,(img_width, img_height, img_3d)),2))\n\n # Initialize gradient and weight normalization factors\n Y1 = np.float64(0)\n mu = np.float64(12.5)/norm_two\n mu_bar = mu * 10**7\n rho = np.float64(1.5)\n\n # Frobenius norm\n d_norm = np.linalg.norm(imgflt_stack,'fro')\n\n # Darkfield upper limit and offset\n B1_uplimit = np.min(imgflt_stack)\n B1_offset = np.float64(0)\n\n # Perform optimization\n iternum = 0\n converged = False\n while not converged:\n iternum += 1\n\n # Calculate the flatfield using existing weights, coefficients, and offsets\n W_idct_hat = _idct2(weight_upd)\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n temp_W = np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n\n # Update the weights\n temp_W = np.reshape(temp_W,(img_width, img_height, img_3d))\n temp_W = np.mean(temp_W,2)\n weight_upd = weight_upd + _dct2(temp_W)\n weight_upd = np.max(np.reshape(weight_upd - options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0) + np.min(np.reshape(weight_upd + options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0)\n W_idct_hat = _idct2(weight_upd)\n\n # Calculate the flatfield using updated weights\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n\n # Determine the error\n E1 = E1 + np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n E1 = np.max(np.reshape(E1 - options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0) + np.min(np.reshape(E1 + options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0)\n\n # Calculate the flatfield coefficients by subtracting the errors from the original data\n R1 = imgflt_stack-E1\n A_coeff = np.reshape(np.mean(R1,0)/np.mean(R1),(1, img_3d))\n A_coeff[A_coeff<0] = 0 # pixel values should never be negative\n\n # Calculate the darkfield component if specified by the user\n if options['darkfield']:\n # Get images with predominantly background pixels\n validA1coeff_idx = np.argwhere(A_coeff<1)[:,1]\n R1_upper = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1)).astype(np.float64)>(np.float64(np.mean(W_idct_hat))-np.float64(10**-5)))[:,0],:]\n R1_upper = np.mean(R1_upper[:,validA1coeff_idx],0)\n R1_lower = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1))<np.mean(W_idct_hat)+np.float64(10**-5))[:,0],:]\n R1_lower = np.mean(R1_lower[:,validA1coeff_idx],0)\n B1_coeff = (R1_upper-R1_lower)/np.mean(R1)\n k = validA1coeff_idx.size\n\n # Calculate the darkfield offset\n temp1 = np.sum(np.square(A_coeff[0,validA1coeff_idx]))\n temp2 = np.sum(A_coeff[0,validA1coeff_idx])\n temp3 = np.sum(B1_coeff)\n temp4 = np.sum(A_coeff[0,validA1coeff_idx]*B1_coeff)\n temp5 = temp2 * temp3 - k*temp4\n if temp5 == 0:\n B1_offset = np.float64(0)\n else:\n B1_offset = (temp1*temp3-temp2*temp4)/temp5\n B1_offset = np.max(B1_offset,initial=0)\n B1_offset = np.min(B1_offset,initial=B1_uplimit/(np.mean(W_idct_hat)+10**-7))\n B_offset = B1_offset * np.mean(W_idct_hat) - B1_offset*np.reshape(W_idct_hat,(-1,1))\n\n # Calculate darkfield\n A1_offset = np.reshape(np.mean(R1[:,validA1coeff_idx],1),(-1,1)) - np.mean(A_coeff[0,validA1coeff_idx]) * np.reshape(W_idct_hat,(-1,1))\n A1_offset = A1_offset - np.mean(A1_offset)\n A_offset = A1_offset - np.mean(A1_offset) - 
B_offset\n\n # Update darkfield weights\n W_offset = _dct2(np.reshape(A_offset,(img_width, img_height)))\n W_offset = np.max(np.reshape(W_offset - options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0) \\\n + np.min(np.reshape(W_offset + options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0)\n\n # Calculate darkfield based on updated weights\n A_offset = _idct2(W_offset)\n A_offset = np.reshape(A_offset,(-1,1))\n A_offset = np.max(np.reshape(A_offset - options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0) \\\n + np.min(np.reshape(A_offset + options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0)\n A_offset = A_offset + B_offset\n\n # Loss\n Z1 = imgflt_stack - A - E1\n\n # Update weight regularization term\n Y1 = Y1 + mu*Z1\n\n # Update learning rate\n mu = np.min(mu*rho,initial=mu_bar)\n\n # Stop if loss is below threshold\n stopCriterion = np.linalg.norm(Z1,ord='fro')/d_norm\n if stopCriterion < options['optimization_tol'] or iternum > options['max_iterations']:\n converged = True\n\n # Calculate final darkfield image\n A_offset = A_offset + B1_offset * np.reshape(W_idct_hat,(-1,1))\n\n return A,E1,A_offset", "def equalize_exposure(image, iterations=1, kernel_size=None, min_object_size=500, dark_objects=True, stretch=False):\n\n # Housekeeping\n img = img_as_float(image.copy())\n\n if stretch is True:\n img = img/img.max()\n\n if dark_objects is False:\n img = 1-img # invert\n\n img_in = img.copy() # for use later\n\n if kernel_size is None:\n kernel_size = np.int(max(image.shape[0], image.shape[1])/10)\n\n # mean filter kernel\n kernel = morphology.disk(int(kernel_size/2))\n\n # identify objects to ignore\n if kernel_size % 2 is 0:\n block_size = kernel_size + 1\n else:\n block_size = kernel_size\n\n #objects = ~filters.threshold_adaptive(img, block_size, offset = 0.01*img.max()) # deprecated function\n objects = img > filters.threshold_local(img, block_size, offset = 0.01*img.max())\n objects = morphology.remove_small_objects(objects, min_size = min_object_size)\n\n # Correct Exposure x times\n i = 0\n while i < iterations:\n # Global mean\n img_mean = np.ma.masked_array(img, mask=objects).mean()\n\n # global means\n local_means = filters.rank.mean(img, selem=kernel, mask=~objects)\n local_means = filters.gaussian(local_means, kernel_size)\n\n # Correct Image\n img += (img_mean - local_means)\n img[img>1] = 1 # for compatibilty with img_as_float\n img[img<0] = 0 # for compatibilty with img_as_float\n i += 1\n\n out = img_as_float(img)\n\n return(out)", "def imgNormalize(img): \n constant = np.sum(sitk.GetArrayFromImage(img))*np.prod(img.GetSpacing())\n return img/constant", "def erode(img, size, iterations=1):\n kernel = np.ones((size, size), np.uint8)\n bg = cv2.erode(img, kernel, iterations=iterations)\n return bg", "def edges(image):\n #store image width and height and initialize new image\n image_width = image['width']\n image_height = image['height']\n new_image = {'height': image['height'], 'width': image['width'], 'pixels': len(image['pixels'])*[0]}\n \n #sobel operator kernels\n kernel_x = {'height': 3, 'width': 3, 'pixels': [-1,0,1,-2,0,2,-1,0,1]}\n kernel_y = {'height': 3, 'width': 3, 'pixels': [-1,-2,-1,0,0,0,1,2,1]}\n \n #creating the filters\n o_x = correlate(image, kernel_x)\n o_y = correlate(image, kernel_y)\n\n #perform relvant calculation for each pixel \n for x in range(image_width):\n for y in range(image_height):\n a = ((get_pixel(o_x, x, 
y))**2+(get_pixel(o_y, x, y))**2)**0.5\n set_pixel(new_image, x, y, a)\n return round_and_clip_image(new_image)", "def normalize_images(image_sitk):\n\n max = 400\n min = -1000\n\n image_np = sitk.GetArrayFromImage(image_sitk)\n\n # Normalization\n image_np = (image_np - min)/(max - min)\n image_np[image_np > 1] = 1\n image_np[image_np < 0] = 0\n\n # Convert back to SITK\n out_image_sitk = sitk.GetImageFromArray(image_np)\n out_image_sitk.CopyInformation(image_sitk)\n\n return out_image_sitk", "def energy_map(img):\n img_new = img.astype(float) #converting image to float\n total_energy = 0.0 # To store the sum of energy for all channels\n r,c,d = img.shape \n for i in range(d):\n dy = np.zeros([r, c], dtype=float) \n dx = np.zeros([r, c], dtype=float)\n if r > 1:\n dy = np.gradient(img_new[:,:,i], axis=0) #gradient along rows\n if c > 1:\n dx = np.gradient(img_new[:,:,i], axis=1) #gradient along columns\n total_energy += np.absolute(dy) + np.absolute(dx) \n return total_energy #Total energy map for entire image", "def elastic_transform(self, image, random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n image = self.affine(image, random_state)\n #from ipdb import set_trace; set_trace()\n indices = self.stretch_indices(image, random_state)\n\n return map_coordinates(image, indices, order=1, mode='reflect').reshape(image.shape)", "def elastic_transform(image, mask, alpha, sigma, alpha_affine,\n random_state=None):\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n shape_size = shape[:2]\n\n # Random affine\n center_square = np.float32(shape_size) // 2\n square_size = min(shape_size) // 3\n pts1 = np.float32([center_square + square_size,\n [center_square[0] + square_size,\n center_square[1] - square_size],\n center_square - square_size])\n pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine,\n size=pts1.shape).astype(np.float32)\n M = cv2.getAffineTransform(pts1, pts2)\n\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1),\n sigma) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1),\n sigma) * alpha\n\n x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]),\n np.arange(shape[2]))\n indices = np.reshape(y + dy, (-1, 1)), np.reshape(\n x + dx, (-1, 1)), np.reshape(z, (-1, 1))\n\n image = cv2.warpAffine(image, M, shape_size[::-1],\n borderMode=cv2.BORDER_REFLECT_101)\n\n image = map_coordinates(image, indices, order=1,\n mode='reflect').reshape(shape)\n if mask is not None:\n mask = cv2.warpAffine(mask, M, shape_size[::-1],\n borderMode=cv2.BORDER_REFLECT_101)\n mask = map_coordinates(\n mask, indices, order=1, mode='reflect'\n ).reshape(shape)\n\n return image, mask", "def forward(self,images):\n with torch.no_grad(): \n features = self.resnet(images)\n features = features.view(features.size(0),-1)\n features = self.embed(features)\n features = self.bn(features)\n return features", "def forward(self, images):\n embedding = self.model(images)\n embedding = self.l2_norm(embedding)\n # Multiply by alpha = 10 as suggested in https://arxiv.org/pdf/1703.09507.pdf\n # Equation 9: number of classes in VGGFace2 dataset = 9131\n # lower bound on alpha = 5, multiply alpha by 2; alpha = 10\n alpha = 10\n embedding = embedding * alpha\n\n return embedding", "def image_normalize(image, image_dims):\r\n if image.dtype != 'float32':\r\n image = image.astype(dtype=np.float32)\r\n if np.size(image_dims) == 2:\r\n maxpx = np.max(image[:, :])\r\n if maxpx == float(0):\r\n maxpx = 1e-12 
# fuzz factor\r\n minpx = np.min(image[:, :])\r\n image[:, :] = (image[:, :] - minpx) / (maxpx - minpx)\r\n else:\r\n for i in range(image_dims[2]): # find max/min for each channel\r\n maxpx = np.max(image[:, :, i])\r\n if maxpx == float(0):\r\n maxpx = 1e-12 # fuzz factor\r\n minpx = np.min(image[:, :, i])\r\n image[:, :, i] = (image[:, :, i] - minpx) / (maxpx - minpx)\r\n return image", "def norm_and_stack(images):\n imagestack = np.dstack(tuple([cv2.imread(image, cv2.IMREAD_UNCHANGED) for image in images]))\n mean = np.mean(imagestack)\n std = np.std(imagestack)\n new_im = (imagestack - mean)/std \n \n return new_im, mean, std", "def normalize(image):\r\n return image / 127.5 - 1.", "def process_images(image, label):\n # Normalize images to have a mean of 0 and standard deviation of 1\n # per_image_standardization is preferred, which normalize the entire image to mean zero and std 1.\n # It also make learning fast.\n image = tf.image.per_image_standardization(image)\n # Resize images from 32x32 to 277x277\n image = tf.image.resize(image, (227,227))\n return image, label", "def demo2(image_paths, output_dir, cuda):\n\n device = get_device(cuda)\n\n # Synset words\n classes = get_classtable()\n\n # Model\n model = models.resnet152(pretrained=True)\n model.to(device)\n model.eval()\n\n # The four residual layers\n target_layers = [\"relu\", \"layer1\", \"layer2\", \"layer3\", \"layer4\"]\n target_class = 243 # \"bull mastif\"\n\n # Images\n images, raw_images = load_images(image_paths)\n images = torch.stack(images).to(device)\n\n gcam = GradCAM(model=model)\n probs, ids = gcam.forward(images)\n # ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)\n ids_ = torch.tensor([[target_class]] * len(images), dtype=torch.long).to(device)\n gcam.backward(ids=ids_)\n\n for target_layer in target_layers:\n print(\"Generating Grad-CAM @{}\".format(target_layer))\n\n # Grad-CAM\n regions = gcam.generate(target_layer=target_layer)\n\n for j in range(len(images)):\n print(\n \"\\t#{}: {} ({:.5f})\".format(\n j, classes[target_class], float(probs[ids == target_class])\n )\n )\n\n # save_gradcam(\n # filename=osp.join(\n # output_dir,\n # \"{}-{}-gradcam-{}-{}.png\".format(\n # j, \"resnet152\", target_layer, classes[target_class]\n # ),\n # ),\n # gcam=regions[j, 0],\n # raw_image=raw_images[j],\n # )", "def elastic_transform(image, alpha=1000, sigma=30, spline_order=1, mode='nearest', random_state=np.random):\n assert image.ndim == 2\n shape = image.shape[:2]\n\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1),\n sigma, mode=\"constant\", cval=0) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1),\n sigma, mode=\"constant\", cval=0) * alpha\n\n x, y = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')\n indices = [np.reshape(x + dx, (-1, 1)), np.reshape(y + dy, (-1, 1))]\n result = map_coordinates(\n image, indices, order=spline_order, mode=mode).reshape(shape)\n return result", "def elastic_transform_approx(\n img: np.ndarray,\n alpha: float,\n sigma: float,\n alpha_affine: float,\n interpolation: int = cv2.INTER_LINEAR,\n border_mode: int = cv2.BORDER_REFLECT_101,\n value: Optional[ImageColorType] = None,\n random_state: Optional[np.random.RandomState] = None,\n) -> np.ndarray:\n height, width = img.shape[:2]\n\n # Random affine\n center_square = np.array((height, width), dtype=np.float32) // 2\n square_size = min((height, width)) // 3\n alpha = float(alpha)\n sigma = float(sigma)\n alpha_affine = float(alpha_affine)\n\n pts1 = np.array(\n [\n 
center_square + square_size,\n [center_square[0] + square_size, center_square[1] - square_size],\n center_square - square_size,\n ],\n dtype=np.float32,\n )\n pts2 = pts1 + random_utils.uniform(-alpha_affine, alpha_affine, size=pts1.shape, random_state=random_state).astype(\n np.float32\n )\n matrix = cv2.getAffineTransform(pts1, pts2)\n\n warp_fn = _maybe_process_in_chunks(\n cv2.warpAffine,\n M=matrix,\n dsize=(width, height),\n flags=interpolation,\n borderMode=border_mode,\n borderValue=value,\n )\n img = warp_fn(img)\n\n dx = random_utils.rand(height, width, random_state=random_state).astype(np.float32) * 2 - 1\n cv2.GaussianBlur(dx, (17, 17), sigma, dst=dx)\n dx *= alpha\n\n dy = random_utils.rand(height, width, random_state=random_state).astype(np.float32) * 2 - 1\n cv2.GaussianBlur(dy, (17, 17), sigma, dst=dy)\n dy *= alpha\n\n x, y = np.meshgrid(np.arange(width), np.arange(height))\n\n map_x = np.float32(x + dx)\n map_y = np.float32(y + dy)\n\n remap_fn = _maybe_process_in_chunks(\n cv2.remap,\n map1=map_x,\n map2=map_y,\n interpolation=interpolation,\n borderMode=border_mode,\n borderValue=value,\n )\n return remap_fn(img)", "def create_external_edge_force_gradients_from_img( img, sigma=30. ):\n # Gaussian smoothing.\n smoothed = filt.gaussian( (img-img.min()) / (img.max()-img.min()), sigma )\n # Gradient of the image in x and y directions.\n giy, gix = np.gradient( smoothed )\n # Gradient magnitude of the image.\n gmi = (gix**2 + giy**2)**(0.5)\n # Normalize. This is crucial (empirical observation).\n gmi = (gmi - gmi.min()) / (gmi.max() - gmi.min())\n\n # Gradient of gradient magnitude of the image in x and y directions.\n ggmiy, ggmix = np.gradient( gmi )\n\n def fx(x, y):\n \"\"\"\n Return external edge force in the x direction.\n\n x: ndarray\n numpy array of floats.\n y: ndarray:\n numpy array of floats.\n \"\"\"\n # Check bounds.\n x[ x < 0 ] = 0.\n y[ y < 0 ] = 0.\n\n x[ x > img.shape[1]-1 ] = img.shape[1]-1\n y[ y > img.shape[0]-1 ] = img.shape[0]-1\n\n return ggmix[ (y.round().astype(int), x.round().astype(int)) ]\n\n def fy(x, y):\n \"\"\"\n Return external edge force in the y direction.\n\n x: ndarray\n numpy array of floats.\n y: ndarray:\n numpy array of floats.\n \"\"\"\n # Check bounds.\n x[ x < 0 ] = 0.\n y[ y < 0 ] = 0.\n\n x[ x > img.shape[1]-1 ] = img.shape[1]-1\n y[ y > img.shape[0]-1 ] = img.shape[0]-1\n\n return ggmiy[ (y.round().astype(int), x.round().astype(int)) ]\n\n return fx, fy", "def optimize_images(conv_id=None, num_iterations=30):\n\n if conv_id is None:\n print(\"Final fully-connected layer before softmax.\")\n else:\n print(\"Layer:\", conv_names[conv_id])\n\n images = []\n for feature in range(0, 10):\n print(\"Optimizing image for feature no.\", feature)\n\n image = optimize_image(conv_id=conv_id, feature=feature, show_progress=False, num_iterations=num_iterations)\n\n image = image.squeeze()\n images.append(image)\n\n images = np.array(images)\n\n plot_images10(images=images)", "def forward(self, person_image):\n blob = cv.dnn.blobFromImage(person_image, size=(64, 160), ddepth=cv.CV_8U)\n self.__net.setInput(blob)\n\n emb = self.__net.forward()\n\n return emb", "def _get_photobleach(imgflt_stack,flatfield,darkfield=None):\n # Initialize matrices\n imgflt_stack = np.reshape(imgflt_stack,(OPTIONS['size']*OPTIONS['size'],-1)).astype(np.float64)\n if darkfield is None:\n darkfield = np.zeros(flatfield.shape,dtype=np.float64)\n\n # Initialize weights and tolerances\n weights = np.ones(imgflt_stack.shape,dtype=np.float64)\n epsilon = 
np.float64(0.1)\n tol = np.float64(10**-6)\n\n # Run optimization exactly 5 times\n for r in range(5):\n # Calculate weights, offsets and coefficients\n W_idct_hat = np.reshape(flatfield,(-1,1))\n A_offset = np.reshape(darkfield,(-1,1))\n A_coeff = np.reshape(np.mean(imgflt_stack,0),(1,-1))\n\n # Initialization values and learning rates\n temp = np.linalg.svd(imgflt_stack,full_matrices=False,compute_uv=False)\n norm_two = np.float64(temp[0])\n mu = np.float64(12.5)/norm_two\n mu_bar = mu * 10**7\n rho = np.float64(1.5)\n ent1 = 1\n\n # Normalization factors\n d_norm = np.linalg.norm(imgflt_stack,'fro')\n\n # Initialize augmented representation and error\n A = np.zeros(imgflt_stack.shape,dtype=np.float64)\n E1 = np.zeros(imgflt_stack.shape,dtype=np.float64)\n Y1 = np.float64(0)\n\n # Run optimization\n iternum = 0\n converged = False\n while not converged:\n iternum += 1\n\n # Calculate augmented representation\n A = np.matmul(W_idct_hat,A_coeff) + A_offset\n\n # Calculate errors\n E1 = E1 + np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n E1 = np.max(np.reshape(E1 - weights/(ent1*mu),(imgflt_stack.shape[0],imgflt_stack.shape[1],1)),-1,initial=0) + np.min(np.reshape(E1 + weights/(ent1*mu),(imgflt_stack.shape[0],imgflt_stack.shape[1],1)),-1,initial=0)\n\n # Calculate coefficients\n R1 = imgflt_stack-E1\n A_coeff = np.reshape(np.mean(R1,0),(1, -1)) - np.mean(A_offset)\n A_coeff[A_coeff<0] = 0 # pixel values are never negative\n\n # Loss\n Z1 = imgflt_stack - A - E1\n\n # Error updates\n Y1 = Y1 + mu*Z1\n\n # Update learning rate\n mu = np.min(mu*rho,initial=mu_bar)\n\n # Stop if below threshold\n stopCriterion = np.linalg.norm(Z1,'fro')/d_norm\n if stopCriterion < tol:\n converged = True\n\n # Update weights\n XE_norm = np.reshape(np.mean(A,0),(1,-1)) / E1\n weights = 1/np.abs(XE_norm + epsilon)\n weights = weights * weights.size/np.sum(weights)\n\n return A_coeff", "def __call__(self, adv, annotation=None, unpack=True,\r\n abort_early=True, epsilons=10000):\r\n\r\n a = adv\r\n del adv\r\n del annotation\r\n del unpack\r\n\r\n image = a.original_image\r\n min_, max_ = a.bounds()\r\n axis = a.channel_axis(batch=False)\r\n hw = [image.shape[i] for i in range(image.ndim) if i != axis]\r\n h, w = hw\r\n size = max(h, w)\r\n\r\n if not isinstance(epsilons, Iterable):\r\n epsilons = np.linspace(0, 0.2, num=epsilons + 1)[1:]\r\n\r\n for epsilon in tqdm(epsilons):\r\n # epsilon = 1 will correspond to\r\n # sigma = size = max(width, height)\r\n sigmas = [epsilon * size] * 3\r\n sigmas[axis] = 0\r\n blurred = gaussian_filter(image, sigmas)\r\n blurred = np.clip(blurred, min_, max_)\r\n _, is_adversarial = a.predictions(blurred)\r\n if is_adversarial and abort_early:\r\n return", "def dft(im: np.array, uv: np.array, vis: np.array):\n m, n = im.shape\n size = im.size\n xy = np.mgrid[0:m, 0:n].reshape(2, size)\n for i in range(uv.shape[1]):\n vis[i] = np.sum(\n im.reshape(size) * np.exp(\n -2j * np.pi * (uv[0, i] * xy[0, :] / m + uv[1, i] * xy[1, :] / n)))\n\n return vis", "def image_enhancement(self,img,file_name):\n #Creating output directory if it doesnt exist\n dirname = 'output'\n dir_path = os.path.dirname(os.path.realpath(__file__))\n if(os.path.isdir(os.path.join(dir_path, dirname))): \n if(os.path.exists(os.path.join(dir_path, dirname))):\n pass\n else:\n os.mkdir(os.path.join(dir_path, dirname))\n os.mkdir(os.path.join(dir_path, dirname,\"results\"))\n os.mkdir(os.path.join(dir_path, dirname,\"inputs\"))\n #Extracting edges using Canny's Edge Detection\n edges = 
cv2.Canny(img,80,255)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','edges.png'),edges)\n kernel = (3,3)\n #Applying image pyramid technique\n #Applying Gaussian blur filter over the image\n gaussian_blurred_image =self.gaussian_blurring(img,kernel,0)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaussian_blurred_image.png'),gaussian_blurred_image)\n plt.subplot(121),\n plt.xticks([]), plt.yticks([])\n plt.subplot(122),\n plt.xticks([]), plt.yticks([])\n #Downsizing the image to 1/4th of its original size\n coarse_image =self.sampling(gaussian_blurred_image,0.25,0.25) \n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','coarse_image.png'),coarse_image)\n #Upsampling the image to its original size\n up_sampling=self.sampling(coarse_image,4,4)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','up_sampling.png'),up_sampling)\n #Applying Gaussian Blur filtering\n gaus=self.gaussian_blurring(up_sampling,kernel,0)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gaus2.png'),gaus)\n #Resizing the image for image subtraction\n gaussian_blurred_image=cv2.resize(img,(gaus.shape[1],gaus.shape[0]))\n #Convert into grayscale\n gaus_gray=cv2.cvtColor(gaus,cv2.COLOR_BGR2GRAY)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','gausgray.png'),gaus_gray)\n #Converting to grayscale\n dst_gray=cv2.cvtColor(gaussian_blurred_image,cv2.COLOR_BGR2GRAY)\n (score, diff) = compare_ssim(gaus_gray, dst_gray, full=True)\n diff = (diff * 255).astype(\"uint8\")\n #Image Subtraction\n detail_image = cv2.subtract(gaus,gaussian_blurred_image)\n cv2.imwrite(os.path.join(dir_path, dirname,'inputs','detailed.png'),detail_image)\n print(detail_image.shape)\n output_path=self.process_imgdir(os.path.join(dir_path, dirname))\n dehazed_image=cv2.imread(output_path)\n # dehazed_image =self.sampling(dehazed_image,4,4)\n output_path=\"\\\\\".join(output_path.split(\"\\\\\")[:-1])\n print(dehazed_image.shape)\n cv2.imwrite(os.path.join(output_path,'dehazed_image.png'),dehazed_image) \n #Adding two images\n dst = cv2.addWeighted(detail_image,1,dehazed_image,1,0) \n kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])\n dst = cv2.filter2D(dst, -1, kernel)\n #Converting images to lightness,chroma ,hue for increasing the brightness\n lab= cv2.cvtColor(dst, cv2.COLOR_BGR2LAB)\n l, a, b = cv2.split(lab)\n #Applying CLAHE Algorithm for contrast amplification which is limited and to reduce the problem of noise amplification\n clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))\n cl = clahe.apply(l) \n limg = cv2.merge((cl,a,b))\n #Convert back to rgb\n final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR) \n psf = np.ones((5, 5)) / 25\n #Applying mean denoising filtering\n dst=cv2.fastNlMeansDenoisingColored(final,None,10,10,7,21)\n edges=cv2.cvtColor(edges,cv2.COLOR_GRAY2BGR)\n print(edges.shape)\n edges=cv2.resize(edges,(dst.shape[1],dst.shape[0]))\n #Increasing the brightness of the image\n hsv = cv2.cvtColor(dst, cv2.COLOR_BGR2HSV)\n h,s,v=cv2.split(hsv)\n value = 30 #whatever value you want to add\n lim=255-value\n \n s[s>lim]=255\n s[s<lim]+=value\n value1=30\n lim1=255-value1\n v[v>lim1]=255\n v[v<lim1]+=value1\n hsv = cv2.merge((h, s, v))\n dst = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n #Writing the output file \n dst = cv2.addWeighted(dst,1,edges,1,0)\n cv2.imwrite(os.path.join(output_path,file_name+'.png'),dst)\n #Resizing the file to compare it with other methods\n resized = cv2.resize(dst, (256,256), interpolation = cv2.INTER_AREA)\n 
cv2.imwrite(os.path.join(output_path,'result_resized.png'),resized)", "def prep_image(img, inp_dim):\n img = (letterbox_image(img, (inp_dim, inp_dim)))\n img = img[:,:,::-1].transpose((2,0,1)).copy()\n img = torch.from_numpy(img).float().div(255.0).unsqueeze(0)\n return img", "def densenet121(growth_rate=32, compression=1.0):\n X = K.Input(shape=(224, 224, 3))\n x = K.layers.BatchNormalization(axis=3)(X)\n x = K.layers.Activation('relu')(x)\n x = K.layers.Conv2D(2 * growth_rate, (7, 7), (2, 2),\n kernel_initializer='he_normal', padding='same')(x)\n x = K.layers.MaxPooling2D(pool_size=(3, 3),\n strides=2,\n padding=\"same\")(x)\n x, nb_filters = dense_block(x, 2 * growth_rate, growth_rate, 6)\n x, nb_filters = transition_layer(x, nb_filters, compression)\n x, nb_filters = dense_block(x, nb_filters, growth_rate, 12)\n x, nb_filters = transition_layer(x, nb_filters, compression)\n x, nb_filters = dense_block(x, nb_filters, growth_rate, 24)\n x, nb_filters = transition_layer(x, nb_filters, compression)\n x, nb_filters = dense_block(x, nb_filters, growth_rate, 16)\n x = K.layers.AveragePooling2D(pool_size=(7, 7),\n strides=(1, 1))(x)\n x = K.layers.Dense(units=1000, activation='softmax',\n kernel_initializer='he_normal')(x)\n model = K.models.Model(inputs=X, outputs=x)\n return model", "def augment_train_data(self):\n print(\"Augmenting train data.\")\n elastic_flag = 1\n read_path: Path = Path(os.environ[\"DATA_PATH\"]) / \"characters\" / \"train\"\n for letter_dir in read_path.iterdir():\n original_images = list(letter_dir.iterdir())\n length = len(original_images)\n max_kernel = (240 - length) / 2 / length + 2\n if max_kernel >= 2.6:\n max_kernel = min(round(max_kernel), 5)\n for j in original_images:\n img_path = str(j)\n self.augmenter.dilate_image(img_path, 3, max_kernel)\n self.augmenter.erosion_image(img_path, 3, max_kernel)\n new_len = len(\n list(letter_dir.iterdir())\n ) # Length after regular augmentation\n if elastic_flag == 1:\n try: # to make the program runnable if you are not on linux\n if new_len < 160:\n reps = 4 - new_len // 50\n self.augmenter.elastic_morphs(letter_dir, reps)\n except:\n print(\"Continuing without elastic morph\")\n elastic_flag = 0\n continue", "def Embedding(image, mode, params, reuse=tf.AUTO_REUSE, scope='scene'):\n del params\n is_training = mode == tf_estimator.ModeKeys.TRAIN\n with tf.variable_scope(scope, reuse=reuse):\n scene = resnet.get_resnet50_spatial(image, is_training)\n scene = tf.nn.relu(scene)\n summed_scene = tf.reduce_mean(scene, axis=[1, 2])\n return summed_scene, scene", "def norm(imagestack, mean, std):\n \n new_im = (imagestack - mean)/std \n \n return new_im", "def densenet121(growth_rate=32, compression=1.0):\n inputs = K.Input(shape=(224, 224, 3))\n out_l = K.layers.BatchNormalization(axis=3)(inputs)\n out_l = K.layers.Activation('relu')(out_l)\n out_l = K.layers.Conv2D(64, kernel_size=(7, 7), padding='same',\n kernel_initializer='he_normal',\n strides=(2, 2))(out_l)\n out_l = K.layers.MaxPool2D((3, 3), (2, 2), padding=\"same\")(out_l)\n out_l, filters = dense_block(out_l, 64, growth_rate, 6)\n out_l, filters = transition_layer(out_l, filters, compression)\n out_l, filters = dense_block(out_l, filters, growth_rate, 12)\n out_l, filters = transition_layer(out_l, filters, compression)\n out_l, filters = dense_block(out_l, filters, growth_rate, 24)\n out_l, filters = transition_layer(out_l, filters, compression)\n out_l, filters = dense_block(out_l, filters, growth_rate, 16)\n out_l = K.layers.AvgPool2D((7, 7), 
padding='same')(out_l)\n out_l = K.layers.Dense(1000, activation='softmax')(out_l)\n model = K.Model(inputs, out_l)\n return model", "def preprocess(self, img):\n return img - np.mean(img)", "def run_phot_normalization(setup, **params):\n\n log = logs.start_stage_log( setup.red_dir, 'postproc_phot_norm', version=VERSION )\n\n xmatch = crossmatch.CrossMatchTable()\n xmatch.load(params['crossmatch_file'],log=log)\n\n # Identify the datasets to be used as the primary reference in each\n # filter:\n xmatch.id_primary_datasets_per_filter()\n log.info('Identified datasets to be used as the primary references in each filter: '\\\n +repr(xmatch.reference_datasets))\n\n # Add columns to the dataset Table to hold the photometric calibration\n # parameters\n ndset = len(xmatch.datasets)\n ncol = len(xmatch.datasets.colnames)\n if 'norm_a0' not in xmatch.datasets.colnames:\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_a0', index=ncol+1)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_a1', index=ncol+2)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_0', index=ncol+3)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_1', index=ncol+4)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_2', index=ncol+5)\n xmatch.datasets.add_column(np.zeros(ndset), name='norm_covar_3', index=ncol+6)\n log.info('Expanded xmatch.datasets table for normalization parameters')\n\n # Extract list of filters from xmatch.images['filter'] column\n filter_list = np.unique(xmatch.images['filter'].data)\n log.info('Identified list of filters to process: '+repr(filter_list))\n\n # Read data from quadrant 1\n # Reading in the timeseries data for all four quadrants is at the very\n # edge of the memory limits on the machines available, so it is preferable\n # to calibrate the quadrant's data separately. 
However, there are sufficient\n    # stars in each quadrant to be able to determine the photometric calibration\n    # from a single quadrant, and apply it to the rest of the image.\n    log.info('Loading the timeseries photometry from quadrant 1')\n    file_path = path.join(setup.red_dir, params['field_name']+'_quad1_photometry.hdf5')\n    phot_data = hd5_utils.read_phot_from_hd5_file(file_path, return_type='array')\n    log.info('-> Completed photometry load')\n\n    # Identify constant stars in the dataset\n    constant_stars = find_constant_stars(xmatch, phot_data, log)\n    star = 1\n\n    # Normalize the photometry of each dataset to that of the reference\n    # image in the primary reference dataset in that filter\n    #for filter in filter_list:\n    for filter in filter_list:\n\n        # Plot an RMS diagram of the lightcurves for all stars in this filter,\n        # prior to normalization, for comparison\n        image_index = np.where(xmatch.images['filter'] == filter)[0]\n        phot_data_filter = phot_data[:,image_index,:]\n        (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('corrected')\n        qc_col = 16\n\n        plot_multisite_rms(params, phot_data_filter, mag_col, mag_err_col, qc_col,\n                            'rms_prenorm_'+str(filter)+'.png', log)\n\n        # Extract the reference image photometry for the primary-ref dataset\n        # for this filter\n        ref_datacode = xmatch.reference_datasets[filter]\n        sitecode = get_site_code(ref_datacode)\n        log.info('Reference dataset in '+filter+' is '+ref_datacode+', sitecode='+sitecode)\n\n        ref_phot = np.zeros((len(xmatch.stars),2))\n        ref_phot[:,0] = xmatch.stars['cal_'+filter.replace('p','')+'_mag_'+sitecode]\n        ref_phot[:,1] = xmatch.stars['cal_'+filter.replace('p','')+'_magerr_'+sitecode]\n\n        # Extract the lightcurves for all other datasets in this filter in turn\n        dataset_index = np.where(xmatch.datasets['dataset_filter'] == filter)[0]\n\n        for idset in dataset_index:\n            dset_datacode = xmatch.datasets['dataset_code'][idset]\n            dset_sitecode = get_site_code(dset_datacode)\n\n            # If the dataset is the reference dataset, replicate the photometric\n            # measurements from the corrected columns to the normalized columns,\n            # since no normalization is required - this ensures the full\n            # lightcurve can be accessed from the normalization columns.\n            if dset_datacode == ref_datacode:\n                log.info('Replicating primary reference photometry from dataset '\\\n                            +dset_datacode+' to the normalized photometry columns')\n                image_index = np.where(xmatch.images['dataset_code'] == dset_datacode)[0]\n                (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('corrected')\n                (norm_mag_col, norm_mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n                for i in image_index:\n                    phot_data[:,i,norm_mag_col] = phot_data[:,i,mag_col]\n                    phot_data[:,i,norm_mag_err_col] = phot_data[:,i,mag_err_col]\n\n            # Normalize any dataset that isn't the same as the reference dataset\n            else:\n                log.info('Normalizing dataset '+dset_datacode+', sitecode='+dset_sitecode)\n                image_index = np.where(xmatch.images['dataset_code'] == dset_datacode)[0]\n\n                ## Dset created to hold all stars in field, not quadrant - \n                # normalization is calculated from whole field.\n                dset_phot = np.zeros((len(xmatch.stars),2))\n                dset_phot[:,0] = xmatch.stars['cal_'+filter.replace('p','')+'_mag_'+dset_sitecode]\n                dset_phot[:,1] = xmatch.stars['cal_'+filter.replace('p','')+'_magerr_'+dset_sitecode]\n\n                # Calculate their weighted offset relative to the primary-ref\n                # dataset for the filter\n                (fit, covar_fit) = calc_phot_normalization(ref_phot, dset_phot,\n                                                            constant_stars, 
log,\n diagnostics=True, ref=sitecode,\n dset=dset_sitecode, f=filter)\n\n # Store the fit results for this dataset\n xmatch = store_dataset_phot_normalization(idset, xmatch, fit, covar_fit, log)\n\n # Apply the normalization calibration to the dataset's reference\n # image photometry, and store the results in the xmatch.stars table\n log.info('Applying normalization to the datasets reference image photometry')\n cal_phot = apply_phot_normalization_single_frame(fit, covar_fit, dset_phot,\n 0, 1, log,\n diagnostics=True, ref=sitecode,\n dset=dset_sitecode, f=filter)\n xmatch.stars['norm_'+filter.replace('p','')+'_mag_'+dset_sitecode] = cal_phot[:,0]\n xmatch.stars['norm_'+filter.replace('p','')+'_magerr_'+dset_sitecode] = cal_phot[:,1]\n\n # Apply the photometry calibration to the timeseries data\n # for this dataset\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('corrected')\n (norm_mag_col, norm_mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n phot_data = normalize_timeseries_photometry(phot_data, image_index,\n fit, covar_fit,\n mag_col, mag_err_col,\n norm_mag_col, norm_mag_err_col,\n log)\n\n # Plot a second RMS diagram of the lightcurves for all stars in this\n # filter, post normalization, for comparison\n image_index = np.where(xmatch.images['filter'] == filter)[0]\n phot_data_filter = phot_data[:,image_index,:]\n (mag_col, mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n plot_multisite_rms(params, phot_data_filter, mag_col, mag_err_col, qc_col,\n 'rms_postnorm_'+str(filter)+'.png', log)\n\n\n fig = plt.figure(3,(10,10))\n (norm_mag_col, norm_mag_err_col) = field_photometry.get_field_photometry_columns('normalized')\n idx = np.where(phot_data[star,:,norm_mag_col] > 0.0)[0]\n plt.errorbar(phot_data[star,idx,0], phot_data[star,idx,norm_mag_col],\n yerr=phot_data[star,idx,norm_mag_err_col], fmt='none', color='k')\n (xmin,xmax,ymin,ymax) = plt.axis()\n ymin = max(ymin,14.0)\n ymax = min(ymax,22.0)\n plt.axis([xmin,xmax,ymax,ymin])\n plt.xlabel('HJD')\n plt.ylabel('Mag')\n plt.savefig('Star_'+str(star)+'_lc_norm.png')\n plt.close(3)\n\n # Output updated crossmatch table\n xmatch.save(params['crossmatch_file'])\n\n # Output the photometry for quadrant 1:\n output_quadrant_photometry(params, setup, 1, phot_data, log)\n\n logs.close_log(log)\n\n status = 'OK'\n report = 'Completed successfully'\n return status, report", "def resize_real_images(self, images):\n block_idx = (self.growth_idx + 1) // 2\n height, width = self.params[\"generator_projection_dims\"][0:2]\n resized_image = tf.image.resize(\n images=images,\n size=[\n height * (2 ** block_idx), width * (2 ** block_idx)\n ],\n method=\"nearest\",\n name=\"resized_real_image_{}\".format(self.growth_idx)\n )\n\n return resized_image", "def model(inputs, target_images, is_training):\n # if isinstance(inputs, tuple):\n assert mask_augs >= 0. 
and mask_augs <= 1., \"mask_augs must be in [0, 1]\"\n if FLAGS.use_td_loss and isinstance(inputs, tuple):\n # print('#'*80)\n # print(inputs)\n assert metric is not None, \"Metric function is None\"\n inputs, augs = inputs\n B = inputs.get_shape().as_list()[0]\n A = augs.get_shape().as_list()[1]\n if mask_augs > 0:\n mask = tf.cast(tf.greater(tf.random.uniform(shape=[B, A], minval=0., maxval=1.), 0.5), augs.dtype) # noqa\n bias = mask * -1\n augs = (augs * mask) + bias # Randomly mask out augs for difficulty and code those dims as -1\n with tf.variable_scope('encoder'): # variable_scope name_scope\n features, block_activities = encoder(inputs, is_training=is_training)\n print(\"Features: \")\n print(features)\n print(\"---\")\n # Global average pool of B 7 7 2048 -> B 2048\n if data_format == 'channels_last':\n outputs = tf.reduce_mean(features, [1, 2])\n else:\n outputs = tf.reduce_mean(features, [2, 3])\n outputs = tf.identity(outputs, 'final_avg_pool')\n print(\"Outputs: \")\n print(outputs)\n print(\"---\")\n # B 2048\n\n h_w = features.get_shape().as_list()[1]\n # print(h_w)\n\n augs = tf.tile(augs[:,None,None,:], tf.constant([1,h_w,h_w,1]))\n print(\"Augs: \")\n print(augs)\n print(\"---\")\n features = tf.concat([features, augs], axis=-1)\n \n with tf.variable_scope('decoder'):\n recon_images = decoder(\n features,\n block_activities,\n is_training=is_training,\n skip=skip)\n print(\"Reconstructed images and target images: \")\n print(recon_images)\n print(target_images)\n print(\"---\")\n with tf.variable_scope('metric'):\n # Squash both recon and target images\n recon_images_squash = tf.tanh(recon_images)\n target_images = (target_images * 2) - 1\n Bt = target_images.get_shape().as_list()[0]\n Br = recon_images_squash.get_shape().as_list()[0]\n if Bt == Br:\n # Attractive + repulsive loss\n pass\n elif Bt * 2 == Br:\n # Attractive-only loss\n target_images = tf.concat([target_images, target_images], 0)\n\n # Differentiable perceptual metric. 
First reconstruction.\n # both_images = tf.concat([recon_images, target_images], -1) # B H W 6\n all_images = tf.concat([recon_images_squash, target_images], 0) # Stack these in batch dim\n metric_all_images = metric(all_images, is_training=is_training)\n # B = metric_all_images.get_shape().as_list()[0]\n metric_all_images = tf.reshape(metric_all_images, [B, -1])\n metric_hidden_r, metric_hidden_t = tf.split(metric_all_images, 2, 0) # Split these in batch dim\n\n # Prep recon_images for visualization\n # recon_images = tf.clip_by_value(recon_images, clip_value_min=-5, clip_value_max=5)\n # recon_images = (recon_images + 5) / 10\n\n recon_mean, recon_std = tf.nn.moments(recon_images, axes=[1, 2], keep_dims=True)\n recon_images = (recon_images - recon_mean) / recon_std\n recon_images = tf.clip_by_value(recon_images, clip_value_min=-5, clip_value_max=5)\n recon_images = (recon_images + 5) / 10\n # recon_images = recon_images_squash\n if greyscale_viz:\n recon_images = tf.image.rgb_to_grayscale(recon_images)\n recon_images = tf.concat([recon_images, recon_images, recon_images], -1)\n print(\"Embedding output: \")\n print(metric_hidden_t)\n print(\"---\")\n return outputs, recon_images, metric_hidden_r, metric_hidden_t\n\n else:\n # augs = None\n \n with tf.variable_scope('encoder'): # variable_scope name_scope\n features, block_activities = encoder(inputs, is_training)\n \n if data_format == 'channels_last':\n print(\"Features:\")\n print(features)\n outputs = tf.reduce_mean(features, [1, 2])\n else:\n outputs = tf.reduce_mean(features, [2, 3])\n outputs = tf.identity(outputs, 'final_avg_pool')\n \n # filter_trainable_variables(trainable_variables, after_block=5)\n # add_to_collection(trainable_variables, 'trainable_variables_inblock_')\n\n return outputs", "def evaluation(pre_model, img_1, img_2,\n default_mean_std = True,\n style_layers=default_style_layers,\n weight = 1000000):\n # load the image\n imsize = 512 if torch.cuda.is_available() else 128 # use small size if no gpu\n img_1 = image_loader(img_1)\n img_2 = image_loader(img_2)\n\n cnn = copy.deepcopy(pre_model)\n\n # normalization module\n normalization = Normalization(default_mean_std = default_mean_std)\n\n style_losses = 0\n\n # create our model\n model = nn.Sequential(normalization)\n\n # increment every time we see a conv\n i = 0 \n # go through all the layers\n for layer in cnn.children():\n if isinstance(layer, nn.Conv2d):\n i += 1\n name = 'conv_{}'.format(i)\n elif isinstance(layer, nn.ReLU):\n name = 'relu_{}'.format(i)\n # According to Alexis Jacq, the in-place version doesn't play \n # very nicely with the ContentLoss with the ContentLoss and StyleLoss \n # we insert below. 
So we replace with out-of-place ones here.\n layer = nn.ReLU(inplace=False)\n elif isinstance(layer, nn.MaxPool2d):\n name = 'maxpool_{}'.format(i)\n elif isinstance(layer, nn.BatchNorm2d):\n name = 'bn_{}'.format(i)\n\n model.add_module(name, layer)\n\n if name in style_layers:\n # add style loss:\n # calculate target style\n style_1 = model(img_1).detach()\n style_1 = gram_matrix(style_1)\n style_2 = model(img_2).detach()\n style_2 = gram_matrix(style_2)\n # save the loss\n style_losses += F.mse_loss(style_1, style_2) / len(style_layers)\n \n style_losses *= weight\n return float(style_losses)", "def FullSingleSceneInference(self, image_file, USE_MODEL = 'DEFAULT'): \n if USE_MODEL != 'DEFAULT':\n (detector_file, classes_file, map_file, \n model_name, model_type) = self.DownloadModel(USE_MODEL)\n self.SetActiveGraph(detector_graph_file = detector_file,\n model_name = model_name,\n classes_file = classes_file,\n map_file = map_file,\n model_type = model_type\n )\n \n b,s,c = self.AnalyzeSingleImage(image_file = image_file)\n self.DrawImage(image_file = image_file, boxes = b,\n scores = s, classes = c\n )\n return", "def run_visualization(image):\n # for image in images:\n try:\n with tf.gfile.FastGFile(image, 'rb') as f:\n jpeg_str = f.read()\n original_im = Image.open(BytesIO(jpeg_str))\n except IOError:\n print('Cannot retrieve image')\n return\n\n # print('running deeplab on image {0}'.format(image))\n resized_im, seg_map = MODEL.run(original_im)\n seg_map = seg_map.astype(np.uint8) * 255\n resized_im = np.array(resized_im, dtype=np.uint8)\n resized_im = cv2.cvtColor(resized_im, cv2.COLOR_BGR2RGB)\n # vis_segmentation(resized_im, seg_map,FULL_COLOR_MAP ,LABEL_NAMES)\n overlay_image = cv2.addWeighted(resized_im, 0.8, cv2.merge((seg_map * 0, seg_map, seg_map * 0)), 0.2, 0)\n # time.sleep(params.SEC_BETWEEN_PREDICTION)\n\n return resized_im, seg_map, overlay_image.astype(np.uint8)", "def compute_energy(img):\r\n # follow these steps:\r\n # 1. convert the image to grayscale\r\n # 2. use a Sobel filter to compute the gradient in the X and Y directions\r\n # 3. 
compute the magnitude of the image\r\n\r\n    img_gray_scale = cv.cvtColor(img, cv.COLOR_BGR2GRAY);\r\n\r\n    # worth looking up reference SOBEL code online as well\r\n    grad_x = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 1, dy = 0, borderType = cv.BORDER_CONSTANT)\r\n    grad_y = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 0, dy = 1, borderType = cv.BORDER_CONSTANT)\r\n\r\n# E represents the gradients, i.e. how much a pixel changes from one to the next\r\n    E = abs(grad_x) + abs(grad_y)\r\n    # print(grad_y)\r\n    # print(grad_x)\r\n\r\n    cv.imwrite(\"poza.jpg\", E)\r\n    return E", "def make_big_e(self):\n l = self.l_i\n self.img[l/2-1:l/2+1, l/2-5:l/2+5] = 1\n self.img[l/2-5:l/2-3, l/2-5:l/2+5] = 1\n self.img[l/2+3:l/2+5, l/2-5:l/2+5] = 1\n self.img[l/2-5:l/2+5, l/2-5:l/2-3] = 1\n self.img_name = 'bigE'", "def elastic_transform(X, min_alpha=36, max_alpha=38, min_sigma=5, max_sigma=6, random_state=None, n_jobs=1):\n if random_state is None:\n rng = np.random\n else:\n rng = np.random.RandomState(random_state)\n alphas = rng.uniform(min_alpha, max_alpha, size=X.shape[0])\n sigmas = rng.uniform(min_sigma, max_sigma, size=X.shape[0])\n X_elas = Parallel(n_jobs=n_jobs)(delayed(elastic_transform_one)(X[i], alphas[i], sigmas[i]) for i in range(X.shape[0]))\n return np.array(X_elas, dtype='float32')", "def post_process_image(image):\n\n image = (image - np.min(image)) * (255 / (np.max(image) - np.min(image)))\n\n return image", "def forward(self, emb_i, emb_j):\n z_i = F.normalize(emb_i, dim=1)\n z_j = F.normalize(emb_j, dim=1)\n\n representations = torch.cat([z_i, z_j], dim=0)\n similarity_matrix = F.cosine_similarity(representations.unsqueeze(1), representations.unsqueeze(0), dim=2)\n \n sim_ij = torch.diag(similarity_matrix, self.batch_size)\n sim_ji = torch.diag(similarity_matrix, -self.batch_size)\n positives = torch.cat([sim_ij, sim_ji], dim=0)\n \n nominator = torch.exp(positives / self.temperature)\n denominator = self.negatives_mask * torch.exp(similarity_matrix / self.temperature)\n \n loss_partial = -torch.log(nominator / torch.sum(denominator, dim=1))\n loss = torch.sum(loss_partial) / (2 * self.batch_size)\n return loss", "def vimage(cat1, cat2, dmax, psize, fwhm):\n\n NHALF = int(dmax/psize)\n NSIDE = 2*NHALF+1\n mshift = (NHALF+0.5)*psize\n img = np.zeros((NSIDE,NSIDE))\n x2s, y2s = cat2[:,0], cat2[:,1]\n for x1, y1 in cat1:\n ok = (x2s > x1-mshift) & (x2s < x1+mshift) & \\\n (y2s > y1-mshift) & (y2s < y1+mshift)\n for x2, y2 in cat2[ok]:\n ix = NHALF+int(round((x2-x1)/psize))\n iy = NHALF+int(round((y2-y1)/psize))\n img[iy,ix] += 1\n\n # smooth image\n img = gaussian_filter(img,fwhm/psize/2.3548,mode='constant')\n\n # identify maximum pixel\n ind = np.arange(NSIDE)\n ix, iy = np.meshgrid(ind, ind)\n peak = img == img.max()\n #if len(ix[peak]) > 1:\n # raise Exception(\"Found more than one maximum pixel\")\n\n # now have first approximation to the shift\n ixp = ix[peak][0]\n iyp = iy[peak][0]\n xp = psize*(ixp-NHALF)\n yp = psize*(iyp-NHALF)\n if ixp == 0 or ixp == NSIDE-1 or iyp == 0 or iyp == NSIDE-1:\n # max pixel at edge of array. 
Just return pixel position\n # as \"refined\" position\n xr = xp\n yr = yp\n\n else:\n # Make a quadratic approx to refine the peak position.\n # Estimate first and second partial derivatives from\n # 3x3 pixels centred on peak\n fx = (img[iyp,ixp+1] - img[iyp,ixp-1])/2.\n fy = (img[iyp+1,ixp] - img[iyp-1,ixp])/2.\n fxx = img[iyp,ixp-1] + img[iyp,ixp+1] - 2*img[iyp,ixp]\n fyy = img[iyp-1,ixp] + img[iyp+1,ixp] - 2*img[iyp,ixp]\n fxy = (img[iyp+1,ixp+1] + img[iyp-1,ixp-1] -\n img[iyp+1,ixp-1] - img[iyp-1,ixp+1])/4.\n b = np.array((fx,fy)).T\n A = np.array(((fxx,fxy),(fxy,fyy)))\n x = solve(A,b)\n xr = xp - psize*x[0]\n yr = yp - psize*x[1]\n return (img, xp,yp,xr,yr)", "def normalize(img):\r\n return ((img / 255.0) - 0.5) / 0.5", "def forward_train(self, imgs, label, token_ids=None, segment_ids=None, input_mask=None, \n mlm_label=None, dvae_imgs=None, v_token_mask=None, hog_features=None, img_metas=None, **kwargs): \n # (batch_size, num_clips*num_crops, channel, num_segments, h, w) -> (batch_size*num_clips*num_crops, channel, num_segments, h, w)\n imgs = imgs.reshape((-1, ) + imgs.shape[2:]) \n if self.from_scratch:\n imgs = imgs / 255.0\n # text reshape: (batch_size, num_candidates, seq_length) -> (batch_size * num_candidates, seq_length)\n token_ids = token_ids.reshape((-1, ) + token_ids.shape[2:])\n text_input_mask = input_mask.reshape((-1, ) + input_mask.shape[2:])\n if mlm_label is not None:\n mlm_label = mlm_label.reshape((-1, ) + mlm_label.shape[2:])\n\n\n visual_token = self.extract_visual_feat(imgs) # b, d, T, h, w\n\n B, D, T, H, W = visual_token.shape\n losses = dict()\n # -------------- nce loss ------------------- #\n if hasattr(self, 'ssl_head'):\n input_ssl_ids = torch.where(mlm_label == -100, token_ids.clone(), mlm_label.clone())\n input_ssl_mask = text_input_mask.clone()\n text_only_out = self.text_backbone(input_ssl_ids, input_ssl_mask)\n # ------------ complete T -------------- #\n text_out_no_mask = text_only_out['last_hidden_state']\n visual_emb, text_emb = self.ssl_head(visual_token, text_out_no_mask, input_ssl_mask, input_ssl_ids)\n\n\n # ------------ complete V ---------------- #\n visual_token = visual_token.view(B, D, T, -1).permute(0, 2, 3, 1)\n\n\n # ---------- foward mask text input ---------- # \n text_out_with_mask = self.text_backbone(token_ids, text_input_mask)\n text_out_last_hidden_state = text_out_with_mask['last_hidden_state']\n\n # ---------- forward mask v input ------------ #\n visual_token_with_mask, v_mask = self.extract_visual_feat(imgs.clone(), v_token_mask) # b, d, T, h, w\n visual_token_mask = visual_token_with_mask.view(B, D, T, -1).permute(0, 2, 3, 1)\n \n v_fusion_output = self.multimodal_backbone(visual_token=visual_token_mask, text_input_mask=text_input_mask.clone(), text_input_embeds=text_out_no_mask.clone())\n \n t_fusion_output = self.multimodal_backbone(visual_token=visual_token, text_input_mask=text_input_mask, text_input_embeds=text_out_last_hidden_state)\n # for mlm #\n t_last_hidden_state = t_fusion_output['t_last_hidden_state']\n\n\n\n\n\n # ------------ MLM loss ------------ #\n\n if mlm_label is not None and self.mlm_head is not None:\n # we use mask text for MLM\n # because we doubt there will be miss interaction between wrong img-text pair \n # and the model not learn good relationship between vision and language\n # -------- forward masked text ----------- #\n mlm_prediction_score = self.mlm_head(t_last_hidden_state)\n \n if self.mlm_loss_func is not None:\n mlm_label_idx = torch.where(mlm_label.view(-1) != -100)\n 
mlm_prediction_mask_score = mlm_prediction_score.view(-1, self.text_vocab_size)[mlm_label_idx[0], :]\n mlm_label_mask = mlm_label.view(-1)[mlm_label_idx]\n mlm_loss = self.mlm_loss_func(mlm_prediction_mask_score, mlm_label_mask)\n else:\n mlm_loss = self.loss_func(mlm_prediction_score.view(-1, self.text_vocab_size), mlm_label.view(-1))\n losses['mlm_loss'] = mlm_loss\n\n\n # ------- Tri-modal alignment with mask sample and ranking --------- #\n if self.mlm_ssl_V_head is not None:\n mlm_visual_feat = v_fusion_output['t_last_hidden_state'][:, 0]\n mask_visual_recon_emb = self.mlm_ssl_V_head(mlm_visual_feat)\n mask_word_emb = self.ssl_head.forward_text(text_out_last_hidden_state) if self.use_Cmask else None\n loss_cvt_rank = self.ssl_loss(visual_emb, text_emb, mask_word_emb, mask_visual_recon_emb)\n losses.update(loss_cvt_rank)\n\n\n if self.symmetry_rank:\n mlm_word_feat = t_last_hidden_state[:, 0]\n mask_word_recon_emb = self.mlm_ssl_T_head(mlm_word_feat)\n\n mask_visual_emb = self.ssl_head.forward_vision(visual_token_with_mask) if self.use_Cmask else None\n \n loss_ctv_rank = self.ssl_loss(text_emb, visual_emb, mask_visual_emb, mask_word_recon_emb)\n loss_ctv_rank['v_nce_loss'] = loss_ctv_rank.pop('nce_loss')\n \n if self.ssl_loss.use_rank:\n loss_ctv_rank['rank_v_vm_loss'] = loss_ctv_rank.pop('rank_t_tm_loss')\n\n \n\n losses.update(loss_ctv_rank)\n\n\n\n return losses", "def denormalize(img, dataset=\"imagenet\"):\r\n if dataset == \"cifar10\":\r\n c_std = [0.247, 0.243, 0.261]\r\n c_mean = [0.4914, 0.4822, 0.4466]\r\n elif dataset == \"imagenet\":\r\n c_std = [0.229, 0.224, 0.225]\r\n c_mean = [0.485, 0.456, 0.406]\r\n for i in [0, 1, 2]:\r\n img[i] = img[i] * c_std[i] + c_mean[i]\r\n return img", "def densenet121(growth_rate=32, compression=1.0):\n\n init = K.initializers.he_normal(seed=None)\n X = K.Input(shape=(224, 224, 3))\n\n batch_1 = K.layers.BatchNormalization()(X)\n activation_1 = K.layers.Activation('relu')(batch_1)\n\n conv_1 = K.layers.Conv2D(filters=64,\n kernel_size=7,\n strides=2,\n padding='same',\n kernel_initializer=init)(activation_1)\n max_pool = K.layers.MaxPooling2D(pool_size=[3, 3],\n strides=2,\n padding='same')(conv_1)\n\n ly_1, nb_filters1 = dense_block(max_pool, 64, growth_rate, 6)\n\n ly_2, nb_filters2 = transition_layer(ly_1, nb_filters1, compression)\n ly_3, nb_filters3 = dense_block(ly_2, nb_filters2, growth_rate, 12)\n\n ly_4, nb_filters4 = transition_layer(ly_3, nb_filters3, compression)\n ly_5, nb_filters5 = dense_block(ly_4, nb_filters4, growth_rate, 24)\n\n ly_6, nb_filters6 = transition_layer(ly_5, nb_filters5, compression)\n ly_7, nb_filters7 = dense_block(ly_6, nb_filters6, growth_rate, 16)\n\n avg_pool = K.layers.AveragePooling2D(pool_size=[7, 7],\n strides=7,\n padding='same')(ly_7)\n\n dense = K.layers.Dense(1000, activation='softmax',\n kernel_initializer=init)(avg_pool)\n\n model = K.models.Model(inputs=X, outputs=dense)\n return model", "def normalize(image):\n return image / 127.5 - 1.", "def predict(self, image, normalize=True):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = cv2.resize(\n image, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_CUBIC)\n \n # Facenet prewhiten\n mean = np.mean(image)\n std = np.std(image)\n std_adj = np.maximum(std, 1.0/np.sqrt(image.size))\n image = np.multiply(np.subtract(image, mean), 1/std_adj)\n \n images = [image]\n\n with tf.Session(graph=self.graph) as sess:\n # Get input and output tensors\n images_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n embeddings = 
tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n\n # Run forward pass to calculate embeddings\n feed_dict = {images_placeholder: images,\n phase_train_placeholder: False}\n emb = sess.run(embeddings, feed_dict=feed_dict)\n\n if normalize:\n return emb[0, :].astype(np.float64) / np.linalg.norm(emb[0, :])\n\n else:\n return emb[0, :].astype(np.float64)", "def preprocess(self, resized_inputs):\n return (2.0 / 255.0) * resized_inputs - 1.0", "def problem4():\n\n # load image\n img = plt.imread(\"data/a1p4.png\")\n\n # create filters\n fx, fy = createfilters()\n\n # filter image\n imgx, imgy = filterimage(img, fx, fy)\n\n # show filter results\n fig = plt.figure()\n ax1 = fig.add_subplot(121)\n ax1.imshow(imgx, \"gray\", interpolation=\"none\")\n ax1.set_title(\"x derivative\")\n ax1.axis(\"off\")\n ax2 = plt.subplot(122)\n ax2.imshow(imgy, \"gray\", interpolation=\"none\")\n ax2.set_title(\"y derivative\")\n ax2.axis(\"off\")\n\n # show gradient magnitude\n plt.figure()\n plt.imshow(np.sqrt(imgx**2 + imgy**2), \"gray\", interpolation=\"none\")\n plt.axis(\"off\")\n plt.title(\"Derivative magnitude\")\n\n # threshold derivative\n threshold = 0.029750135 + 0.050867412\n # 0.029750135 is the mean value of the gradient magnitude over the whole image\n # 0.050867412 is the standard deviation of the gradient magnitude over the whole image\n # by the empirical rule in statistics, using the sum of the mean value and the standard deviation\n # guarantees this threshold exceeds more than 68.27% of all gradient magnitudes (1 sigma rule for the Normal distribution)\n edges = detectedges(imgx,imgy,threshold)\n plt.figure()\n plt.imshow(edges > 0, \"gray\", interpolation=\"none\")\n plt.axis(\"off\")\n plt.title(\"Binary edges\")\n\n # non maximum suppression\n edges2 = nonmaxsupp(edges,imgx,imgy)\n plt.figure()\n plt.imshow(edges2 > 0, \"gray\", interpolation=\"none\")\n plt.axis(\"off\")\n plt.title(\"Non-maximum suppression\")\n\n plt.show()", "def pnet_process(self, image, height, width):\n image = cv2.resize(image, (width, height)).astype(np.float32)\n image[:, :, 0] -= self.mean[0]\n image[:, :, 1] -= self.mean[1]\n image[:, :, 2] -= self.mean[2]\n image *= self.scale_factor\n image = np.transpose(image, (2, 0, 1))\n image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))\n return image.copy()", "def softmax(Z):\r\n\r\n\r\n eZ = np.exp(Z)\r\n return eZ / np.sum(eZ, axis=0, keepdims=True)" ]
[ "0.68137693", "0.6105213", "0.58586067", "0.5774048", "0.5770297", "0.57465637", "0.5704582", "0.5688945", "0.56770426", "0.5661012", "0.5661012", "0.56132793", "0.5603475", "0.5600591", "0.55894667", "0.5575768", "0.5571582", "0.5547392", "0.55281556", "0.5469116", "0.54566944", "0.54355335", "0.54355335", "0.54164475", "0.53806317", "0.53728473", "0.5366576", "0.5364102", "0.536169", "0.5353351", "0.5349684", "0.53362316", "0.53362316", "0.53337", "0.5331769", "0.5330408", "0.5330208", "0.5317838", "0.5310139", "0.5308235", "0.52846247", "0.52775794", "0.5275427", "0.5240501", "0.5236898", "0.52350026", "0.51944953", "0.5192382", "0.5186044", "0.51721835", "0.5171314", "0.51689935", "0.516836", "0.5166756", "0.5165031", "0.5155482", "0.5155058", "0.51454", "0.5136948", "0.5134006", "0.5130955", "0.51269674", "0.5115474", "0.5111233", "0.5108805", "0.5108408", "0.5102296", "0.5101459", "0.5099728", "0.50914496", "0.5077858", "0.5077507", "0.50763476", "0.5076064", "0.5074886", "0.50708884", "0.50682753", "0.50647444", "0.5062022", "0.50482273", "0.50457615", "0.50398964", "0.5039263", "0.5033261", "0.5027837", "0.5026701", "0.5022848", "0.5021007", "0.50184745", "0.50178343", "0.5016252", "0.50148666", "0.5014181", "0.5010685", "0.49977297", "0.49976116", "0.49867433", "0.49865398", "0.4986303", "0.49847785" ]
0.5255144
43
CPU mnist test for TF Training Instance Type c5.4xlarge Given above parameters, registers a task with family named after this test, runs the task, and waits for the task to be stopped before doing teardown operations of instance and cluster.
def test_ecs_tensorflow_training_mnist_cpu(cpu_only, ecs_container_instance, tensorflow_training, training_cmd, ecs_cluster_name): instance_id, cluster_arn = ecs_container_instance ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def run_sm_perf_test(image_uri, num_nodes, region):\n _, framework_version = get_framework_and_version_from_tag(image_uri)\n if framework_version.startswith(\"1.\"):\n pytest.skip(\"Skipping benchmark test on TF 1.x images.\")\n\n processor = \"gpu\" if \"gpu\" in image_uri else \"cpu\"\n device_cuda_str = (\n f\"{processor}-{get_cuda_version_from_tag(image_uri)}\" if processor == \"gpu\" else processor\n )\n\n ec2_instance_type = \"p3.16xlarge\" if processor == \"gpu\" else \"c5.18xlarge\"\n\n py_version = \"py2\" if \"py2\" in image_uri else \"py37\" if \"py37\" in image_uri else \"py3\"\n\n time_str = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n commit_info = os.getenv(\"CODEBUILD_RESOLVED_SOURCE_VERSION\")\n target_upload_location = os.path.join(\n BENCHMARK_RESULTS_S3_BUCKET,\n \"tensorflow\",\n framework_version,\n \"sagemaker\",\n \"training\",\n device_cuda_str,\n py_version,\n )\n training_job_name = f\"tf{framework_version[0]}-tr-bench-{device_cuda_str}-{num_nodes}-node-{py_version}-{commit_info[:7]}-{time_str}\"\n\n # Inserting random sleep because this test starts multiple training jobs around the same time, resulting in\n # a throttling error for SageMaker APIs.\n time.sleep(Random(x=training_job_name).random() * 60)\n\n test_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"resources\")\n venv_dir = os.path.join(test_dir, \"sm_benchmark_venv\")\n\n ctx = Context()\n\n with ctx.cd(test_dir), ctx.prefix(f\"source {venv_dir}/bin/activate\"):\n log_file = f\"results-{commit_info}-{time_str}-{framework_version}-{device_cuda_str}-{py_version}-{num_nodes}-node.txt\"\n run_out = ctx.run(\n f\"timeout 45m python tf_sm_benchmark.py \"\n f\"--framework-version {framework_version} \"\n f\"--image-uri {image_uri} \"\n f\"--instance-type ml.{ec2_instance_type} \"\n f\"--node-count {num_nodes} \"\n f\"--python {py_version} \"\n f\"--region {region} \"\n f\"--job-name {training_job_name}\"\n f\"2>&1 | tee {log_file}\",\n warn=True,\n echo=True,\n )\n\n if not (run_out.ok or run_out.return_code == 124):\n target_upload_location = os.path.join(target_upload_location, \"failure_log\")\n\n ctx.run(\n f\"aws s3 cp {os.path.join(test_dir, log_file)} {os.path.join(target_upload_location, log_file)}\"\n )\n\n LOGGER.info(f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\")\n\n result_statement, throughput = _print_results_of_test(\n os.path.join(test_dir, log_file), processor\n )\n throughput /= num_nodes\n\n assert run_out.ok, (\n f\"Benchmark Test failed with return code {run_out.return_code}. 
\"\n f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\"\n )\n\n threshold_table = (\n (\n TENSORFLOW_SM_TRAINING_CPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_CPU_4NODE_THRESHOLD\n )\n if processor == \"cpu\"\n else TENSORFLOW_SM_TRAINING_GPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_GPU_4NODE_THRESHOLD\n )\n threshold = get_threshold_for_image(framework_version, threshold_table)\n LOGGER.info(\n f\"tensorflow {framework_version} sagemaker training {device_cuda_str} {py_version} \"\n f\"imagenet {num_nodes} nodes Throughput: {throughput} images/sec, threshold: {threshold} images/sec\"\n )\n assert throughput > threshold, (\n f\"tensorflow {framework_version} sagemaker training {processor} {py_version} imagenet {num_nodes} nodes \"\n f\"Benchmark Result {throughput} does not reach the threshold {threshold}\"\n )", "def test_ecs_tensorflow_training_mnist_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n num_gpus = ec2_utils.get_instance_num_gpus(instance_id)\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id,\n num_gpus=num_gpus)", "def meta_train(tasks, model, args, device, method='random', meta_iters=10000, num_updates=5, meta_batch_size=5):\n # Define logging\n os.makedirs(args.save_path, exist_ok=True)\n writer = SummaryWriter(\n os.path.join(args.save_path, 'runs', '{}'.format(datetime.now()).replace(\":\", \"_\")))\n\n header = ' Time Task Iteration Loss Accuracy'\n log_template = '{:>10} {:>25} {:10.0f} {:10.6f} {:10.6f}'\n test_template = 'Test mean: {}, Test std: {}'\n\n print(header)\n start = time.time()\n\n # Define optimizers, lr schedulers and loss function\n optimizer_bert = AdamW(params=model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n optimizer = optim.Adam(params=chain(model.proto_net.encoder.mlp.parameters(),\n model.output_layer.parameters()),\n lr=args.lr)\n scheduler_bert = get_cosine_schedule_with_warmup(optimizer_bert, 200, meta_iters)\n scheduler = get_cosine_schedule_with_warmup(optimizer, 0, meta_iters)\n # ProtoNets always have CrossEntropy loss due to softmax output\n cross_entropy = nn.CrossEntropyLoss()\n\n print('Loading Tokenizer..')\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n special_tokens_dict = {'additional_special_tokens': [\"[MNT]\", \"[URL]\"]}\n\n num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)\n print('We have added', num_added_toks, 'tokens')\n model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. 
the length of the tokenizer.\n\n # setup task sampler and task model\n sampler = TaskSampler(tasks, method=method, custom_task_ratio=args.custom_task_ratio, supp_query_split=True)\n task_model = type(model)(args)\n task_model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n\n iterations = 0\n # Iterate over the data\n train_iter = sampler.get_iter('train', tokenizer, batch_size=args.batch_size, shuffle=True)\n model.train()\n\n # setup validation task and episodes for evaluation\n val_task = get_validation_task(args)\n episodes = torch.load(args.episodes)\n\n # dummy data to overwrite old values of task model output layer\n dummy_w = torch.randn((args.mlp_dims[-1], 2))\n dummy_b = torch.randn(2)\n\n average_query_loss = 0\n best_query_loss = 1e+9\n best_test_mean = -1\n best_test_last = -1\n convergence_tolerance_cnt = 0\n # outer loop (meta-iterations)\n for i in range(meta_iters):\n grads = []\n task_losses_inner = {}\n task_accuracies_inner = {}\n task_losses_outer = {}\n task_accuracies_outer = {}\n # inner loop (sample different tasks)\n for task_sample in range(meta_batch_size):\n # clone original model\n task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n task_model.to(device)\n task_model.train()\n\n # new optimizer for every new task model\n task_optimizer_bert = optim.SGD(params=task_model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n task_optimizer = optim.SGD(params=chain(task_model.proto_net.encoder.mlp.parameters(),\n task_model.output_layer.parameters()),\n lr=args.inner_lr)\n\n # prepare support and query set\n batch = next(train_iter)\n support = batch[:3]\n query = batch[3:]\n\n # setup output layer (via meta-model's prototype network)\n proto_embeddings = model.proto_net(support[0].to(device), attention_mask=support[2].to(device))\n prototypes = model.proto_net.calculate_centroids((proto_embeddings, support[1]), sampler.get_num_classes())\n W, b = task_model.calculate_output_params(prototypes.detach())\n task_model.initialize_classifier(W, b)\n\n # train some iterations on support set\n for update in range(num_updates):\n task_optimizer_bert.zero_grad()\n task_optimizer.zero_grad()\n predictions = task_model(support[0].to(device), attention_mask=support[2].to(device))\n task_loss = cross_entropy(predictions, support[1].long().squeeze().to(device))\n task_loss.backward()\n task_optimizer.step()\n task_optimizer_bert.step()\n\n # record task losses and accuracies for logging\n task_losses_inner[sampler.get_name()] = task_loss.item()\n task_accuracies_inner[sampler.get_name()] = sampler.calculate_accuracy(predictions, support[1].to(device))\n\n # trick to add prototypes back to computation graph\n W = 2 * prototypes + (W - 2 * prototypes).detach()\n b = -prototypes.norm(dim=1)**2 + (b + prototypes.norm(dim=1)**2).detach()\n task_model.initialize_classifier(W, b, hard_replace=True)\n\n # calculate gradients for meta update on the query set\n predictions = task_model(query[0].to(device), attention_mask=query[2].to(device))\n query_loss = cross_entropy(predictions, query[1].long().squeeze().to(device))\n query_loss.backward()\n\n # record task losses and accuracies for logging\n task_losses_outer[sampler.get_name()] = query_loss.item()\n task_accuracies_outer[sampler.get_name()] = sampler.calculate_accuracy(predictions, query[1].to(device))\n average_query_loss += query_loss.item()\n\n # register W and b parameters again to avoid error 
in weight update\n W = nn.Parameter(W)\n b = nn.Parameter(b)\n task_model.initialize_classifier(W, b, hard_replace=True)\n\n # save gradients of first task model\n if task_sample == 0:\n for param in task_model.parameters():\n if param.requires_grad and param.grad is not None:\n grads.append(param.grad.clone())\n # add the gradients of all task samples\n else:\n p = 0\n for param in task_model.parameters():\n if param.requires_grad and param.grad is not None:\n grads[p] += param.grad.clone()\n p += 1\n\n # perform meta update\n # first load/add the calculated gradients in the meta-model\n # (already contains gradients from prototype calculation)\n p = 0\n for param in model.parameters():\n if param.requires_grad and param.grad is not None:\n param.grad += grads[p]\n p += 1\n # update model parameters according to the gradients from inner loop (clear gradients afterwards)\n optimizer.step()\n optimizer_bert.step()\n scheduler.step()\n scheduler_bert.step()\n optimizer.zero_grad()\n optimizer_bert.zero_grad()\n\n iterations += 1\n if iterations % args.log_every == 0:\n average_query_loss /= (args.log_every*meta_batch_size)\n iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n iter_acc = sum(task_accuracies_outer.values()) / len(task_accuracies_outer.values())\n writer.add_scalar('Meta_Average/Loss/outer'.format(sampler.get_name()), iter_loss, iterations)\n writer.add_scalar('Meta_Average/Accuracy/outer'.format(sampler.get_name()), iter_acc, iterations)\n for t in tasks:\n task_name = t.get_name()\n if task_name in task_losses_inner.keys():\n writer.add_scalar('{}/Loss/inner'.format(task_name), task_losses_inner[task_name], iterations)\n writer.add_scalar('{}/Accuracy/inner'.format(task_name), task_accuracies_inner[task_name], iterations)\n writer.add_scalar('{}/Loss/outer'.format(task_name), task_losses_outer[task_name], iterations)\n writer.add_scalar('{}/Accuracy/outer'.format(task_name), task_accuracies_outer[task_name], iterations)\n print(log_template.format(\n str(timedelta(seconds=int(time.time() - start))),\n sampler.get_name(),\n iterations,\n iter_loss,\n iter_acc))\n\n # save best snapshot\n if average_query_loss < best_query_loss:\n best_query_loss = average_query_loss\n average_query_loss = 0\n snapshot_prefix = os.path.join(args.save_path, 'best_query')\n snapshot_path = (\n snapshot_prefix +\n '_loss_{:.5f}_iter_{}_model.pt'\n ).format(best_query_loss, iterations)\n model.save_model(snapshot_path)\n # Keep only the best snapshot\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n # evaluate in k shot fashion\n if iterations % args.eval_every == 0:\n task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n test_mean, test_std = k_shot_testing(task_model, episodes, val_task, device, num_updates=args.inner_updates,\n num_test_batches=args.num_test_batches)\n writer.add_scalar('{}/Acc'.format(val_task.get_name()), test_mean, iterations)\n writer.add_scalar('{}/STD'.format(val_task.get_name()), test_std, iterations)\n print(test_template.format(test_mean, test_std), flush=True)\n if test_mean > best_test_mean:\n best_test_mean = test_mean\n snapshot_prefix = os.path.join(args.save_path, 'best_test_{}'.format(val_task.get_name()))\n snapshot_path = (\n snapshot_prefix +\n '_acc_{:.5f}_iter_{}_model.pt'\n ).format(best_test_mean, iterations)\n model.save_model(snapshot_path)\n # Keep only the best snapshot\n 
for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n \n if test_mean > best_test_last:\n best_test_last = best_test_mean\n convergence_tolerance_cnt = 0\n else:\n convergence_tolerance_cnt += 1\n\n if convergence_tolerance_cnt == args.convergence_tolerance:\n break\n\n\n # saving redundant parameters\n # Save model checkpoints.\n if iterations % args.save_every == 0:\n iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n snapshot_prefix = os.path.join(args.save_path, 'snapshot')\n snapshot_path = (\n snapshot_prefix +\n '_iter_{}_loss_{}_model.pt'\n ).format(iterations, iter_loss)\n logging.debug('Saving model...')\n model.save_model(snapshot_path)\n # Keep only the last snapshot\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n writer.close()", "def run_time_operation(self, learning_option, cluster):\r\n\r\n # whether or not test procedure\r\n is_train = tf.placeholder_with_default(True, shape=())\r\n learning_option['is_train'] = is_train\r\n\r\n # get worker info: worker num, device type, device num\r\n device = self.get_attr('device')\r\n num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n # construct API\r\n def apiConstructor():\r\n # CIFAR-10 images: [224, 224, 3]\r\n # label: [1000]\r\n def train_in():\r\n x, y = learning_option.get('train_imagenet')\r\n return x, y\r\n def test_in():\r\n x, y = learning_option.get('test_imagenet')\r\n return x, y\r\n\r\n images, labels = tf.cond(is_train, train_in, test_in)\r\n # get output dimension\r\n outdim = list(images.get_shape()[i].value for i in xrange(len(images.get_shape())))\r\n\r\n # set output\r\n self.set_output('image', images)\r\n self.set_output('label', labels)\r\n self.set_dimension('image', outdim)\r\n\r\n # set tf summary\r\n tf.summary.image(self.name, images, max_outputs=10)\r\n\r\n with tf.variable_scope(self.name):\r\n # single node, model parallelism: explicit worker mapping\r\n # data parallelism: equally duplicate model\r\n if learning_option.get(\"parallel\", None) != \"DP\":\r\n with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n apiConstructor()\r\n else:\r\n apiConstructor()", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if 
self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)", "def generate_mnist_datasets(\n datapoints_per_task,\n K_list,\n cir_inner_loop_list, \n test_task_idx, \n val_task_idx,\n n_finetune_sets):\n\n # arbitrarily chosen, class-imbalance rate in outer and inner training loops\n cir_outer_loop = 0.5\n cir_inner_loop = 0.5\n # class-imbalance rate in the test sets of the test and validation tasks\n cir_test = 0.5\n # arbitrarily chosen, percentage of data that will be used in the inner training loop\n percent_data_inner_loop = 0.5\n\n percent_data_finetune_val = 0.8\n\n n_test_set = 4000\n\n test_task_idx, val_task_idx = test_task_idx, val_task_idx\n\n finetune_sets_per_K_cir = {}\n test_task_test_set, val_task = {}, {}\n \n\n train_task_list_inner, train_task_list_outer = [], []\n\n train_tasks_idxs = [i for i in range(0,10) if i not in [val_task_idx, test_task_idx]]\n\n base_path = '/home/USER/Documents'\n if (not (os.path.exists(base_path))):\n base_path = '/home/ubuntu/Projects'\n train_images, train_labels = loadlocal_mnist(\n images_path= base_path + '/MAML/raw_data/MNIST_data/train-images-idx3-ubyte', \n labels_path= base_path + '/MAML/raw_data/MNIST_data/train-labels-idx1-ubyte')\n\n test_images, test_labels = loadlocal_mnist(\n images_path= base_path + '/MAML/raw_data/MNIST_data/t10k-images-idx3-ubyte', \n labels_path= base_path + '/MAML/raw_data/MNIST_data/t10k-labels-idx1-ubyte')\n\n\n train_images, test_images = train_images.reshape((-1,28,28))/255.0, test_images.reshape((-1,28,28))/255.0\n images = np.concatenate((train_images, test_images))\n labels = np.concatenate((train_labels, test_labels))\n\n test_task_normal_indexes, val_task_normal_indexes = list(np.nonzero(labels == test_task_idx)[0]), list(np.nonzero(train_labels == val_task_idx)[0])\n test_task_X_normal, val_task_X_normal = images[test_task_normal_indexes],train_images[val_task_normal_indexes]\n test_task_Y_normal, val_task_Y_normal = np.zeros_like(labels[test_task_normal_indexes]), np.zeros_like(train_labels[val_task_normal_indexes])\n\n\n # val and test task have anomalies (samples of other numbers) that are not used for training\n # besides the two sets of anomalies (one for val task and one for test task are disjoint)\n test_task_anomalous_indexes = list(np.nonzero(test_labels[:5000] != test_task_idx)[0])\n val_task_anomalous_indexes= [index for index, element in enumerate(list(test_labels[5000:])) if element not in [val_task_idx, test_task_idx]]\n\n\n test_task_X_anomalous, val_task_X_anomalous = 
test_images[:5000][test_task_anomalous_indexes],test_images[5000:][val_task_anomalous_indexes]\n test_task_Y_anomalous, val_task_Y_anomalous = np.ones_like(test_labels[:5000][test_task_anomalous_indexes]), np.ones_like(test_labels[5000:][val_task_anomalous_indexes])\n\n test_task_X, val_task_X = np.concatenate((test_task_X_normal, test_task_X_anomalous)), np.concatenate((val_task_X_normal, val_task_X_anomalous))\n test_task_Y, val_task_Y = np.expand_dims(np.concatenate((test_task_Y_normal, test_task_Y_anomalous)),-1), np.expand_dims(np.concatenate((val_task_Y_normal, val_task_Y_anomalous)),-1)\n\n\n train_tasks_X_list, train_tasks_Y_list = [], []\n for task_idx in train_tasks_idxs:\n train_task_normal_indexes = list(np.nonzero(train_labels == task_idx)[0]) \n train_task_anomalous_indexes = [index for index, element in enumerate(list(train_labels)) if element not in [task_idx, val_task_idx, test_task_idx]]\n assert(len(np.nonzero(train_labels[train_task_anomalous_indexes] == val_task_idx)[0]) == 0)\n assert(len(np.nonzero(train_labels[train_task_anomalous_indexes] == test_task_idx)[0]) == 0)\n train_task_X_normal, train_task_X_anomalous = train_images[train_task_normal_indexes], train_images[train_task_anomalous_indexes]\n train_task_Y_normal, train_task_Y_anomalous = np.zeros_like(train_labels[train_task_normal_indexes]), np.ones_like(train_labels[train_task_anomalous_indexes])\n train_task_X, train_task_Y = np.concatenate((train_task_X_normal, train_task_X_anomalous)), np.concatenate((train_task_Y_normal, train_task_Y_anomalous))\n train_tasks_X_list.append(train_task_X)\n train_tasks_Y_list.append(np.expand_dims(train_task_Y,-1))\n\n\n\n # building test task sets of data\n normal_indexes, anomaly_indexes = list(np.nonzero(test_task_Y == 0)[0]), list(np.nonzero(test_task_Y == 1)[0])\n n_test_set_normal = int(n_test_set*cir_test)\n test_set_normal_indexes = random.sample(normal_indexes, n_test_set_normal)\n test_set_anomaly_indexes = random.sample(anomaly_indexes, n_test_set - n_test_set_normal)\n test_set_indexes = []\n test_set_indexes += test_set_normal_indexes\n test_set_indexes += test_set_anomaly_indexes\n\n test_task_test_set['test_X'], test_task_test_set['test_Y'] = test_task_X[test_set_indexes], test_task_Y[test_set_indexes]\n\n\n #shuffle\n s_test = np.arange(test_task_test_set['test_X'].shape[0])\n np.random.shuffle(s_test)\n test_task_test_set['test_X'], test_task_test_set['test_Y'] = test_task_test_set['test_X'][s_test], test_task_test_set['test_Y'][s_test]\n\n rest_normal_indexes = [index for index in normal_indexes if index not in test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in test_set_anomaly_indexes]\n\n\n for K in K_list:\n finetune_sets_per_cir = {}\n for cir in cir_inner_loop_list:\n\n rest_normal_indexes = [index for index in normal_indexes if index not in test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in test_set_anomaly_indexes]\n \n finetune_sets_list = []\n\n disjoint = False\n if(cir*K*n_finetune_sets<len(rest_normal_indexes)):\n disjoint = True\n\n n_finetune_normal = int(K*cir)\n n_finetune_anomaly = K - n_finetune_normal\n for i in range(n_finetune_sets):\n # if enough for disjoint do that\n # else sample randomly\n # store in a dict with keys cir_K\n finetune_normal_indexes = random.sample(rest_normal_indexes, n_finetune_normal)\n finetune_anomaly_indexes = random.sample(rest_anomaly_indexes, n_finetune_anomaly)\n finetune_indexes = []\n finetune_indexes 
+= finetune_normal_indexes\n finetune_indexes += finetune_anomaly_indexes\n finetune_set = {}\n finetune_set['finetune_X'], finetune_set['finetune_Y'] = test_task_X[finetune_indexes], test_task_Y[finetune_indexes]\n\n #shuffle\n s_finetune = np.arange(finetune_set['finetune_X'].shape[0])\n np.random.shuffle(s_finetune)\n finetune_set['finetune_X'], finetune_set['finetune_Y'] = finetune_set['finetune_X'][s_finetune], finetune_set['finetune_Y'][s_finetune]\n\n finetune_sets_list.append(finetune_set)\n \n if(disjoint):\n rest_normal_indexes = [index for index in rest_normal_indexes if index not in finetune_normal_indexes]\n rest_anomaly_indexes = [index for index in rest_anomaly_indexes if index not in finetune_anomaly_indexes]\n\n finetune_sets_per_cir[str(cir)] = finetune_sets_list\n\n finetune_sets_per_K_cir[str(K)] = finetune_sets_per_cir\n\n\n #building val task sets of data\n normal_indexes, anomaly_indexes = list(np.nonzero(val_task_Y == 0)[0]), list(np.nonzero(val_task_Y == 1)[0])\n n_val_finetune = int(percent_data_finetune_val*datapoints_per_task)\n n_val_test_set = datapoints_per_task - n_val_finetune\n n_val_test_set_normal = int(n_val_test_set*cir_test)\n val_test_set_normal_indexes = random.sample(normal_indexes, n_val_test_set_normal)\n\n\n val_test_set_anomaly_indexes = random.sample(anomaly_indexes, n_val_test_set - n_val_test_set_normal)\n val_test_set_indexes = []\n val_test_set_indexes += val_test_set_normal_indexes\n val_test_set_indexes += val_test_set_anomaly_indexes\n val_task['test_X'], val_task['test_Y'] = val_task_X[val_test_set_indexes], val_task_Y[val_test_set_indexes]\n\n\n rest_normal_indexes = [index for index in normal_indexes if index not in val_test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in val_test_set_anomaly_indexes]\n\n n_val_finetune_normal = int(n_val_finetune*cir_inner_loop)\n val_finetune_normal_indexes = random.sample(rest_normal_indexes, n_val_finetune_normal)\n val_finetune_anomaly_indexes = random.sample(rest_anomaly_indexes, n_val_finetune - n_val_finetune_normal)\n val_finetune_indexes = []\n val_finetune_indexes += val_finetune_normal_indexes\n val_finetune_indexes += val_finetune_anomaly_indexes\n\n val_task['finetune_X'], val_task['finetune_Y'] = val_task_X[val_finetune_indexes], val_task_Y[val_finetune_indexes]\n\n #shuffle\n s_val_finetune = np.arange(val_task['finetune_X'].shape[0])\n s_val_test = np.arange(val_task['test_X'].shape[0])\n np.random.shuffle(s_val_finetune)\n np.random.shuffle(s_val_test)\n\n val_task['finetune_X'], val_task['finetune_Y'] = val_task['finetune_X'][s_val_finetune], val_task['finetune_Y'][s_val_finetune]\n val_task['test_X'], val_task['test_Y'] = val_task['test_X'][s_val_test], val_task['test_Y'][s_val_test]\n\n\n\n # building sets of data of the training tasks\n for task_X, task_Y in zip(train_tasks_X_list, train_tasks_Y_list):\n normal_indexes, anomaly_indexes = list(np.nonzero(task_Y == 0)[0]), list(np.nonzero(task_Y == 1)[0])\n\n n_inner_loop = int(percent_data_inner_loop*datapoints_per_task)\n n_inner_loop_normal = int(n_inner_loop*cir_inner_loop)\n n_outer_loop = datapoints_per_task - n_inner_loop\n n_outer_loop_normal = int(n_outer_loop*cir_outer_loop)\n \n inner_loop_normal_indexes = random.sample(normal_indexes, n_inner_loop_normal)\n inner_loop_anomaly_indexes = random.sample(anomaly_indexes, n_inner_loop - n_inner_loop_normal)\n inner_loop_indexes = []\n inner_loop_indexes += inner_loop_normal_indexes\n inner_loop_indexes += 
inner_loop_anomaly_indexes\n\n train_task_inner_X, train_task_inner_Y = task_X[inner_loop_indexes], task_Y[inner_loop_indexes]\n\n rest_normal_indexes = [index for index in normal_indexes if index not in inner_loop_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in inner_loop_anomaly_indexes]\n\n \n outer_loop_normal_indexes = random.sample(rest_normal_indexes, n_outer_loop_normal)\n outer_loop_anomaly_indexes = random.sample(rest_anomaly_indexes, n_outer_loop - n_outer_loop_normal)\n outer_loop_indexes = []\n outer_loop_indexes += outer_loop_normal_indexes\n outer_loop_indexes += outer_loop_anomaly_indexes\n\n train_task_outer_X, train_task_outer_Y = task_X[outer_loop_indexes], task_Y[outer_loop_indexes]\n\n\n s_inner = np.arange(train_task_inner_X.shape[0])\n s_outer = np.arange(train_task_outer_X.shape[0])\n np.random.shuffle(s_inner)\n np.random.shuffle(s_outer)\n train_task_list_inner.append([train_task_inner_X[s_inner],train_task_inner_Y[s_inner]])\n train_task_list_outer.append([train_task_outer_X[s_outer],train_task_outer_Y[s_outer]])\n\n\n\n train_tasks_inner_X = np.stack([train_task_list_inner[i][0]\n for i in range(len(train_task_list_inner))], 0)\n train_tasks_inner_Y = np.stack([train_task_list_inner[i][1]\n for i in range(len(train_task_list_inner))], 0)\n train_tasks_outer_X = np.stack([train_task_list_outer[i][0]\n for i in range(len(train_task_list_outer))], 0)\n train_tasks_outer_Y = np.stack([train_task_list_outer[i][1]\n for i in range(len(train_task_list_outer))], 0)\n\n \n train_tasks = {'X_train_inner': train_tasks_inner_X,\n 'Y_train_inner': train_tasks_inner_Y,\n 'X_train_outer': train_tasks_outer_X,\n 'Y_train_outer': train_tasks_outer_Y\n }\n\n\n return train_tasks, val_task, test_task_test_set, finetune_sets_per_K_cir", "def main(params):\n params = run_train.prepare_experiment_folder(params, FOLDER_EXPERIMENT)\n\n # run_train.check_pathes_patterns(paths)\n tl_expt.set_experiment_logger(params['path_expt'])\n logging.info('COMPUTER: \\n%r', platform.uname())\n logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))\n\n tl_expt.create_subfolders(params['path_expt'], LIST_SUBFOLDER)\n\n path_csv = os.path.join(params['path_expt'], NAME_CSV_TRIPLES)\n df_paths = get_csv_triplets(\n params['path_list'], path_csv, params['path_images'], params['path_segms'], force_reload=FORCE_RERUN\n )\n\n dict_classif = seg_clf.load_classifier(params['path_classif'])\n params_clf = dict_classif['params']\n params_clf.update(params)\n logging.info(tl_expt.string_dict(params, desc='UPDATED PARAMETERS'))\n\n # perform on new images\n df_stat = pd.DataFrame()\n _wrapper_detection = partial(\n load_compute_detect_centers,\n params=params_clf,\n path_classif=params['path_classif'],\n path_output=params['path_expt'],\n )\n iterate = tl_expt.WrapExecuteSequence(_wrapper_detection, df_paths.iterrows(), nb_workers=params['nb_workers'])\n for dict_center in iterate:\n df_stat = df_stat.append(dict_center, ignore_index=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES_TEMP))\n\n df_stat.set_index(['image'], inplace=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES))\n logging.info('STATISTIC: \\n %r', df_stat.describe())", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n\n if benchmark_spec.tpus:\n mnist_benchmark_script = 'mnist_tpu.py'\n mnist_benchmark_cmd = ('cd tpu/models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 
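The dataset builder above repeats one sampling motif many times: draw K indices at class-imbalance rate cir from separate normal/anomaly pools, then remove the drawn indices so later draws stay disjoint. A condensed sketch of that motif (function name hypothetical):

import random

def sample_imbalanced(normal_idx, anomaly_idx, k, cir, disjoint=True):
    n_normal = int(k * cir)
    picked_normal = random.sample(normal_idx, n_normal)
    picked_anomaly = random.sample(anomaly_idx, k - n_normal)
    if disjoint:
        # Remove used indices so subsequent draws cannot overlap.
        normal_idx = [i for i in normal_idx if i not in picked_normal]
        anomaly_idx = [i for i in anomaly_idx if i not in picked_anomaly]
    return picked_normal + picked_anomaly, normal_idx, anomaly_idx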
'python {script} '\n '--data_dir={data_dir} '\n '--iterations={iterations} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size}'.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n iterations=benchmark_spec.iterations,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n else:\n mnist_benchmark_script = 'mnist.py'\n mnist_benchmark_cmd = ('cd models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size} '.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n mnist_benchmark_cmd = '{env} {cmd}'.format(\n env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)\n samples = []\n metadata = CreateMetadataDict(benchmark_spec)\n\n if benchmark_spec.train_steps > 0:\n if benchmark_spec.tpus:\n tpu = benchmark_spec.tpu_groups['train'].GetName()\n num_shards = '--num_shards={}'.format(\n benchmark_spec.tpu_groups['train'].GetNumShards())\n else:\n tpu = num_shards = ''\n\n if benchmark_spec.tpus:\n mnist_benchmark_train_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '\n '{num_shards} --noenable_predict'.format(\n cmd=mnist_benchmark_cmd,\n tpu=tpu,\n use_tpu=bool(benchmark_spec.tpus),\n train_steps=benchmark_spec.train_steps,\n num_shards=num_shards))\n else:\n mnist_benchmark_train_cmd = (\n '{cmd} --train_epochs={train_epochs} '.format(\n cmd=mnist_benchmark_cmd,\n train_epochs=benchmark_spec.train_epochs))\n\n start = time.time()\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)\n elapsed_seconds = (time.time() - start)\n samples.extend(MakeSamplesFromTrainOutput(\n metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))\n\n if benchmark_spec.eval_steps > 0:\n if benchmark_spec.tpus:\n mnist_benchmark_eval_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'\n .format(\n cmd=mnist_benchmark_cmd,\n use_tpu=bool(benchmark_spec.tpus),\n tpu=benchmark_spec.tpu_groups['eval'].GetName(),\n eval_steps=benchmark_spec.eval_steps))\n else:\n mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(\n cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))\n\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)\n samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,\n elapsed_seconds))\n return samples", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def train_distributed():\n # Distributed stuff learnt from this repo: https://github.com/GoogleCloudPlatform/cloudml-dist-\n # mnist-example/blob/master/trainer/task.py\n\n # For Distributed TensorFlow\n env = json.loads(os.environ.get('TF_CONFIG', '{}'))\n cluster_info = env.get('cluster')\n cluster_spec = tf.train.ClusterSpec(cluster_info)\n task_info = env.get('task')\n job_name, task_index = task_info['type'], task_info['index']\n\n device_fn = tf.train.replica_device_setter(\n cluster=cluster_spec,\n worker_device='/job:%s/task:%d' % (job_name, task_index))\n\n print(\"Start job:%s, index:%d\" % (job_name, task_index))\n\n server = tf.train.Server(cluster_spec,\n job_name=job_name, task_index=task_index)\n\n # Start a parameter server node\n if job_name == 'ps':\n 
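train_distributed above reads the standard TF_CONFIG environment variable to decide each node's role. A minimal sketch of that parsing step; the local-run fallback values are an assumption, not part of the original:

import json
import os

def parse_tf_config():
    # TF_CONFIG is a JSON blob such as:
    # {"cluster": {"ps": [...], "worker": [...], "master": [...]},
    #  "task": {"type": "worker", "index": 0}}
    env = json.loads(os.environ.get('TF_CONFIG', '{}'))
    cluster_info = env.get('cluster', {})
    task_info = env.get('task', {'type': 'master', 'index': 0})
    return cluster_info, task_info['type'], task_info['index']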
server.join()\n\n # Start a master/worker node\n if job_name == 'master' or job_name == 'worker':\n is_chief = (job_name == 'master')\n\n with tf.Graph().as_default() as graph: # TODO necessary?\n with tf.device(device_fn):\n # Prepare the data\n train_data, test_data, embeddings_file = prepare_data()\n\n # Create the model\n print(\"(%s,%d) Creating %d layers of %d units.\" %\n (job_name, task_index, FLAGS.num_layers, FLAGS.size))\n model = create_model(False)\n\n # Create train_dir\n if is_chief:\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n\n # TensorBoard summaries\n (test_loss, test_perplexity, bucket_loss_placeholders,\n bucket_perplexity_placeholders, summary, summary_writer) = create_summary_objects(graph)\n\n # Create supervisor\n init_op = tf.global_variables_initializer()\n\n # Create Supervisor. Disabling checkpoints and summaries, because we do that manually\n sv = tf.train.Supervisor(is_chief=is_chief, logdir=FLAGS.train_dir, init_op=init_op,\n init_fn=lambda session: after_init(session, model, embeddings_file),\n saver=model.saver, global_step=model.global_step,\n save_model_secs=0, save_summaries_secs=0, summary_op=None,\n summary_writer=None)\n\n with sv.managed_session(server.target) as sess:\n train(sess, model, train_data, test_data, summary, summary_writer, test_loss,\n test_perplexity, bucket_loss_placeholders, bucket_perplexity_placeholders,\n is_chief, job_name, task_index, sv.should_stop)\n sv.stop()", "def run_test(filepath):\n num_class = 120 # dogbreeds class\n model = Resnet50MO(num_class, checkpoint_path=None)\n\n # image settings\n crop_size = model.input_size\n scale_size = model.input_size\n input_size = model.input_size\n input_mean = model.input_mean\n input_std = model.input_std\n\n # hyperparams settings\n epochs = 1\n batch_size = 32 # mini-batch-size\n learning_rate = 0.01\n momentum = 0.5\n decay_factor = 10\n eval_freq = 5 # in epochs\n\n # data generator settings: dataset and dataloader\n train_dataset = DogImageset(filepath, input_size,\n input_mean=input_mean, input_std=input_std)\n val_dataset = DogImageset(filepath, input_size,\n input_mean=input_mean, input_std=input_std)\n \n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)\n\n # Loss and backprop settings\n model.cuda()\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(\n model.parameters(),\n lr=learning_rate,\n momentum=momentum\n )\n\n run_model_train_test(model, train_loader, criterion, optimizer)", "def run_universal_demo(args, use_gpu: bool = True) -> None:\n if \"scannet\" in args.dataset:\n args.img_name_unique = False\n else:\n args.img_name_unique = True\n\n args.u_classes = names_utils.get_universal_class_names()\n args.print_freq = 10\n\n args.split = \"test\"\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in args.test_gpu)\n logger.info(args)\n logger.info(\"=> creating model ...\")\n args.num_model_classes = len(args.u_classes)\n\n itask = InferenceTask(\n args,\n base_size=args.base_size,\n crop_h=args.test_h,\n crop_w=args.test_w,\n input_file=args.input_file,\n model_taxonomy=\"universal\",\n eval_taxonomy=\"universal\",\n scales=args.scales,\n )\n itask.execute()", "def backend_train_test_loop(e=None, timeout=30,\n is_compute_contributivity='True',\n is_parallelize=''):\n if is_parallelize == '':\n is_parallelize = None\n else:\n is_parallelize = strtobool(is_parallelize)\n\n from 
databoard.db_tools import backend_train_test_loop\n is_compute_contributivity = strtobool(is_compute_contributivity)\n backend_train_test_loop(\n e, timeout, is_compute_contributivity, is_parallelize)", "def test_training():\n config = SmartDict()\n\n config.NETWORK_CLASS = LMBiSeNet\n config.DATASET_CLASS = DummyCamvid\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [128, 160]\n config.BATCH_SIZE = 2\n config.TEST_STEPS = 1\n config.MAX_STEPS = 2\n config.SAVE_CHECKPOINT_STEPS = 1\n config.KEEP_CHECKPOINT_MAX = 5\n config.SUMMARISE_STEPS = 1\n config.IS_PRETRAIN = False\n config.TASK = Tasks.SEMANTIC_SEGMENTATION\n\n # network model config\n config.NETWORK = SmartDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {"learning_rate": 0.001}\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = "NHWC"\n\n # dataset config\n config.DATASET = SmartDict()\n config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = "NHWC"\n\n environment.init("test_lm_bisenet")\n prepare_dirs(recreate=True)\n start_training(config, profile_step=1)", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train, validation, test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, "log/model.ckpt-1999")\n for variable in all_variable:\n if "moving" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def run_task(data_dir, task_id):\n print("Train and test for task %d ..." % task_id)\n\n print("We are going to use this")\n \n\n # Parse data\n train_files = glob.glob('%s/qa%d_*_train.txt' % (data_dir, task_id))\n test_files = glob.glob('%s/qa%d_*_test.txt' % (data_dir, task_id))\n\n dictionary = {"nil": 0}\n train_story, train_questions, train_qstory = parse_babi_task(train_files, dictionary, False)\n test_story, test_questions, test_qstory = parse_babi_task(test_files, dictionary, False)\n\n general_config = BabiConfig(train_story, train_questions, dictionary)\n\n\n # #### R: this line builds an empty model to train\n # memory, model, loss = build_model(general_config)\n\n # if general_config.linear_start:\n # train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n # else:\n # train(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n\n\n\n # 
memory, model, loss = build_model(general_config)\n\n # this line\n test(test_story, test_questions, test_qstory, memory, model, loss, general_config)", "def main():\n grid_tester_cpu = GridTesterCPU()\n\n # parse args, load configuration and create all required objects.\n grid_tester_cpu.setup_grid_experiment()\n\n # GO!\n grid_tester_cpu.run_grid_experiment()", "def test_multitask_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )", "def test_ecs_tensorflow_training_fasterrcnn_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n num_gpus = ec2_utils.get_instance_num_gpus(instance_id)\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id,\n num_gpus=num_gpus)", "def test_multitask(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )", "def evaluate(sess, images_ph, labels_ph, softmax, mnist, config, task):\n\n print 'Evaluating on {} task ({}x{}, {} distractors) using {} glimpses (at {} scales)'.format(\n task, config.new_size, config.new_size, config.n_distractors,\n config.num_glimpses, config.n_patches)\n\n # Evaluation\n test_acc = []\n val_acc = []\n\n for k, dataset in enumerate([mnist.validation, mnist.test]):\n\n steps_per_epoch = dataset.num_examples // config.eval_batch_size\n correct_cnt = 0\n num_samples = steps_per_epoch * config.batch_size\n # loc_net.sampling = True\n\n for test_step in tqdm(xrange(steps_per_epoch)):\n\n images, labels = dataset.next_batch(config.batch_size)\n images = images.reshape((-1, config.original_size, config.original_size, 1))\n labels_bak = labels\n\n if task == 'translated':\n images = translate(images, width=config.new_size, height=config.new_size)\n elif task == 'cluttered':\n images = clutter(images,\n dataset.images.reshape((-1, config.original_size, config.original_size, 1)),\n width=config.new_size, height=config.new_size, n_patches=config.n_distractors\n )\n elif task == 'cluttered_var':\n images, _, _, _ = clutter_rnd(images,\n train_data=dataset.images.reshape(\n (-1, config.original_size, config.original_size, 1)),\n lim=config.distractor_range,\n color_digits=config.color_digits,\n color_noise=config.color_noise,\n width=config.new_size, height=config.new_size, norm=True)\n\n # else:\n # print 'original mnist data ({}x{}).'.format(config.original_size,config.original_size)\n\n # Duplicate M times (average prediction over M repeats)\n images = np.tile(images, [config.M, 1, 1, 1])\n labels = np.tile(labels, [config.M])\n\n softmax_val = sess.run(softmax,\n feed_dict={\n images_ph: images,\n labels_ph: labels\n })\n softmax_val = np.reshape(softmax_val,\n [config.M, -1, config.num_classes])\n softmax_val = np.mean(softmax_val, 0)\n\n pred_labels_val = np.argmax(softmax_val, 1)\n correct_cnt += np.sum(pred_labels_val == labels_bak)\n acc = correct_cnt / float(num_samples)\n\n if k == 0:\n print '\\nVal accuracy\\t{:4.4f} ({:4.4f} error)'.format(100 * acc, 100 - 100 * acc)\n val_acc = acc\n else:\n print 'Test accuracy\\t{:4.4f} ({:4.4f} error)\\n'.format(100 * acc, 100 - 100 * acc)\n test_acc = acc\n\n return test_acc, 
val_acc", "def do_testing(gpu=0):\n # expected environment variables\n os.environ[\"BERT_BASE_DIR\"] = \"pretrained/cased_L-12_H-768_A-12\"\n os.environ[\"DATA_DIR\"] = \"dataset\"\n os.environ[\"OUTPUT_DIR\"] = \"output\"\n assert os.environ.get(\"BERT_BASE_DIR\") is not None\n assert os.environ.get(\"DATA_DIR\") is not None\n assert os.environ.get(\"OUTPUT_DIR\") is not None\n\n # set the gpu index\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu)\n # set the required flags\n FLAGS.task_name = \"topic\"\n FLAGS.do_predict = True\n FLAGS.data_dir = os.environ.get(\"DATA_DIR\")\n FLAGS.vocab_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"vocab.txt\")\n FLAGS.bert_config_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_config.json\")\n FLAGS.init_checkpoint = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_model.ckpt\")\n FLAGS.do_lower_case = False\n FLAGS.max_seq_length = 128\n FLAGS.output_dir = os.environ.get(\"OUTPUT_DIR\")\n\n run_classifier.main(0)", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def run_mnist(flags_obj):\n model_function = model_fn\n config = tf.estimator.RunConfig(protocol='grpc+verbs',\n save_checkpoints_secs=300,\n save_summary_steps=200,\n log_step_count_steps=200)\n data_format = flags_obj.data_format\n if data_format is None:\n data_format = ('channels_first'\n if tf.test.is_built_with_cuda() else 'channels_last')\n mnist_classifier = tf.estimator.Estimator(\n model_fn=model_function,\n model_dir=flags_obj.model_dir,\n config=config,\n params={\n 'data_format': data_format,\n })\n\n # Set up training and evaluation input functions.\n def train_input_fn():\n \"\"\"Prepare data for training.\"\"\"\n # When choosing shuffle buffer 
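The evaluate routine above duplicates each batch M times and averages the M softmax outputs before taking the argmax. Isolated, the trick is just a reshape and a mean (a sketch; shapes follow the snippet):

import numpy as np

def average_repeats(softmax_val, m, num_classes):
    # softmax_val stacks M stochastic passes along the batch axis.
    softmax_val = np.reshape(softmax_val, [m, -1, num_classes])
    mean_probs = np.mean(softmax_val, axis=0)  # [batch, num_classes]
    return np.argmax(mean_probs, axis=1)  # predicted labels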
sizes, larger sizes result in better\n # randomness, while smaller sizes use less memory. MNIST is a small\n # enough dataset that we can easily shuffle the full epoch.\n ds = dtrain(flags_obj.data_dir)\n ds = ds.cache().shuffle(buffer_size=50000).batch(flags_obj.batch_size)\n\n # Iterate through the dataset a set number (`epochs_between_evals`) of times\n # during each training session.\n ds = ds.repeat()\n return ds\n\n def eval_input_fn():\n return dtest(flags_obj.data_dir).batch(\n 100).make_one_shot_iterator().get_next()\n\n \n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=flags_obj.train_steps)\n eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,throttle_secs=300)\n tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)\n \n '''# Train and evaluate model.\n for _ in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):\n mnist_classifier.train(input_fn=train_input_fn, hooks=train_hooks)\n eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)\n print('\\nEvaluation results:\\n\\t%s\\n' % eval_results)\n\n if model_helpers.past_stop_threshold(flags_obj.stop_threshold,\n eval_results['accuracy']):\n break\n '''\n # Export the model\n if flags_obj.export_dir is not None:\n image = tf.placeholder(tf.float32, [None, 28, 28])\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'image': image,\n })\n mnist_classifier.export_savedmodel(flags_obj.export_dir, input_fn)", "def test_image_task_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def main(_):\n\n params = create_params()\n\n assert params[\"train_dataset_path\"]\n assert params[\"eval_dataset_path\"]\n\n input_fn = input_fn_from_files(\n params[\"train_dataset_path\"])\n eval_input_fn = input_fn_from_files(\n params[\"eval_dataset_path\"])\n\n feature_columns = create_feature_columns(params)\n\n model_fn = create_model_fn(feature_columns)\n estimator = create_tpu_estimator(model_fn, feature_columns, params)\n\n for cycle_index in range(params[\"train_epochs\"]):\n tf.logging.info(\"Starting a training cycle: {}/{}\".format(\n cycle_index + 1, params[\"train_epochs\"]))\n estimator.train(input_fn=input_fn, steps=params[\"steps_per_epoch\"])\n tf.logging.info(\"Beginning evaluation.\")\n eval_results = estimator.evaluate(eval_input_fn,\n steps=params[\"num_eval_steps\"])\n tf.logging.info(\"Evaluation complete.\")\n\n recall_1 = float(eval_results[\"recall@1\"])\n recall_5 = float(eval_results[\"recall@5\"])\n loss = float(eval_results[\"loss\"])\n tf.logging.info(\n \"Iteration {}: recall@1 = {:.4f}, recall@5 = {:.4f}, Loss = {:.4f}\"\n .format(cycle_index + 1, recall_1, recall_5, loss))", "def task():\n\n\tprint('Example task executed.')", "def run_all_tasks(data_dir):\n print(\"Training and testing for all tasks ...\")\n for t in range(20):\n run_task(data_dir, task_id=t + 1)", "def main(seed, filter_, num_classes, setup, model_name, images_dir, precision_mode, test):\n f1, f2 = filter_\n model_name = 'flex_random_seed_{}_resnet_manual_highres_center_only_f1_{}_f2_{}'.format(seed, f1, f2)\n frozen_graph_filepath = './Models/Frozen_graphs/{}_{}/'.format(f1,f2) + model_name + '_frozen_graph.pb'\n frozen_graph, x_tensor, y_tensor = trt_frozen_graph_and_tensors(\n model_name=model_name, \n frozen_graph_filepath=frozen_graph_filepath, \n 
precision_mode=precision_mode\n )\n\n elapsed_time_full_dataset = []\n sum_of_confusion_matrices = np.zeros((6, 6))\n \n with tf.compat.v1.Session(graph=frozen_graph) as sess:\n for image_file in [img for img in os.listdir(images_dir) if img.endswith('.JPG')]:\n\n img = Image.open(images_dir + image_file)\n sx,sy = img.size\n\n print(\"Image size is %i x %i\" % (sx,sy)) # sx = 4912, sy = 3264\n print(\"Loading image %s\" % image_file)\n\n img_np = np.array(img)/255.0\n del img\n\n print(\"Predicting for image %s (%i x %i pixel)\" % (image_file,sx,sy))\n\n start = time.time()\n predictions_flex = sess.run(y_tensor, feed_dict={x_tensor:np.expand_dims(img_np, 0)})\n elapsed = time.time() - start\n elapsed_time_full_dataset.append(elapsed)\n del img_np #deleting afterwards to not take the deleting time into account\n\n print(\"Prediction took %f seconds (inference on full image)\" % elapsed)\n print(\"Merging predictions\")\n # merge the predictions on the quarter images\n predictions_flex_combined = np.zeros(predictions_flex.shape)\n\n elapsed = time.time()-start\n if embedded_version:\n print(\"Prediction took %f seconds (inference on split up image)\" % elapsed)\n\n if embedded_version:\n predictions_flex = predictions_flex_combined\n\n if save_annotations:\n print(\"Computing annotations...\")\n annotations = []\n d = 4\n for x in range(100, sx-101, d):\n for y in range(100, sy-101, d):\n x0 = int(round(float(x-100)/4) + 15)\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n annotations.append((probs_flex, x, y))\n\n if test: # add a prefix for test to not replace real experiments\n model_name = 'TEST_' + model_name\n\n # saving annotations\n annotation_dir = images_dir.replace('Data', 'Results/seeds/annotations_trt') + image_file\n annotate_and_save(annotations, d, annotation_dir, model_name, precision_mode)\n classes_image = annotate_and_save_per_class(\n annotations, \n d, \n annotation_dir, \n model_name, \n precision_mode\n )\n\n labels = load_labels(annotation_dir)\n confusion_matrix = np.zeros((num_classes, num_classes))\n for (c_name, x, y) in labels:\n if 100 <= x < sx-101 and 100 <= y < sy-101:\n x0 = int(round(float(x-100)/4) + 15 )\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n\n predicted_class = np.argmax(probs_flex)\n c = train_model.get_classes().index(c_name)\n confusion_matrix[c, predicted_class] += 1\n print(confusion_matrix)\n sum_of_confusion_matrices += confusion_matrix\n\n print(sum_of_confusion_matrices)\n sum_of_cm_fp = './Results/seeds/preds_trt/{}/{}_{}/sum_of_cm_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n elapsed_time_fp = './Results/seeds/elapsed_trt/{}/{}_{}/time_taken_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n\n\n np.save(sum_of_cm_fp, sum_of_confusion_matrices)\n np.save(elapsed_time_fp, elapsed_time_full_dataset)\n tf.reset_default_graph()", "def run_experiment(hparams):\n\n data_file_name = build_data_file_name(hparams.pair, hparams.time_interval, hparams.data_period)\n\n df = data_pre_processing(data_file_name, hparams.path_to_archives, hparams.path_to_data_dir)\n\n rows = df.shape[0]\n\n train, test = prepare_data(df[rows - 100:rows], hparams.feature_window, hparams.label_window)\n\n print(\"train:{}\".format(train))\n print(\"test:{}\".format(test))\n # my_feature_columns = [tf.feature_column.numeric_column('f')]\n # estimator = tf.estimator.DNNClassifier(\n # feature_columns=[],\n # 
hidden_units=[1024, 512, 256])\n\n # estimator = tf.estimator.DNNRegressor()", "def Experiment1(train_x,train_y,test_x,test_y,task):\r\n lambda_r = np.array(np.arange(0,151,1))\r\n if(task=='1'):\r\n #Task1: Effects of regularization parameters\r\n Exp1_task1(lambda_r,train_x,train_y,test_x,test_y)\r\n if(task=='2'):\r\n #Task2: Effects of No.of examples\r\n Exp1_task2(lambda_r,train_x,train_y,test_x,test_y)", "def test_image_task(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def evaluate(eval_ds, model, task):\n\n print('==========EVAL==========')\n # Testing contrastive accuracy\n if task['name'] == 'contrastive_accuracy':\n ds = eval_ds.map(data_utils.pretrain_preprocess)\n ds = ds.batch(128)\n test_contrast_acc = tf.keras.metrics.Accuracy(name='test_constrastive_accuracy')\n for x in ds:\n image = x['image']\n image = tf.transpose(image, [1, 0, 2, 3, 4])\n image = tf.reshape(\n image, \n (image.shape[0]*image.shape[1], image.shape[2], image.shape[3], image.shape[4])\n )\n out = model(image, mode='unsupervised', training=False)\n metrics.update_contrastive_accuracy2(test_contrast_acc, out, TEMP)\n print('test contrastive accuracy')\n print(test_contrast_acc.result())\n return \n\n # Testing classification accuracy \n ds = eval_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.eval_preprocess)\n ds = ds.batch(FLAGS.eval_bs)\n test_class_acc = tf.keras.metrics.Accuracy(name='test_class_accuracy')\n for x in ds:\n image = x['image']\n labels = x[task['name']]\n if task['name'] == 'extr':\n out = model(image, mode='eval', sup_layers=2, training=False)\n else:\n out = model(image, mode='eval', sup_layers=1, training=False)\n metrics.update_supervised_accuracy(test_class_acc, labels, out)\n \n if FLAGS.debug:\n print(tf.math.argmax(out, axis=-1))\n print('test classification accuracy')\n print(test_class_acc.result())", "def test_run(self):\n sut = ExperimentEmail()\n train = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n val = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n outdir = tempfile.mkdtemp()\n\n # Act\n sut.run(train, val, outdir, batch_size=32, epochs=2)", "def one_shot_test(self, model, support_set_size, number_of_tasks_per_alphabet,\n is_validation):\n\n # Set some variables that depend on dataset\n if is_validation:\n alphabets = self._validation_alphabets\n print('\\nMaking One Shot Task on validation alphabets:')\n else:\n alphabets = self._evaluation_alphabets\n print('\\nMaking One Shot Task on evaluation alphabets:')\n\n mean_global_accuracy = 0\n\n for alphabet in alphabets:\n mean_alphabet_accuracy = 0\n for _ in range(number_of_tasks_per_alphabet):\n images, _ = self.get_one_shot_batch(\n support_set_size, is_validation=is_validation)\n probabilities = model.predict_on_batch(images)\n\n # Added this condition because noticed that sometimes the outputs\n # of the classifier was almost the same in all images, meaning that\n # the argmax would be always by defenition 0.\n if np.argmax(probabilities) == 0 and probabilities.std()>0.01:\n accuracy = 1.0\n else:\n accuracy = 0.0\n\n mean_alphabet_accuracy += accuracy\n mean_global_accuracy += accuracy\n\n mean_alphabet_accuracy /= number_of_tasks_per_alphabet\n\n print(alphabet + ' alphabet' + ', accuracy: ' +\n str(mean_alphabet_accuracy))\n if is_validation:\n 
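The evaluate routine above (and the fine-tuning loop that follows) shares one tf.data preamble: filter out the excluded label, preprocess, then batch. A stripped-down sketch, with preprocess_fn standing in for the data_utils functions:

import tensorflow as tf

def build_eval_pipeline(ds, excluded_label, preprocess_fn, batch_size):
    ds = ds.filter(lambda x: x['label'] != excluded_label)
    ds = ds.map(preprocess_fn)
    return ds.batch(batch_size)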
self._current_validation_alphabet_index += 1\n else:\n self._current_evaluation_alphabet_index += 1\n\n mean_global_accuracy /= (len(alphabets) *\n number_of_tasks_per_alphabet)\n\n print('\\nMean global accuracy: ' + str(mean_global_accuracy))\n\n # reset counter\n if is_validation:\n self._current_validation_alphabet_index = 0\n else:\n self._current_evaluation_alphabet_index = 0\n\n return mean_global_accuracy", "def run_cross_validation_process_test(info_string, models):\n\n batch_size = 64\n num_fold = 0\n yfull_test = []\n x_test_id = []\n nfolds = len(models)\n\n datagen_test = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n preprocessing_function=pre_processing_image\n )\n\n # print(image_array.shape)\n x_test, x_test_id = load_images_test()\n print(len(x_test))\n image_test_array = np.asarray(x_test, dtype=np.float32)\n start_time = time.time()\n print(\"Datagen.fit started\")\n datagen_test.fit(image_test_array, augment=False)\n print('Fit Completed: {} seconds'.format(round(time.time() - start_time, 2)))\n\n for i in range(nfolds):\n model = models[i]\n num_fold += 1\n print('Start KFold number {} from {}'.format(num_fold, nfolds))\n\n #test_prediction = model.predict_generator(generator=datagen_test.fit(image_test_array, seed=79),\n # steps=len(image_test_array)/32, max_q_size=20, workers=8, verbose=1)\n\n test_prediction = model.predict(image_test_array, batch_size=batch_size, verbose=1)\n\n yfull_test.append(test_prediction)\n\n test_res = merge_several_folds_mean(yfull_test, nfolds)\n info_string = 'loss_' + info_string \\\n + '_folds_' + str(nfolds)\n create_submission(test_res, x_test_id, info_string)\n d=pd.DataFrame(test_res,columns=FISH_CLASSES)", "def mini_imagenet_tasksets(\n train_ways=5,\n train_samples=10,\n test_ways=5,\n test_samples=10,\n root='~/data',\n data_augmentation=None,\n device=None,\n **kwargs,\n):\n if data_augmentation is None:\n train_data_transforms = None\n test_data_transforms = None\n elif data_augmentation == 'normalize':\n train_data_transforms = Compose([\n lambda x: x / 255.0,\n ])\n test_data_transforms = train_data_transforms\n elif data_augmentation == 'lee2019':\n normalize = Normalize(\n mean=[120.39586422/255.0, 115.59361427/255.0, 104.54012653/255.0],\n std=[70.68188272/255.0, 68.27635443/255.0, 72.54505529/255.0],\n )\n train_data_transforms = Compose([\n ToPILImage(),\n RandomCrop(84, padding=8),\n ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n RandomHorizontalFlip(),\n ToTensor(),\n normalize,\n ])\n test_data_transforms = Compose([\n normalize,\n ])\n else:\n raise ValueError('Invalid data_augmentation argument.')\n\n train_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='train',\n download=True,\n )\n valid_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='validation',\n download=True,\n )\n test_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='test',\n download=True,\n )\n if device is None:\n train_dataset.transform = train_data_transforms\n valid_dataset.transform = test_data_transforms\n test_dataset.transform = test_data_transforms\n else:\n train_dataset = l2l.data.OnDeviceDataset(\n dataset=train_dataset,\n transform=train_data_transforms,\n device=device,\n )\n valid_dataset = l2l.data.OnDeviceDataset(\n dataset=valid_dataset,\n transform=test_data_transforms,\n device=device,\n )\n test_dataset = l2l.data.OnDeviceDataset(\n dataset=test_dataset,\n transform=test_data_transforms,\n device=device,\n )\n train_dataset = 
l2l.data.MetaDataset(train_dataset)\n valid_dataset = l2l.data.MetaDataset(valid_dataset)\n test_dataset = l2l.data.MetaDataset(test_dataset)\n\n train_transforms = [\n NWays(train_dataset, train_ways),\n KShots(train_dataset, train_samples),\n LoadData(train_dataset),\n RemapLabels(train_dataset),\n ConsecutiveLabels(train_dataset),\n ]\n valid_transforms = [\n NWays(valid_dataset, test_ways),\n KShots(valid_dataset, test_samples),\n LoadData(valid_dataset),\n ConsecutiveLabels(valid_dataset),\n RemapLabels(valid_dataset),\n ]\n test_transforms = [\n NWays(test_dataset, test_ways),\n KShots(test_dataset, test_samples),\n LoadData(test_dataset),\n RemapLabels(test_dataset),\n ConsecutiveLabels(test_dataset),\n ]\n\n _datasets = (train_dataset, valid_dataset, test_dataset)\n _transforms = (train_transforms, valid_transforms, test_transforms)\n return _datasets, _transforms", "def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == \"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and 
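mini_imagenet_tasksets above returns datasets plus transform lists; sampling actual episodes takes one more step. A minimal sketch using the same learn2learn transforms (not the benchmark's exact wiring; num_tasks is an assumed parameter):

import learn2learn as l2l
from learn2learn.data.transforms import (NWays, KShots, LoadData,
                                         RemapLabels, ConsecutiveLabels)

def make_taskset(dataset, ways, shots, num_tasks=1000):
    meta = l2l.data.MetaDataset(dataset)
    transforms = [
        NWays(meta, ways),          # sample N classes per task
        KShots(meta, shots),        # K examples per class
        LoadData(meta),
        RemapLabels(meta),
        ConsecutiveLabels(meta),
    ]
    return l2l.data.TaskDataset(meta, task_transforms=transforms,
                                num_tasks=num_tasks)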
(fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list", "def test_is_trainable(estimator_fn, machine_settings):\n # Setup\n batch_size = 128 # Must be divisible by number of replicas (8 for TPU v2)\n crop_size = 24\n eval_count = 1024\n eval_steps = int(eval_count / batch_size)\n assert eval_steps * batch_size == eval_count\n estimator = estimator_fn(\n micronet.cifar.linear_model.create_model, batch_size, batch_size)\n\n # Replace with lambda?\n def input_fn(params):\n # Only the TPUEstimator 
needs to pass batch_size to the input_fn.\n if 'batch_size' in params:\n assert params['batch_size'] == batch_size\n del params\n mini_ds = cifar_ds.train_dataset(\n cloud_storage=machine_settings.is_cloud)\n mini_ds = mini_ds.map(\n cifar_ds.preprocess_fn(augment=False, crop_to=crop_size))\n # Take a small amount and repeat so that the test can show training\n # in a smaller amount of steps (so the test runs quickly).\n mini_ds.take(500).repeat()\n return mini_ds.batch(batch_size, drop_remainder=True)\n\n # Test\n # 1. Check that the untrained model predicts randomly.\n #\n # I want the test to pass 99% of the time.\n # For a 1000 trial experiment with success probability of 1% (100 classes),\n # CDF_inverse(0.01) ~= 3\n # CDF_inverse(0.99) ~= 19\n # (from binomial dist calculator:\n # https://www.di-mgt.com.au/binomial-calculator.html)\n # TODO: is it valid to assume a random output from the untrained model?\n results = estimator.evaluate(input_fn, steps=eval_steps)\n assert 3/eval_count < results[micronet.estimator.TOP_1_ACCURACY_KEY] \\\n <= 19/eval_count\n\n # 2. Check that the model can be trained.\n # Using the eval_steps as the max training steps. Could use something else.\n estimator.train(input_fn, max_steps=eval_steps)\n\n # 3. Check that the training has increased the model's accuracy.\n # Results is a dict containing the metrics defined by the model_fn.\n # FIXME 4: I should encapsulate/separate the metric creation so that it\n # is easy to assume that certain metrics are present.\n results = estimator.evaluate(input_fn, steps=eval_steps)\n # We should expect some improvement over the random case, 1/100. Running\n # it a few times gave ~4.5%, so using a value a little lower to make sure\n # the test reliably passes (while still being useful).\n assert results[micronet.estimator.TOP_1_ACCURACY_KEY] >= 0.040", "def finetune(ft_ds, model, task, epochs=10, eval_ds=None):\n\n print('==========FINETUNE==========')\n\n # Filter out undesired examples with excluded_label\n ds = ft_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.finetune_preprocess)\n ds = ds.shuffle(1000)\n ds = ds.batch(FLAGS.finetune_bs)\n\n # loss, metrics, optimizers\n train_loss= tf.keras.metrics.Mean(name='train_loss')\n train_sup_acc = tf.keras.metrics.Accuracy(name='train_supervised_accuracy')\n criterion_sup = tf.nn.softmax_cross_entropy_with_logits \n optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) \n for epoch in range(epochs): \n train_loss.reset_states()\n train_sup_acc.reset_states()\n for x in ds:\n with tf.GradientTape() as tape:\n image = x['image']\n labels = x[task['name']]\n out = model(image, mode='supervised', sup_layers=1, training=True)\n # print(tf.math.argmax(out, axis=-1))\n metrics.update_supervised_accuracy(train_sup_acc, labels, out)\n loss = criterion_sup(tf.one_hot(labels, depth=task['num_classes']), out)\n loss = tf.math.reduce_mean(loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(\n filter(lambda gv: gv[0] is not None, zip(gradients, model.trainable_variables))\n )\n train_loss.update_state(loss)\n print('supervised loss')\n print(train_loss.result())\n print('supervised accuracy')\n print(train_sup_acc.result())\n\n # Evaluate results on eval_ds if possible\n if eval_ds: \n evaluate(eval_ds, model, task)", "def compute_taskemb(args, train_dataset, model):\n tb_writer = SummaryWriter()\n\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, 
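The percentile bounds asserted in test_is_trainable above can be sanity-checked directly; a quick scipy check that is not part of the original test (n=1024 eval examples, p=1/100 for 100 classes):

from scipy.stats import binom

lo, hi = binom.ppf([0.01, 0.99], n=1024, p=0.01)
print(lo, hi)  # roughly 3 and 18 here; the snippet quotes 3 and 19 from a 1000-trial calculator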
sampler=train_sampler, batch_size=args.batch_size)\n\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_epochs\n\n if args.finetune_feature_extractor and not args.finetune_classifier:\n raise ValueError(\"finetune_classifier should be True when finetune_feature_extractor is True.\")\n\n if args.finetune_classifier:\n model.train()\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n\n if args.finetune_feature_extractor:\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0}\n ]\n else:\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)\n and args.model_type not in n],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)\n and args.model_type not in n],\n 'weight_decay': 0.0}\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total)\n\n else:\n model.eval()\n optimizer = None\n scheduler = None\n\n logger.info(\"***** Compute TaskEmb *****\")\n logger.info(\"Num examples = %d\", len(train_dataset))\n logger.info(\"Batch size = %d\", args.batch_size)\n\n total_num_examples = 0\n model.zero_grad()\n train_iterator = trange(int(args.num_epochs), desc=\"Epoch\", disable=False)\n set_seed(args) # Added here for reproductibility (even between python 2 and 3)\n global_feature_dict = {}\n for _ in train_iterator:\n num_examples = 0\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=False)\n for step, batch in enumerate(epoch_iterator):\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2],\n 'start_positions': batch[3],\n 'end_positions': batch[4]}\n\n outputs = model(**inputs)\n\n loss, start_logits, end_logits = outputs[0], outputs[1], outputs[2]\n\n input_mask = inputs['attention_mask']\n\n if not args.use_labels:\n feature_dict = compute_Fisher_no_labels(args, model, input_mask, start_logits, end_logits)\n else:\n feature_dict = compute_Fisher_with_labels(args, model, input_mask, loss)\n ###\n if len(global_feature_dict) == 0:\n for key in feature_dict:\n global_feature_dict[key] = feature_dict[key].detach().cpu().numpy()\n else:\n for key in feature_dict:\n global_feature_dict[key] += feature_dict[key].detach().cpu().numpy()\n\n if ((not args.use_labels) and args.finetune_classifier):\n model.zero_grad()\n loss.backward()\n\n if args.finetune_classifier:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n scheduler.step() # Update learning rate schedule\n optimizer.step()\n\n model.zero_grad()\n num_examples += inputs['input_ids'].size(0)\n total_num_examples += num_examples\n\n # Normalize\n for key in global_feature_dict:\n global_feature_dict[key] = global_feature_dict[key] / total_num_examples\n\n # Save features\n for key in global_feature_dict:\n np.save(os.path.join(args.output_dir, '{}.npy'.format(key)), global_feature_dict[key])", "def run(config_file):\n config = load_config(config_file)\n config_global = config['global']\n\n 
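The bookkeeping in compute_taskemb above reduces to summing per-batch feature dictionaries and normalizing by the number of examples seen. The same logic, isolated (names hypothetical):

import numpy as np

def update_running_sum(global_feats, batch_feats):
    # Accumulate each feature array into the global running sum.
    for key, value in batch_feats.items():
        if key in global_feats:
            global_feats[key] += value
        else:
            global_feats[key] = np.array(value, copy=True)
    return global_feats

def normalize_features(global_feats, total_examples):
    return {k: v / total_examples for k, v in global_feats.items()}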
# setup a logger\n logger = logging.getLogger('experiment')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler_stdout = logging.StreamHandler(sys.stdout)\n handler_stdout.setLevel(config['logger']['level'])\n handler_stdout.setFormatter(formatter)\n logger.addHandler(handler_stdout)\n\n if 'path' in config['logger']:\n handler_file = logging.FileHandler(config['logger']['path'])\n handler_file.setLevel(config['logger']['level'])\n handler_file.setFormatter(formatter)\n logger.addHandler(handler_file)\n\n logger.setLevel(config['logger']['level'])\n\n # Allow the gpu to be used in parallel\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n if 'max_threads' in config_global:\n sess_config.intra_op_parallelism_threads = config_global['max_threads']\n\n # we allow setting the random seed in the config file for reproducibility. However, when running on GPU, results\n # will still be nondeterministic (due to nondeterministic behavior of tensorflow)\n if 'random_seed' in config_global:\n seed = config_global['random_seed']\n logger.info('Using fixed random seed {}'.format(seed))\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\n with tf.Session(config=sess_config) as sess:\n # We are now fetching all relevant modules. It is strictly required that these modules contain a variable named\n # 'component' that points to a class which inherits from experiment.Data, experiment.Experiment,\n # experiment.Trainer or experiment.Evaluator\n data_module = config['data-module']\n model_module = config['model-module']\n training_module = config['training-module']\n evaluation_module = config.get('evaluation-module', None)\n\n # The modules are now dynamically loaded\n DataClass = importlib.import_module(data_module).component\n ModelClass = importlib.import_module(model_module).component\n TrainingClass = importlib.import_module(training_module).component\n EvaluationClass = importlib.import_module(evaluation_module).component if evaluation_module else None\n\n # We then wire together all the modules and start training\n data = DataClass(config['data'], config_global, logger)\n model = ModelClass(config['model'], config_global, logger)\n training = TrainingClass(config['training'], config_global, logger)\n\n # setup the data (validate, create generators, load data, etc.)\n logger.info('Setting up the data')\n data.setup()\n 
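# A hedged sketch (assumption, not from the source) of what a wired module\n # must look like - a module-level 'component' name bound to the class:\n #\n # # my_project/model.py\n # from experiment import Experiment\n # class MyModel(Experiment):\n # def build(self, data, sess): ...\n # component = MyModel\n #\n # build the model (e.g. 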
compile it)\n logger.info('Building the model')\n model.build(data, sess)\n # start the training process\n logger.info('Starting the training process')\n training.start(model, data, sess)\n\n # perform evaluation, if required\n if EvaluationClass:\n logger.info('Evaluating')\n evaluation = EvaluationClass(config['evaluation'], config_global, logger)\n evaluation.start(model, data, sess)\n else:\n logger.info('No evaluation')\n\n logger.info('DONE')", "def main():\n args = parse_args()\n interactive_auth = InteractiveLoginAuthentication(tenant_id=os.getenv(\"TENANT_ID\"))\n work_space = Workspace.from_config(auth=interactive_auth)\n\n # Set up the dataset for training\n datastore = work_space.get_default_datastore()\n dataset = Dataset.File.from_files(path=(datastore, args.target_folder))\n\n # Set up the experiment for training\n experiment = Experiment(workspace=work_space, name=args.file.replace(\".py\", \"\"))\n # azureml._restclient.snapshots_client.SNAPSHOT_MAX_SIZE_BYTES = 2000000000\n config = ScriptRunConfig(\n source_directory=\".\",\n script=args.file,\n compute_target=\"cpu-cluster\",\n arguments=[\n \"--target_folder\",\n dataset.as_named_input(\"input\").as_mount(),\n \"--experiment\",\n True,\n \"--log_folder\",\n \"./logs\",\n ],\n )\n\n # Set up the Tensorflow/Keras environment\n environment = work_space.environments[args.file.replace(\".py\", \"\")]\n config.run_config.environment = environment\n\n # Run the experiment for training\n run = experiment.submit(config)\n aml_url = run.get_portal_url()\n print(\n \"Submitted to an Azure Machine Learning compute cluster. Click on the link below\"\n )\n print(\"\")\n print(aml_url)\n\n tboard = Tensorboard([run])\n # If successful, start() returns a string with the URI of the instance.\n tboard.start(start_browser=True)\n run.wait_for_completion(show_output=True)\n # After your job completes, be sure to stop() the streaming; otherwise it will continue to run.\n print(\"Press enter to stop\")\n input()\n tboard.stop()\n\n # Register\n metrics = run.get_metrics()\n run.register_model(\n model_name=args.target_folder,\n tags={\"model\": \"LSTM\"},\n model_path=\"outputs/keras_lstm.h5\",\n model_framework=\"keras\",\n model_framework_version=\"2.2.4\",\n properties={\n \"train_loss\": metrics[\"train_loss\"][-1],\n \"val_loss\": metrics[\"val_loss\"][-1],\n \"data\": \"USD/TWD from {0} to {1}\".format(metrics[\"start\"], metrics[\"end\"]),\n \"epoch\": metrics[\"epoch\"],\n },\n )\n\n run.register_model(\n model_name=\"scaler\",\n tags={\"data\": \"USD/TWD from 1983-10-04\", \"model\": \"MinMaxScaler\"},\n model_path=\"outputs/scaler.pickle\",\n model_framework=\"sklearn\",\n )", "def warmup_resnet_imagenet_128_gpu_8_real(self):\n test_id = 'warmup_resnet_imagenet.gpu_8.128.real'\n args = {}\n config = self.build_resnet_test_config(test_id, args, batch_size=128, gpus=8,\n repeat=1, total_batches=1300)\n self.run_test_suite(config)", "def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, 
version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract bad samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create good sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract features from the train set \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO: store the features with pandas\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n 
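# Downstream use - a hedged sketch, not part of this script: once both\r\n # feature arrays are saved, a light classifier could be fit on them, e.g.\r\n # (the sklearn import and names below are illustrative assumptions):\r\n # bad = np.load(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"))\r\n # good = np.load(os.path.join(FEATURE_DIR, \"features_train_good.npy\"))\r\n # X = np.vstack([bad, good])\r\n # y = np.concatenate([np.ones(len(bad)), np.zeros(len(good))])\r\n # LogisticRegression(max_iter=1000).fit(X, y)\r\n 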
np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)", "def main():\n\n dataset = ConvMNIST(64)\n print(dataset.get_train().x.shape)\n\n\n inputs = Value(type=tf.float32, shape=(None, 28, 28, 1), cls = None)\n targets = Value(type=tf.int64, shape=(None), cls = 10)\n learning_rate = 0.0001\n\n fc_hidden = [1024, 500]\n c_h = [\n (3, 3, 1, 32),\n (3, 3, 32, 64)\n ]\n conv_hidden = ConvHidden(conv_weights=c_h, fc_weights=fc_hidden)\n\n config = Config(inputs, targets, conv_hidden, learning_rate)\n\n network = ConvNetworkBuilder(config)\n hidden = FFConvHiddenBuilder()\n _ = network.build_network(hidden)\n\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n\n trainer = Trainer(network, train_config)\n trainer.train(dataset)", "def main(_):\n\n spec = cluster_spec(args.num_workers, 1)\n cluster = tf.train.ClusterSpec(spec).as_cluster_def()\n\n def shutdown(signal, frame):\n logger.warn(\"Received signal {}: exiting\".format(signal))\n sys.exit(128+signal)\n signal.signal(signal.SIGHUP, shutdown)\n signal.signal(signal.SIGINT, shutdown)\n signal.signal(signal.SIGTERM, shutdown)\n\n if args.job_name == 'worker':\n config = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=2)\n\n server = tf.train.Server(cluster, job_name='worker',\n task_index=args.task,\n config=config)\n run(args, server)\n else:\n config = tf.ConfigProto(device_filters=['/job:ps'])\n server = tf.train.Server(cluster, job_name='ps', task_index=args.task,\n config=config)\n while True:\n time.sleep(1000)", "def run_sagemaker_tests(images):\n if not images:\n return\n pool_number = len(images)\n with Pool(pool_number) as p:\n p.map(run_sagemaker_pytest_cmd, images)", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n constants.TRAIN_BATCH_SIZE,\n is_train=True\n )\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n constants.EVAL_BATCH_SIZE,\n is_train=False\n )\n\n # # check for availabe tpu and gpu units\n # try:\n # tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n # tf.config.experimental_connect_to_cluster(tpu)\n # tf.tpu.experimental.initialize_tpu_system(tpu)\n # strategy = tf.distribute.experimental.TPUStrategy(tpu)\n # except ValueError:\n # strategy = tf.distribute.MirroredStrategy()\n\n # with strategy.scope():\n model = get_model(fn_args)\n\n try:\n log_dir = fn_args.model_run_dir\n except KeyError:\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), \"logs\")\n\n absl.logging.info('Tensorboard logging to {}'.format(log_dir))\n\n callbacks = [\n # tf.keras.callbacks.ModelCheckpoint(\"DeepLabV3plus.ckpt\", verbose=1, save_weights_only=True, save_best_only=True),\n tf.keras.callbacks.ReduceLROnPlateau(monitor=\"iou_score\", factor=0.2, patience=6, verbose=1, mode=\"max\"),\n tf.keras.callbacks.EarlyStopping(monitor=\"iou_score\", patience=16, mode=\"max\", verbose=1, restore_best_weights=True),\n tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=\"batch\")\n ]\n\n absl.logging.info('Start training the top classifier')\n \n model.fit(\n train_dataset,\n epochs=constants.EPOCHS,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n 
validation_steps=fn_args.eval_steps,\n callbacks=callbacks\n )\n\n signatures = {\n 'serving_default':\n _get_serve_image_fn(model).get_concrete_function(\n tf.TensorSpec(\n shape=[None, constants.HEIGHT, constants.WIDTH, 3],\n dtype=tf.float32,\n name=_transformed_name(constants.IMAGE_KEY)\n )\n )\n }\n\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def construct_test_model(self):\n # Set the placeholder for the input episode\n self.inputa = tf.placeholder(tf.float32)\n self.inputb = tf.placeholder(tf.float32)\n self.labela = tf.placeholder(tf.float32)\n self.labelb = tf.placeholder(tf.float32)\n\n with tf.variable_scope('meta-test-model', reuse=None) as training_scope: \n # construct the model weights \n self.ss_weights = ss_weights = self.construct_resnet_ss_weights()\n self.weights = weights = self.construct_resnet_weights()\n self.fc_weights = fc_weights = self.construct_fc_weights()\n\n # Load test base epoch number from FLAGS\n num_updates = FLAGS.test_base_epoch_num\n\n def task_metalearn(inp, reuse=True):\n \"\"\"The function to process one episode in a meta-batch.\n Args:\n inp: the input episode.\n reuse: whether to reuse the variables for the normalization.\n Returns:\n A series of outputs such as losses and accuracies.\n \"\"\"\n # Separate inp into its component variables\n inputa, inputb, labela, labelb = inp\n # Generate empty list to record accuracies\n accb_list = []\n\n # Embed the input images to embeddings with ss weights\n emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse)\n emb_outputb = self.forward_resnet(inputb, weights, ss_weights, reuse=True)\n\n # This part is similar to the meta-train function; you may refer to the comments above\n outputa = self.forward_fc(emb_outputa, fc_weights)\n lossa = self.loss_func(outputa, labela) \n grads = tf.gradients(lossa, list(fc_weights.values()))\n gradients = dict(zip(fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fc_weights.keys()]))\n outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n accb_list.append(accb)\n \n for j in range(num_updates - 1):\n lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)\n grads = tf.gradients(lossa, list(fast_fc_weights.values()))\n gradients = dict(zip(fast_fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))\n outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n accb_list.append(accb)\n\n lossb = self.loss_func(outputb, labelb)\n\n task_output = [lossb, accb, accb_list]\n\n return task_output\n\n if FLAGS.norm != 'None':\n unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)\n\n out_dtype = [tf.float32, tf.float32, [tf.float32]*num_updates]\n\n result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), \\\n dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)\n lossesb, accsb, accsb_list = result\n\n self.metaval_total_loss = total_loss = tf.reduce_sum(lossesb)\n self.metaval_total_accuracy = total_accuracy = tf.reduce_sum(accsb)\n 
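# accb_list holds the query-set accuracy recorded after every inner-loop\n # update, so the per-step sums below trace test-time adaptation progress.\n self.metaval_total_accuracies = total_accuracies = [tf.reduce_sum(accsb_list[j]) for j in 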
range(num_updates)]", "def test_IODimensions(self):\n tasks = [(1,1,100,True),(10,1,100,True),(1,10,100,True),(10,10,100,True),\n (1,1,100,False),(10,1,100,False),(1,10,100,False),(10,10,100,False)]\n for t in tasks:\n N_in ,N_out, N_samples, tf = t\n X = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n y = np.random.randn(N_samples,N_out) if N_out > 1 else np.random.randn(N_samples)\n Xp = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n esn = ESN(N_in,N_out,teacher_forcing=tf)\n prediction_tr = esn.fit(X,y)\n prediction_t = esn.predict(Xp)\n self.assertEqual(prediction_tr.shape,(N_samples,N_out))\n self.assertEqual(prediction_t.shape,(N_samples,N_out))", "def test_net_on_dataset(args, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0, use_matlab = False, early_stop=False):\n\n \n # print(\"test_net_on_dataset\")\n dataset = JsonDataset(dataset_name)\n test_timer = Timer()\n \n test_timer.tic()\n \n all_boxes = test_net(args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id, early_stop=early_stop)\n test_timer.toc()\n\n logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n\n roidb = dataset.get_roidb()\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES + 1\n final_boxes = empty_results(num_classes, num_images)\n test_corloc = 'train' in dataset_name\n \n\n all_cls_scores = {}\n\n for i, entry in enumerate(roidb):\n\n if early_stop and i > 10: break\n\n boxes = all_boxes[entry['image']]\n \n cls_key = entry['image'].replace('.jpg','').split('/')[-1]\n\n # print(cls_key)\n\n if boxes['scores'] is not None:\n if test_corloc:\n # print(\"corlooking\")\n _, _, cls_boxes_i = box_results_for_corloc(boxes['scores'], boxes['boxes'])\n else:\n _, _, cls_boxes_i = box_results_with_nms_and_limit(boxes['scores'], boxes['boxes'])\n\n extend_results(i, final_boxes, cls_boxes_i)\n else:\n final_boxes = None\n \n results = task_evaluation.evaluate_all(dataset, final_boxes, output_dir, test_corloc, use_matlab = use_matlab)\n return results", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def train_on_tasks(config):\n seed = config['seed']\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n tasks = config.pop('tasks')\n\n task_vis_params = config.pop('vis_params')\n\n # all_stats = []\n transfer_matrix = defaultdict(list)\n total_steps = 0\n\n if 'learner' in config:\n learner = config.pop('learner')\n else:\n learner_path = config.pop('learner_path')\n learner = torch.load(learner_path)\n task_level_tuning = config.pop('task_level_tuning')\n if task_level_tuning:\n ray_params = config.pop('ray_params')\n local_mode = config.pop('local_mode')\n redis_address = config.pop('redis_address')\n all_analysis = []\n selected_tags = []\n for t_id, (task, vis_p) in enumerate(zip(tasks, task_vis_params)):\n #todo sync transfer matrix\n static_params = dict(\n t_id=t_id, task=task, tasks=tasks, vis_p=vis_p,\n transfer_matrix=transfer_matrix, total_steps=total_steps\n )\n\n if task_level_tuning:\n if not ray.is_initialized():\n if local_mode:\n 
ray.init(local_mode=local_mode)\n else:\n ray.init(redis_address,\n log_to_driver=False,\n logging_level=logging.ERROR)\n\n config['static_params'] = static_params\n config['learner_path'] = learner_path\n config['seed'] += t_id\n\n # reporter = CLIReporter(max_progress_rows=10)\n # print(reporter._metric_columns)\n # print(reporter.DEFAULT_COLUMNS)\n # reporter.add_metric_column('avg_acc_val')\n # reporter.add_metric_column('total_params')\n # reporter.add_metric_column('fw_t')\n # reporter.add_metric_column('data_t')\n # reporter.add_metric_column('eval_t')\n # reporter.add_metric_column('epoch_t')\n # reporter.add_metric_column('total_t')\n # ray_params['progress_reporter'] = reporter\n analysis = tune.run(train_t, config=config, **ray_params)\n\n all_analysis.append(analysis)\n\n def get_key(trial):\n # return trial.last_result['avg_acc_val_so_far']\n return trial.last_result['best_val']\n best_trial = max(analysis.trials, key=get_key)\n for trial in analysis.trials:\n if trial != best_trial:\n trial_path = trial.logdir\n shutil.rmtree(trial_path)\n # am = np.argmax(list(map(get_key, analysis.trials)))\n # print(\"BEST IS {}: {}\".format(am, best_trial.last_result['avg_acc_val']))\n\n # t = best_trial.last_result['duration_iterations']\n total_steps = best_trial.last_result['total_steps']\n selected_tags.append(best_trial.experiment_tag)\n best_learner_path = os.path.join(best_trial.logdir, 'learner.pth')\n learner = torch.load(best_learner_path, map_location='cpu')\n shutil.rmtree(best_trial.logdir)\n\n #todo UPDATE LEARNER AND SAVE\n torch.save(learner, learner_path)\n else:\n rescaled, t, metrics, b_state_dict, \\\n stats = train_single_task(config=deepcopy(config), learner=learner,\n **static_params)\n\n # all_stats.append(stats)\n # update_rescaled(list(rescaled.values()), list(rescaled.keys()), tag,\n # g_task_vis, False)\n\n if task_level_tuning:\n return all_analysis, selected_tags\n else:\n save_path = path.join(tune.get_trial_dir(), 'learner.pth')\n logger.info('Saving {} to {}'.format(learner, save_path))\n torch.save(learner, save_path)", "def test_cpu_one(self):\n self.sim.add_task(self.sim.cpu, 1)\n self.assertEqual(len(self.sim.cpu), 0)\n self.sim.add_task(self.sim.cpu, 2)\n self.sim.add_task(self.sim.cpu, 3)\n self.sim.add_task(self.sim.cpu, 4)\n self.assertEqual(len(self.sim.cpu), 3)\n task, time = self.sim.cpu.process()\n self.assertEqual(task, 1)\n task, time = self.sim.cpu.process()\n self.assertEqual(task, 2)\n task, time = self.sim.cpu.process()\n self.assertEqual(task, 3)\n task, time = self.sim.cpu.process()\n self.assertEqual(task, 4)\n self.assertEqual(time, None)\n self.assertEqual(len(self.sim.cpu), 0)", "def main():\n dataset = MNIST(BATCH_SIZE)\n \n inputs = Value(type=tf.float32, shape=(None, 784), cls=None)\n targets = Value(type=tf.int64, shape=(None), cls=10)\n fc_hidden = FCHidden(weights=[300, 150])\n\n config = Config(inputs, targets, fc_hidden, LEARNING_RATE)\n\n network_builder = FFNetworkBuilder(config)\n hidden_builder = FFHiddenBuilder()\n _ = network_builder.build_network(hidden_builder)\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n trainer = Trainer(network_builder, train_config)\n trainer.train(dataset)", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def cli(config, data, metrics, model, loader):\n runner = TestMlRunner(config=config, data=data, 
metrics=metrics, model=model, loader=loader)\n print(runner.run())", "def main():\n # This automatically looks for a directory .azureml\n interactive_auth = InteractiveLoginAuthentication(tenant_id=os.getenv(\"TENANT_ID\"))\n work_space = Workspace.from_config(auth=interactive_auth)\n\n # Choose a name for your CPU cluster\n cpu_cluster_name = \"cpu-cluster\"\n\n # Verify that the cluster does not exist already\n try:\n cpu_cluster = ComputeTarget(workspace=work_space, name=cpu_cluster_name)\n print(\"Found existing cluster, use it.\")\n except ComputeTargetException:\n compute_config = AmlCompute.provisioning_configuration(\n vm_size=\"STANDARD_D2_V2\", max_nodes=4, idle_seconds_before_scaledown=2400\n )\n cpu_cluster = ComputeTarget.create(work_space, cpu_cluster_name, compute_config)\n\n cpu_cluster.wait_for_completion(show_output=True)", "def run_mnist_test():\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n train_x, train_y = mnist.train.images, mnist.train.labels,\n test_x, test_y = mnist.test.images, mnist.test.labels\n # Reshape right off the bat to save some time.\n train_x = train_x.reshape(-1, 28, 28, 1)\n test_x = test_x.reshape(-1, 28, 28, 1)\n\n conv1 = LeNetClassifier.ConvLayer(kernel_width=5, kernel_height=5,\n feature_maps=1)\n conv2 = LeNetClassifier.ConvLayer(kernel_width=3, kernel_height=3,\n feature_maps=32)\n conv3 = LeNetClassifier.ConvLayer(kernel_width=3, kernel_height=3,\n feature_maps=64)\n network = LeNetClassifier((28, 28, 1), [conv1, conv2, conv3],\n [4 * 4 * 128, 625], 10, batch_size=128)\n\n saver = tf.train.Saver()\n\n sess = tf.Session()\n init = tf.initialize_all_variables()\n sess.run(init)\n\n writer = tf.train.SummaryWriter(\"mnist_logs\", sess.graph_def)\n\n print(\"Tensorflow: Starting MNIST test...\")\n\n accuracy = 0\n start_time = time.time()\n iterations = 0\n while iterations < 2000:\n if iterations % 500 == 0:\n test_batch = mnist.test.next_batch(128)\n result = sess.run(network.predict(),\n feed_dict={network.inputs(): test_batch[0],\n network.expected_outputs(): test_batch[1]})\n argmax = np.argmax(test_batch[1], axis=1)\n accuracy = np.mean(argmax == result)\n print(\"Tensorflow: step %d, testing accuracy %s\" % \\\n (iterations, accuracy))\n\n batch = mnist.train.next_batch(128)\n sess.run(network.train(), feed_dict={network.inputs(): batch[0],\n network.expected_outputs(): batch[1]})\n iterations += 1\n\n # Save the network at the end.\n #saver.save(sess, \"Variables/test.ckpt\")\n\n elapsed = time.time() - start_time\n speed = iterations / elapsed\n print(\"Tensorflow: Ran %d training iterations. 
(%f iter/s)\" % \\\n (iterations, speed))\n print(\"Tensorflow: MNIST test completed in %f seconds.\" % (elapsed))\n return (elapsed, speed)", "def Experiment2(train_x,train_y,test_x,test_y,filename,task):\r\n if(task=='1'):\r\n #task: 3.1 Model selection using cross validation\r\n t1 = timeit.default_timer()\r\n print(\"file: \",filename)\r\n Exp2_t1(train_x,train_y,test_x,test_y)\r\n t2 = timeit.default_timer()\r\n t = t2-t1\r\n print(\"run time: \",t)\r\n if(task=='2'):\r\n #task: 3.2 Model selection using Evidence function\r\n t1 = timeit.default_timer()\r\n print(\"file: \",filename)\r\n Exp2_t2(train_x,train_y,test_x,test_y)\r\n t2 = timeit.default_timer()\r\n t = t2-t1\r\n print(\"run time: \",t)", "def train(self, example):\n data = dict()\n data[\"inst\"] = \"train\"\n data[\"examples\"] = example\n\n q_idx, data_id = self.put(data, q_idx=0) # Send instruction to first nnet\n self.get(q_idx, data_id) # Blocks here\n\n # Done", "def main(config_file):\n \n # Load the configuration from json file\n assert os.path.isfile(\n config_file), \"No json configuration file found at {}\".format(config_file)\n config = utils.LoadConfig(config_file)\n\n # use GPU if available\n config.cuda = torch.cuda.is_available()\n\n # Set the random seed for reproducible experiments\n torch.manual_seed(config.general['seed'])\n if config.cuda:\n torch.cuda.manual_seed(config.general['seed'])\n \n #Generate output path if it does not exist\n out_dir = config.general['out_dir']\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n #Save config file\n config.save(os.path.join(out_dir, 'experiment_config.json'))\n\n # Set the logger\n utils.set_logger(os.path.join(out_dir, 'train.log'))\n\n # Create the input data pipeline\n logging.info(\"Loading the datasets...\")\n\n # Load data\n train, test = read_and_format_full_dataset()\n train_kaggle, test_kaggle = read_and_format_kaggle_dataset()\n \n #Using kaggle's training data for training\n train, val = split_train_val_partition(train_kaggle, config.data['split_train_percentage'],config.general['seed'])\n \n #Adding data augmentation to training\n # train = MNISTDatasetLabels(train,\n # transform=transforms.Compose([\n # Normalization(),\n # transforms.RandomHorizontalFlip(0.5),\n # transforms.RandomVerticalFlip(0.5),\n # transforms.RandomPerspective(),\n # transforms.RandomRotation(30)])) \n \n train = MNISTDatasetLabels(train,\n transform=transforms.Compose([\n Normalization(),\n transforms.RandomRotation(15)])) \n \n val = MNISTDatasetLabels(val,\n transform=transforms.Compose([Normalization()])) \n \n test = MNISTDatasetLabels(test,\n transform=transforms.Compose([Normalization()])) \n \n test_kaggle = MNISTDatasetNoLabels(test_kaggle,\n transform=transforms.Compose([Normalization()])) \n \n train_dataloader = DataLoader(train, batch_size=config.CNN_train['batch_size'], shuffle=True, num_workers=config.CNN_train['num_workers'])\n val_dataloader = DataLoader(val, batch_size=config.CNN_train['batch_size'], shuffle=True, num_workers=config.CNN_train['num_workers'])\n test_dataloader = DataLoader(test, batch_size=config.CNN_train['batch_size'], shuffle=False, num_workers=config.CNN_train['num_workers'])\n test_kaggle_dataloader = DataLoader(test_kaggle, batch_size=config.CNN_train['batch_size'], shuffle=False, num_workers=config.CNN_train['num_workers'])\n\n logging.info(\"- done.\")\n \n # Train the model\n logging.info(\"Starting training for {} epoch(s)\".format(config.CNN_train['num_epochs']))\n train_wraper(train_dataloader, val_dataloader, 
config)\n logging.info(\"- done.\")\n \n #Evaluate the model on the test sets\n # Using Kaggle's test set, whose labels are unknown (a test set may or may not carry true labels; Kaggle's does not)\n logging.info(\"Starting the model evaluation on Kaggle's test data\")\n eval_out_kaggle = evaluate_return_labels(test_kaggle_dataloader, config)\n #Save the results\n eval_out_kaggle.to_csv(os.path.join(out_dir, 'test_result_kaggle.csv'),index=False)\n logging.info(\"- done.\")\n \n # Using test set with known labels\n logging.info(\"Starting the model evaluation on test data\")\n eval_out = evaluate_return_labels(test_dataloader, config)\n #Save the results\n eval_out.to_csv(os.path.join(out_dir, 'test_result.csv'),index=False)\n logging.info(\"- done.\")\n \n # Compute metrics\n if 'TrueLabel' in eval_out:\n #Evaluate the model with test set (known labels)\n logging.info(\"Calculating final metrics\")\n # Get unique true labels in dataset\n classes = eval_out.TrueLabel.unique()\n # Sort them\n classes.sort()\n # Calculate accuracy\n accuracy_total = accuracy(eval_out)\n # Calculate error rate\n error_rate_total = error_rate(eval_out)\n # Confusion matrix\n c_matrix = confusion_matrix(eval_out, classes)\n plot_confusion_matrix(c_matrix, classes, 'CNN', out_dir)\n # Overall metrics\n metrics_per_class, metrics_overall = confusion_matrix_metrics(c_matrix)\n metrics_overall['accuracy_percent'] = accuracy_total\n metrics_overall['error_rate_percent'] = error_rate_total\n \n metrics_per_class.to_csv(os.path.join(out_dir, 'CNN_results_per_class.csv'))\n metrics_overall.to_csv(os.path.join(out_dir, 'CNN_results_overall.csv'))\n \n logging.info(\"- done.\")", "def main(feats_name, targets_name, model_name, n_boot, seed_start, output_filename, train_test_flag):\n\n #load feats and targets\n input_dict = {}\n input_dict['feats'] = 'data/%s' % (feats_name)\n input_dict['targets'] = 'data/%s' % (targets_name)\n #load the feats and targets\n df = pd.read_csv(\"%s\" % (input_dict['feats']))\n targets = pd.read_csv(\"%s\" % (input_dict['targets']))\n #drop columns not used for prediction\n drop_cols = [\"Unnamed: 0\",\"index\"]\n for dc in drop_cols:\n if dc in targets.columns:\n targets = targets.drop(dc,axis=1)\n if dc in df.columns:\n df = df.drop(dc,axis=1)\n #reduce to training or test set only if requested\n if (train_test_flag == 'train') and ('test_set' in df.columns):\n targets = targets[df['test_set'] == 0]\n df = df[df['test_set'] == 0]\n elif (train_test_flag == 'test') and ('test_set' in df.columns):\n targets = targets[df['test_set'] == 1]\n df = df[df['test_set'] == 1]\n df = df.drop('test_set', axis = 1)\n \n #broadcast the feats and targets\n df_b = sc.broadcast(df)\n targets_b = sc.broadcast(targets)\n\n #Set up the regression model. 
3-fold CV selects the regularization term.\n if model_name == 'linear':\n model = LinearRegression(fit_intercept=True,\n normalize=False,\n copy_X=True,\n n_jobs=1) \n elif model_name == 'lasso':\n model = LassoCV(alphas = [.05,.1,.2],\n normalize = False,\n fit_intercept = True,\n verbose = False,\n copy_X = False,\n n_jobs = 3)\n elif model_name == 'ridge':\n model = RidgeCV(alphas = [.00001,.0001,.001,.01,.1,1,10,100,1000,10000],\n normalize = False,\n fit_intercept = True,\n verbose = 1,\n cv = 3)\n else:\n raise ValueError('model_name not recognized.')\n \n #Create an RDD that specifies prng seed to use\n samp_list = [(n,) for n in np.arange(seed_start, seed_start+n_boot)]\n samp_rdd = sc.parallelize(samp_list,n_boot) #create RDD with one partition for each row (second arg is number of partitions)\n #Create a function that takes a seed tuple as input and returns the fitted-model results as a JSON string\n def func(tup):\n \"\"\"\n Takes as input a tuple containing an integer. The integer specifies the random seed that will be used to \n randomly sample, with replacement, observations from the feats set provided. The model is fitted to the \n sampled feats. Resulting best fit parameters, along with some other summary statistics and information, are\n returned as a JSON string that will be written to the output file when all jobs are completed.\n \n Parameters\n ----------\n tup, rdd\n - series of tuples with different integer values defining the RNG seed to be used to sample observations\n \n Returns\n ----------\n json.dumps(results_dict), str\n - dict in json format with the following keys:\n - alpha, the regularization term providing the best fit according to 3-fold cross-validation\n - random_state, the initial state used for fitting\n - training_feats, the name of the training_feats csv file\n - training_targets, the name of the target variable csv file\n - cv, the type of cross-validation used\n - sklearn_version, which version of sklearn was used\n - mse_min, the mean squared error for the test set on each fold\n - r2, the r-squared value (% var explained)\n - coef, parameter vector\n - intercept, intercept parameter\n - column_names, feature name corresponding to each parameter in the parameter vector\n \"\"\"\n #take a random sample with replacement\n np.random.seed(seed=tup[0]) #set the seed\n n_obs = np.shape(df_b.value)[0] #number of observations determines sample size\n samp = list(np.random.randint(0,high=n_obs,size=n_obs)) #draw the random sample with replacement\n #fit the model\n tic = time.time()\n results = model.fit(df_b.value.iloc[samp,:],np.ravel(targets_b.value.iloc[samp]))\n toc = time.time() - tic\n #save the results in a dict\n results_dict = {}\n results_dict['alpha'] = results.alpha_\n results_dict['random_state'] = results.random_state\n results_dict['training_feats'] = input_dict['feats']\n results_dict['training_targets'] = input_dict['targets']\n results_dict['cv'] = results.cv\n results_dict['sklearn_version'] = sklearn.__version__\n results_dict['mse_min'] = results.mse_path_.min()\n results_dict['r2'] = results.score(df_b.value.iloc[samp,:],np.ravel(targets_b.value.iloc[samp]))\n results_dict['coef'] = list(results.coef_)\n results_dict['intercept'] = results.intercept_\n results_dict['column_names'] = [i for i in df_b.value.columns]\n results_dict['fit_time'] = toc\n #convert results dict to json and return it\n return(json.dumps(results_dict))\n\n #fit model in parallel\n results = samp_rdd.map(lambda p: func(p))\n 
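# Aggregation - a hedged sketch (assumption, not from the source): every line\n # written below is a JSON dict, so bootstrap confidence intervals can later\n # be recovered with, e.g.:\n # coefs = np.array([json.loads(line)['coef'] for line in open(part_file)])\n # lo, hi = np.percentile(coefs, [2.5, 97.5], axis=0) # 95% CI per feature\n #save to text file\n 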
results.saveAsTextFile(output_filename)\n #stop the SparkContext.\n if not local_mode:\n sc.stop()", "def run_test(**kwargs):\n cmd = 'python yolov3/test.py'\n pms_list = [\n 'batch_size', 'model_def',\n 'data_config', 'weights_path',\n 'class_path', 'iou_thres',\n 'nms_thres', 'conf_thres',\n 'n_cpu', 'img_size'\n ]\n call_command(pms_list, cmd, kwargs)", "def main():\n logger.info(\"=> creating model ...\")\n logger.info(\"Classes: %s\", cfg.classes)\n\n value_scale = 255\n mean = [0.485, 0.456, 0.406]\n mean = [item * value_scale for item in mean]\n std = [0.229, 0.224, 0.225]\n std = [item * value_scale for item in std]\n gray_folder = os.path.join(cfg.result_path, 'gray')\n color_folder = os.path.join(cfg.result_path, 'color')\n\n test_transform = pt_transform.Compose([pt_transform.Normalize(mean=mean, std=std, is_train=False)])\n\n if cfg.data_root[-1] == \"/\":\n val_list = cfg.data_root + cfg.val_list\n else:\n val_list = cfg.data_root + '/' + cfg.val_list\n\n test_data = pt_dataset.SemData(\n split='val', data_root=cfg.data_root,\n data_list=val_list,\n transform=test_transform)\n\n test_loader = ds.GeneratorDataset(test_data, column_names=[\"data\", \"label\"],\n shuffle=False)\n test_loader = test_loader.batch(1)\n colors = numpy.loadtxt(cfg.color_txt).astype('uint8')\n\n from src.model import cpnet\n\n CPNet = cpnet.CPNet(\n prior_channels=256,\n proir__size=60,\n am_kernel_size=11,\n pretrained=True,\n pretrained_path=cfg.pretrain_path,\n deep_base=True\n )\n\n ms_checkpoint = load_checkpoint(cfg.ckpt)\n load_param_into_net(CPNet, ms_checkpoint, strict_load=True)\n CPNet.set_train(False)\n test(test_loader, test_data.data_list, CPNet, cfg.classes, mean, std, cfg.base_size, cfg.test_h,\n cfg.test_w, cfg.scales, gray_folder, color_folder, colors)\n if cfg.split != 'test':\n cal_acc(test_data.data_list, gray_folder, cfg.classes)", "def finetuned():\n launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='ft', dropout=0.7304, learning_rate=0.0000976)", "def train_and_test(self, train_fn, test_fn):\n logging.info(\"Training..\")\n self.train(train_fn)\n logging.info(\"Testing..\")\n results = self.test(test_fn)\n logging.info(\"Done!\")\n return results", "def main(_):\n\n args = HParams\n\n spec = cluster_spec(args.num_workers, 1)\n cluster = tf.train.ClusterSpec(spec).as_cluster_def()\n\n def shutdown(signal, frame):\n logger.warn('Received signal %s: exiting', signal)\n sys.exit(128+signal)\n signal.signal(signal.SIGHUP, shutdown)\n signal.signal(signal.SIGINT, shutdown)\n signal.signal(signal.SIGTERM, shutdown)\n\n # Reset graph before allocating any?\n tf.reset_default_graph()\n\n if args.job_name == \"monitor\":\n server = tf.train.Server(cluster, job_name=\"worker\", task_index=args.task,\n config=tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=2))\n run_monitor(args, server)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def main(args):\n\n print(now(), \"test_model.py main() running.\")\n\n test_log = \"clean_test_log.txt\"\n to_log_file(args, args.output, test_log)\n\n # Set device\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n ####################################################\n # Dataset\n if args.dataset.lower() == \"cifar10\":\n transform_train = get_transform(args.normalize, args.train_augment)\n transform_test = get_transform(args.normalize, False)\n trainset = 
torchvision.datasets.CIFAR10(\n root=\"./data\", train=True, download=True, transform=transform_train\n )\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=128)\n testset = torchvision.datasets.CIFAR10(\n root=\"./data\", train=False, download=True, transform=transform_test\n )\n testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)\n elif args.dataset.lower() == \"cifar100\":\n transform_train = get_transform(args.normalize, args.train_augment)\n transform_test = get_transform(args.normalize, False)\n trainset = torchvision.datasets.CIFAR100(\n root=\"./data\", train=True, download=True, transform=transform_train\n )\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=128)\n testset = torchvision.datasets.CIFAR100(\n root=\"./data\", train=False, download=True, transform=transform_test\n )\n testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False)\n\n elif args.dataset.lower() == \"tinyimagenet_first\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"firsthalf\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"firsthalf\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n elif args.dataset.lower() == \"tinyimagenet_last\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"lasthalf\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"lasthalf\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n elif args.dataset.lower() == \"tinyimagenet_all\":\n transform_train = get_transform(\n args.normalize, args.train_augment, dataset=args.dataset\n )\n transform_test = get_transform(args.normalize, False, dataset=args.dataset)\n trainset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"train\",\n transform=transform_train,\n classes=\"all\",\n )\n trainloader = torch.utils.data.DataLoader(\n trainset, batch_size=64, num_workers=1, shuffle=True\n )\n testset = TinyImageNet(\n TINYIMAGENET_ROOT,\n split=\"val\",\n transform=transform_test,\n classes=\"all\",\n )\n testloader = torch.utils.data.DataLoader(\n testset, batch_size=64, num_workers=1, shuffle=False\n )\n\n else:\n print(\"Dataset not yet implemented. 
Exiting from test_model.py.\")\n sys.exit()\n\n ####################################################\n\n ####################################################\n # Network and Optimizer\n net = get_model(args.model, args.dataset)\n\n # load model from path if a path is provided\n if args.model_path is not None:\n net = load_model_from_checkpoint(args.model, args.model_path, args.dataset)\n else:\n print(\"No model path provided, continuing test with untrained network.\")\n net = net.to(device)\n ####################################################\n\n ####################################################\n # Test Model\n training_acc = test(net, trainloader, device)\n natural_acc = test(net, testloader, device)\n print(now(), \" Training accuracy: \", training_acc)\n print(now(), \" Natural accuracy: \", natural_acc)\n stats = OrderedDict(\n [\n (\"model path\", args.model_path),\n (\"model\", args.model),\n (\"normalize\", args.normalize),\n (\"augment\", args.train_augment),\n (\"training_acc\", training_acc),\n (\"natural_acc\", natural_acc),\n ]\n )\n to_results_table(stats, args.output, \"clean_performance.csv\")\n ####################################################\n\n return", "def schedule_experiments(train_fun, decode_fun, eval_fun, train_set, dev_set,\n hyperparam_sets, FLAGS):\n\n print(\"===== Scheduled Experiments =====\")\n for hyperparam_set in hyperparam_sets:\n for hp in hyperparam_set:\n setattr(FLAGS, hp, hyperparam_set[hp])\n if hp == 'universal_keep':\n setattr(FLAGS, 'sc_input_keep', hyperparam_set[hp])\n setattr(FLAGS, 'sc_output_keep', hyperparam_set[hp])\n setattr(FLAGS, 'tg_input_keep', hyperparam_set[hp])\n setattr(FLAGS, 'tg_output_keep', hyperparam_set[hp])\n setattr(FLAGS, 'attention_input_keep', hyperparam_set[hp])\n setattr(FLAGS, 'attention_output_keep', hyperparam_set[hp])\n\n print(\"Trying parameter set: \")\n for hp in hyperparam_set:\n print(\"* {}: {}\".format(hp, hyperparam_set[hp]))\n metrics = \"top1_temp_ms\"\n\n metrics_value = single_round_model_eval(\n train_fun, decode_fun, eval_fun, train_set, dev_set, metrics)\n print(\"Parameter set: \")\n for hp in hyperparam_set:\n print(\"* {}: {}\".format(hp, hyperparam_set[hp]))\n print(\"{} = {}\".format(metrics, metrics_value))", "def run_multitask_model(X_train, y_train, cohorts_train,\n X_val, y_val, cohorts_val,\n X_test, y_test, cohorts_test,\n all_tasks, fname_keys, fname_results,\n FLAGS):\n\n model_fname_parts = ['mtl', 'lstm_shared', str(FLAGS.num_lstm_layers), \n 'layers', str(FLAGS.lstm_layer_size), 'units',\n 'dense_shared', str(FLAGS.num_dense_shared_layers), \n 'layers', str(FLAGS.dense_shared_layer_size), 'dense_units',\n 'specific', str(FLAGS.num_multi_layers), 'layers', \n str(FLAGS.multi_layer_size), \n 'specific_units', 'readmission']\n\n n_tasks = len(np.unique(cohorts_train))\n cohort_key = dict(zip(all_tasks, range(n_tasks)))\n\n if FLAGS.test_time:\n model_path = FLAGS.experiment_name + \\\n '/models/' + \"_\".join(model_fname_parts)\n model = load_model(model_path)\n y_pred = model.predict(X_test)\n \n cohort_aucs = []\n for task in all_tasks:\n if FLAGS.test_bootstrap:\n all_aucs = bootstrap_predict(X_test, y_test, cohorts_test,\n task=task, model=model, return_everything=True, test=True,\n all_tasks=all_tasks,\n num_bootstrap_samples=FLAGS.num_test_bootstrap_samples)\n cohort_aucs.append(np.array(all_aucs))\n else:\n y_pred_in_cohort = y_pred[cohorts_test ==\n task, cohort_key[task]]\n y_true_in_cohort = y_test[cohorts_test == task]\n auc = roc_auc_score(y_true_in_cohort, 
y_pred_in_cohort)\n cohort_aucs.append(auc)\n\n if FLAGS.test_bootstrap:\n cohort_aucs = np.array(cohort_aucs)\n cohort_aucs = np.concatenate(\n (cohort_aucs, np.expand_dims(np.mean(cohort_aucs, axis=0), 0)))\n\n all_micro_aucs = bootstrap_predict(X_test, y_test, cohorts_test, 'all', model, return_everything=True, test=True,\n all_tasks=all_tasks, num_bootstrap_samples=FLAGS.num_test_bootstrap_samples)\n cohort_aucs = np.concatenate(\n (cohort_aucs, np.array([all_micro_aucs])))\n\n else:\n macro_auc = np.mean(cohort_aucs)\n cohort_aucs.append(macro_auc)\n micro_auc = roc_auc_score(y_test, y_pred[np.arange(len(y_test)), [\n cohort_key[c] for c in cohorts_test]])\n cohort_aucs.append(micro_auc)\n\n suffix = 'single' if not FLAGS.test_bootstrap else 'all'\n test_auc_fname = 'test_auc_on_multitask_' + suffix\n np.save(FLAGS.experiment_name + '/results/' +\n test_auc_fname, cohort_aucs)\n return\n\n # model\n mtl_model = create_multitask_model(X_train.shape[1:], FLAGS.num_lstm_layers,\n FLAGS.lstm_layer_size, FLAGS.num_dense_shared_layers, FLAGS.dense_shared_layer_size,\n FLAGS.num_multi_layers, FLAGS.multi_layer_size, output_dim=1, tasks=all_tasks)\n\n early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=4)\n\n model_dir = FLAGS.experiment_name + \\\n '/checkpoints/' + \"_\".join(model_fname_parts)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n model_fname = model_dir + '/{epoch:02d}-{val_loss:.2f}.hdf5'\n checkpointer = tf.keras.callbacks.ModelCheckpoint(model_fname, monitor='val_loss', verbose=1)\n mtl_model.fit(X_train, [y_train for i in range(n_tasks)],\n batch_size=100,\n epochs=FLAGS.epochs,\n verbose=1,\n sample_weight=get_mtl_sample_weights(\n y_train, cohorts_train, all_tasks, sample_weights=samp_weights),\n callbacks=[early_stopping, checkpointer],\n validation_data=(X_val, [y_val for i in range(n_tasks)]))\n\n mtl_model.save(FLAGS.experiment_name + '/models/' +\n \"_\".join(model_fname_parts))\n\n cohort_aucs = []\n\n y_pred = get_correct_task_mtl_outputs(\n mtl_model.predict(X_val), cohorts_val, all_tasks)\n\n # task aucs\n for task in all_tasks:\n print('Multitask AUC on', task, ': ')\n if FLAGS.no_val_bootstrap:\n y_pred_in_task = y_pred[cohorts_val == task]\n try:\n auc = roc_auc_score(y_val[cohorts_val == task], y_pred_in_task)\n except ValueError:\n # roc_auc_score raises ValueError when the cohort has only one class\n auc = np.nan\n cohort_aucs.append(auc)\n else:\n \n min_auc, max_auc, avg_auc = bootstrap_predict(\n X_val, y_val, cohorts_val, task, mtl_model,\\\n all_tasks=all_tasks, num_bootstrap_samples=FLAGS.num_val_bootstrap_samples)\n cohort_aucs.append(np.array([min_auc, max_auc, avg_auc]))\n print(\"(min/max/average):\")\n\n print(cohort_aucs[-1])\n \n # macro average\n cohort_aucs = np.array(cohort_aucs)\n cohort_aucs = np.concatenate(\n (cohort_aucs, np.expand_dims(np.nanmean(cohort_aucs, axis=0), 0)))\n\n # micro average\n if FLAGS.no_val_bootstrap:\n cohort_aucs = np.concatenate(\n (np.nanmean(cohort_aucs, axis=0), np.array([roc_auc_score(y_val, y_pred)])))\n else:\n min_auc, max_auc, avg_auc = bootstrap_predict(\n X_val, y_val, cohorts_val, 'all', mtl_model,\\\n all_tasks=all_tasks, num_bootstrap_samples=FLAGS.num_val_bootstrap_samples)\n cohort_aucs = np.concatenate(\n (cohort_aucs, np.array([[min_auc, max_auc, avg_auc]])))\n\n current_run_params = [FLAGS.num_lstm_layers, FLAGS.lstm_layer_size, FLAGS.num_dense_shared_layers,\n FLAGS.dense_shared_layer_size, FLAGS.num_multi_layers, FLAGS.multi_layer_size]\n\n 
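# Append this run's AUCs and hyperparameters to any result arrays already on\n # disk, falling back to fresh arrays on the first run.\n try:\n multitask_model_results = np.load(fname_results)\n multitask_model_key = 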
np.load(fname_keys)\n multitask_model_results = np.concatenate(\n (multitask_model_results, np.expand_dims(cohort_aucs, 0)))\n multitask_model_key = np.concatenate(\n (multitask_model_key, np.array([current_run_params])))\n\n except FileNotFoundError:\n # first run: no previous results on disk yet\n multitask_model_results = np.expand_dims(cohort_aucs, 0)\n multitask_model_key = np.array([current_run_params])\n\n np.save(fname_results, multitask_model_results)\n np.save(fname_keys, multitask_model_key)\n print('Saved multitask results.')", "def simulate_run(run, maker, all_data, train_mask, test_mask, instances, independent, mixture):\n\n train_data = all_data.masked(train_mask)\n test_data = all_data.masked(test_mask)\n\n if instances is not None:\n ids = sorted(train_data.run_lists, key = lambda _: numpy.random.rand())[:instances]\n train_data = train_data.filter(*ids)\n\n if independent:\n train_data = train_data.collect_independent(mixture).only_nonempty()\n else:\n train_data = train_data.collect_systematic(mixture).only_nonempty()\n\n budget = test_data.common_budget\n #budget = test_data.common_budget / 2 # XXX\n suite = borg.fake.FakeSuite(test_data)\n\n if maker.subname == \"preplanning-dir\":\n model_kwargs = {\"K\": 64}\n\n if \"set_alpha\" in maker.variants:\n model_kwargs[\"alpha\"] = 1e-2\n else:\n model_kwargs = {}\n\n solver = maker(suite, train_data, model_kwargs = model_kwargs)\n successes = []\n\n for (i, instance_id) in enumerate(test_data.run_lists):\n logger.info(\"simulating run %i/%i on %s\", i, len(test_data), instance_id)\n\n with suite.domain.task_from_path(instance_id) as instance:\n with borg.accounting() as accountant:\n answer = solver.start(instance).run_then_stop(budget)\n\n succeeded = suite.domain.is_final(instance, answer)\n\n logger.info(\n \"%s %s on %s (%.2f CPU s)\",\n maker.name,\n \"succeeded\" if succeeded else \"failed\",\n os.path.basename(instance),\n accountant.total.cpu_seconds,\n )\n\n if succeeded:\n successes.append(accountant.total.cpu_seconds)\n\n logger.info(\n \"%s had %i successes over %i instances\",\n maker.name,\n len(successes),\n len(test_data),\n )\n\n description = \"{0} ({1})\".format(mixture, \"Sep.\" if independent else \"Sys.\")\n\n return (\n description,\n maker.name,\n instances,\n len(successes),\n numpy.mean(successes),\n numpy.median(successes),\n )", "def test_mnist():\n skip_if_no_data()\n mode = get_default_mode()\n if hasattr(mode, 'check_py_code'):\n old_value = mode.check_py_code\n mode.check_py_code = False\n try:\n if config.mode == \"DEBUG_MODE\":\n yaml_file = 'mnist_fast'\n else:\n yaml_file = 'mnist'\n limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'\n % yaml_file))\n try:\n os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))\n os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))\n except Exception:\n pass\n finally:\n if hasattr(mode, 'check_py_code'):\n mode.check_py_code = old_value", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def main():\n # \"\"\"Prepare neuromorphic MNIST image datasets for use in caffe\n # Each dataset will be generated with different number of unique spikes\n # \"\"\"\n # initial_size = 1e6 #best to make this big enough to avoid expensive\n # re-allocation\n # test_dir = os.path.abspath('testFull')\n # train_dir = os.path.abspath('trainFull')\n\n # for num_spikes in range(150, 260, 10):\n # #test directory\n # image_dataset = generate_nmnist_dataset(initial_size, test_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'testlmdb' + str(num_spikes)\n # database = save_to_lmdb(image_dataset, output_lmdb)\n # 
#database.process_all_data(show_lmdb_datum)\n\n # #train directory\n # image_dataset = generate_nmnist_dataset(initial_size, train_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'trainlmdb' + str(num_spikes)\n # save_to_lmdb(image_dataset, output_lmdb)\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))\n # best to make this big enough to avoid expensive re-allocation\n initial_size = 6e5\n test_dir = os.path.abspath('testFull')\n train_dir = os.path.abspath('trainFull')\n\n # test directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, test_dir)\n save_to_lmdb(image_dataset, 'testlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_test.mat')\n # database.process_all_data(show_lmdb_datum)\n\n # train directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, train_dir)\n save_to_lmdb(image_dataset, 'trainlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_train.mat')\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample desired in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker (a for loop so this also runs eagerly under Python 3)\n for worker_id in ids:\n apply_alarm_to_instance(worker_id)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: 
${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def prepare_run(self, **kwargs):\n assert self.cloud\n LOGGER.debug('Validating run tests...')\n for test in kwargs.get('tests', self.stests):\n if test in self.stests:\n self.tests.append(test)\n else:\n raise Exception(f\"Test name '{test}' is invalid\")\n\n if not os.path.exists(self.task_dir):\n os.makedirs(self.task_dir)\n\n task = os.path.join(self.rally_dir, 'task.yaml')\n if not os.path.exists(task):\n LOGGER.error(\"Task file '%s' does not exist.\", task)\n raise Exception(f\"Task file '{task}' does not exist.\")\n self.task_file = os.path.join(self.task_dir, 'task.yaml')\n shutil.copyfile(task, self.task_file)\n\n task_macro = os.path.join(self.rally_dir, 'macro')\n if not os.path.exists(task_macro):\n LOGGER.error(\"Task macro dir '%s' does not exist.\", task_macro)\n raise Exception(f\"Task macro dir '{task_macro}' does not exist.\")\n macro_dir = os.path.join(self.task_dir, 'macro')\n if os.path.exists(macro_dir):\n shutil.rmtree(macro_dir)\n shutil.copytree(task_macro, macro_dir)\n\n self.update_keystone_default_role()\n self.compute_cnt = self.count_hypervisors()\n self.network_extensions = self.cloud.get_network_extensions()\n self.flavor_alt = self.create_flavor_alt()\n self.services = [service.name for service in\n functest_utils.list_services(self.cloud)]\n\n LOGGER.debug(\"flavor: %s\", self.flavor_alt)", "def main():\n\n experiment_config_path = _parse_input()\n all_experiments = read_experiments_config(experiment_config_path)\n\n for experiment_name, experiment_config in all_experiments.items():\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n results, model = perform_experiment(experiment_config)\n weights_file_name = save_model_weights(experiment_name, model)\n testing_layers_files = save_layers_logs(results['Layers Testing Output'], 'Testing')\n training_layers_files = save_layers_logs(results['Layers Training Output'], 'Training')\n\n results.pop('Layers Training Output')\n results.pop('Layers Testing Output')\n print(\"Testing Data Confusion Matrix\")\n print(np.array2string(results['Confusion Matrix']))\n results['Confusion Matrix'] = str(results['Confusion Matrix'].tolist())\n print(\"Experiment Results:\")\n print(json.dumps(results, indent=2, sort_keys=True))\n\n results_file = save_experiment_log(results, experiment_name)\n upload_to_s3([], [], [results_file], [weights_file_name], testing_layers_files + training_layers_files)", "def infinite_infer_run():\n try:\n # This cat-dog model is implemented as binary classifier, since the number\n # of labels is small, create a dictionary that converts the machine\n # labels to human readable labels.\n model_type = 'classification'\n output_map = {0: 'dog', 1: 'cat'}\n # Create an IoT client for sending to messages to the cloud.\n client = greengrasssdk.client('iot-data')\n iot_topic = '$aws/things/{}/infer'.format(os.environ['AWS_IOT_THING_NAME'])\n # Create a local 
display instance that will dump the image bytes to a FIFO\n # file that the image can be rendered locally.\n local_display = LocalDisplay('480p')\n local_display.start()\n # The sample projects come with optimized artifacts, hence only the artifact\n # path is required.\n model_path = '/opt/awscam/artifacts/mxnet_resnet18-catsvsdogs_FP32_FUSED.xml'\n # Load the model onto the GPU.\n client.publish(topic=iot_topic, payload='Loading action cat-dog model')\n model = awscam.Model(model_path, {'GPU': 1})\n client.publish(topic=iot_topic, payload='Cat-Dog model loaded')\n # Since this is a binary classifier only retrieve 2 classes.\n num_top_k = 2\n # The height and width of the training set images\n input_height = 224\n input_width = 224\n # Do inference until the lambda is killed.\n while True:\n # Get a frame from the video stream\n ret, frame = awscam.getLastFrame()\n if not ret:\n raise Exception('Failed to get frame from the stream')\n # Resize frame to the same size as the training set.\n frame_resize = cv2.resize(frame, (input_height, input_width))\n # Run the images through the inference engine and parse the results using\n # the parser API, note it is possible to get the output of doInference\n # and do the parsing manually, but since it is a classification model,\n # a simple API is provided.\n parsed_inference_results = model.parseResult(model_type,\n model.doInference(frame_resize))\n # Get top k results with highest probabilities\n top_k = parsed_inference_results[model_type][0:num_top_k]\n # Add the label of the top result to the frame used by local display.\n # See https://docs.opencv.org/3.4.1/d6/d6e/group__imgproc__draw.html\n # for more information about the cv2.putText method.\n # Method signature: image, text, origin, font face, font scale, color, and thickness\n cv2.putText(frame, output_map[top_k[0]['label']], (10, 70),\n cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 165, 20), 8)\n # Set the next frame in the local display stream.\n local_display.set_frame_data(frame)\n # Send the top k results to the IoT console via MQTT\n cloud_output = {}\n for obj in top_k:\n cloud_output[output_map[obj['label']]] = obj['prob']\n client.publish(topic=iot_topic, payload=json.dumps(cloud_output))\n except Exception as ex:\n client.publish(topic=iot_topic, payload='Error in cat-dog lambda: {}'.format(ex))", "def run_training_and_tests(test_name, dataset, models, n_images = 1000, training_split = 0.7, \n n_training_images = None, n_test_images = None, \n n_iterations = 1, dimensions = (50, 50)):\n\n aggregate_metrics = {}\n\n # Run specified number of iterations\n for i in range(n_iterations):\n print(\"\\nTest iteration\", i+1)\n\n # Handle if specific training and test set size isn't given\n if (n_training_images is None):\n n_training_images = n_images * training_split\n if (n_test_images is None):\n n_test_images = n_images * (1-training_split)\n\n # Load training and test sets from single dataset\n train_data, train_labels, test_data, test_labels = image_utils.read_dataset(\n n_training_images, \n n_test_images, \n './datasets/' + dataset, \n dimensions[0], \n dimensions[1]\n )\n\n # Train and run tests for each model\n for model in models:\n print(\"Working with model '\" + model.label + \"'\")\n\n # Train model\n start = time.time()\n model.train(copy.deepcopy(train_data), train_labels)\n end = time.time()\n training_time = round(end - start, 3)\n\n # Run predictions on test set\n start = time.time()\n predicted = model.run(copy.deepcopy(test_data))\n end = time.time()\n test_time = round(end 
- start, 3)\n\n # Calculate metrics and store for aggregate calculations\n metrics = Metrics(test_labels, predicted, training_time, test_time)\n if model.label in aggregate_metrics:\n aggregate_metrics[model.label].append(metrics)\n else:\n aggregate_metrics[model.label] = [metrics]\n\n # Print results\n print(\"Results\\n\" + \"------\")\n print(str(metrics))\n\n # Save model\n filepath = \"./test/\" + test_name + \"/\" + model.label + \"/iteration\" + str(i+1) + \"/\"\n print(\"Saving model to '\" + filepath + model.label + \".joblib'\")\n os.makedirs(os.path.dirname(filepath), exist_ok = True)\n with open(filepath + model.label + '.joblib', 'wb') as file:\n dump(model, file)\n\n # Save results\n print(\"Saving results to '\" + filepath + \"results.txt'\\n\")\n with open(filepath + \"results.txt\", 'w') as file:\n file.write(str(metrics))\n\n # Calculate, print and write aggregate metrics\n print(\n 'Aggregate Results' + '\\n' +\n '-----------------'\n )\n for model in models:\n aggregate = combine_metrics(aggregate_metrics[model.label])\n print(model.label)\n print(aggregate)\n filepath = \"./test/\" + test_name + \"/\" + model.label + \"/\"\n print(\"Saving results to '\" + filepath + \"aggregate_results.txt'\" + \"\\n -- -\\n\")\n with open(filepath + \"aggregate_results.txt\", 'w') as file:\n file.write(str(aggregate))", "def main():\n\n # Experiment Start\n start_time = datetime.now()\n logger.info(\n '################ Bergson Team Experiment Start #################')\n logger.info(\n f'Starting Bergson Astro Pi team experiment at {start_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n\n '''\n # Load simple Conv2D AI Model\n logger.info(\"Loading AI Convolutional Model\")\n conv2D_model = load_model(\"Conv2D_TF114\")\n '''\n\n # Load TFLite Model\n logger.info(\"Loading TFLite Mobilenetv2 Model\")\n mobilenetv2_interpreter = load_tflite_model(\"./Mobilenetv2_TF114.tflite\")\n\n # Create Log File\n logger.info(f'Creating Log file at {str(data_file)}')\n with open(data_file, 'w') as f:\n writer = csv.writer(f)\n header = (\"Date/time\", \"Location\", \"Picture Name\", \"Predicted NO2\")\n writer.writerow(header)\n\n # Start Loop over 3 hours\n\n now_time = datetime.now()\n i = 0\n # run a loop for 2 minutes\n while (now_time < start_time + timedelta(minutes=175)):\n\n # Take Earth Picture\n timestamp = datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n pic_name = f'bergson_img_{timestamp}.jpg'\n capture(rpi_cam, str(dir_path/pic_name))\n logger.info(f'Experiment Pipeline {i} on picture {pic_name}')\n\n # NDVI Preprocessing\n ndvi_image = get_ndvi(str(dir_path/pic_name))\n ndvi_image = np.expand_dims(ndvi_image, axis=2)\n\n # RGB Prepprocessing for expected shape by Mobilenetv2 - comment below line when using simple Conv2D model\n ndvi_rgb_image = get_ndvi_rgb(ndvi_image)\n\n '''\n # Do Inference with simple Conv2D AI Model\n prediction = make_inference(ndvi_image,conv2D_model)\n '''\n \n # Do Inference with TFLite Model\n ndvi_rgb_image = ndvi_rgb_image.astype('float32')\n prediction = make_tflite_inference(\n ndvi_rgb_image, mobilenetv2_interpreter)\n\n # Get Decoded Inference results\n decoded_prediction = decode_prediction(prediction)\n\n # Write Prediction as CSV to disk\n logger.info(\n f'Logging NO2 prediction \\\"{decoded_prediction}\\\" for {pic_name}')\n exif_data = get_img_exif(pic_name, iss, decoded_prediction)\n row = (exif_data['Date/Time'], exif_data['Location'],\n pic_name, exif_data['NO2'])\n with open(data_file, mode='a') as f:\n writer = csv.writer(f)\n 
writer.writerow(row)\n\n # update the current time\n now_time = datetime.now()\n i = i+1\n\n # End Loop over 3 hours\n\n # Experiment End\n end_time = datetime.now()\n logger.info(\n f'Finishing Bergson Astro Pi team experiment at {end_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n experiment_time = end_time - start_time\n logger.info(f'Bergson Astro Pi team experiment run time {experiment_time}')\n logger.info('################ Bergson Team Experiment End #################')", "def test_classification(model, test_loader, criterion, cfg, file):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n model.eval()\n\n end = time.time()\n final_result = []\n\n with torch.no_grad():\n for step, (inputs, labels, ids, chunk_nb,\n split_nb) in enumerate(test_loader):\n data_time.update(time.time() - end)\n val_batch = inputs.cuda()\n val_label = labels.cuda()\n outputs = model(val_batch)\n loss = criterion(outputs, val_label)\n\n for i in range(outputs.size(0)):\n string = \"{} {} {} {} {}\\n\".format(ids[i], \\\n str(outputs.data[i].cpu().numpy().tolist()), \\\n str(int(labels[i].cpu().numpy())), \\\n str(int(chunk_nb[i].cpu().numpy())), \\\n str(int(split_nb[i].cpu().numpy())))\n final_result.append(string)\n\n prec1, prec5 = accuracy(outputs.data, val_label, topk=(1, 5))\n losses.update(loss.item(), val_batch.size(0))\n top1.update(prec1.item(), val_batch.size(0))\n top5.update(prec5.item(), val_batch.size(0))\n batch_time.update(time.time() - end)\n end = time.time()\n if step % cfg.CONFIG.LOG.DISPLAY_FREQ == 0 and cfg.DDP_CONFIG.GPU_WORLD_RANK == 0:\n print('----Testing----')\n print_string = 'Epoch: [{0}][{1}/{2}]'.format(\n 0, step + 1, len(test_loader))\n print(print_string)\n print_string = 'data_time: {data_time:.3f}, batch time: {batch_time:.3f}'.format(\n data_time=data_time.val, batch_time=batch_time.val)\n print(print_string)\n print_string = 'loss: {loss:.5f}'.format(loss=losses.avg)\n print(print_string)\n print_string = 'Top-1 accuracy: {top1_acc:.2f}%, Top-5 accuracy: {top5_acc:.2f}%'.format(\n top1_acc=top1.avg, top5_acc=top5.avg)\n print(print_string)\n if not os.path.exists(file):\n os.mknod(file)\n with open(file, 'w') as f:\n f.write(\"{}, {}\\n\".format(top1.avg, top5.avg))\n for line in final_result:\n f.write(line)", "def fewshot_eval_model(experiment_name, task_name, mt, eval_data, batch_size, \n k=0, random_seed=0, n=None, prompt_data=None, \n instructions=None, answers=None, template_id=0, cot_reasons=None,\n max_decode_steps=128, extract_answers=None,\n trigger_phrase=None,\n print_examples=0, print_all_wrong=False):\n # argument checks\n if k > 0 and prompt_data is None: \n assert len(prompt_data) >= 1, f\"need to provide prompt data of at least len {k}\"\n # define stats\n n_correct = 0\n n_str_em = 0\n n_datapoints = 0\n all_preds = []\n all_labels = []\n # task specific info\n task_name_to_hendrycks_em_group_by = {\n 'commonsense': 1,\n 'deontology': 4,\n 'justice': 4,\n 'utilitarianism': 1,\n 'virtue': 1, # we treat as multiple choice\n 'trolley' : 1,\n 'factual' : 1,\n 'counterfact' : 1,\n }\n if 'virtue' in task_name:\n assert answers is None, \"do not use answers with virtue subset\"\n if answers and not extract_answers:\n extract_answers = answers\n # subsample eval data if requested\n if n is not None:\n eval_data_loop = eval_data.sample(n=n, random_state=random_seed, replace=False)\n else:\n eval_data_loop = eval_data\n # begin eval loop\n # calculate query batch size based on if 
len(inputs) * len(answers) can fit in BATCH_SIZE query to model\n effective_batch_size = batch_size if not answers else batch_size // len(extract_answers)\n n_chunks = np.ceil(len(eval_data_loop) / effective_batch_size)\n for batch_num, batch in enumerate(np.array_split(eval_data_loop, n_chunks)):\n if batch_num > 0:\n running_acc = n_correct / n_datapoints \n check_answers = extract_answers if answers is None else answers\n prop_invalid_preds = compute_prop_invalid_preds(all_preds, check_answers)\n start = '\\r' # '\\n' if batch_num < 3 else \n print(f\"{start}Batch {batch_num-1} | Acc: {100*running_acc:.2f} | Invalid: {100*prop_invalid_preds:.2f}\", end=\"\")\n # make inputs and labels:\n query_inputs = []\n for test_input in batch.input:\n query_input = format_prompt_from_df(prompt_data, test_input, answers=answers, instructions=instructions, cot_reasons=cot_reasons, separator='\\n', template_id=template_id)\n query_inputs.append(query_input)\n labels = batch.label_str\n # make multiple choice answers for virtue\n if 'virtue' in task_name:\n answers = []\n for answer_list in batch.answers:\n answers.append(answer_list.split(','))\n answers = np.array(answers)\n # query model. query inputs may be editing when doing chain_of_thought multiple choice\n with torch.no_grad():\n preds, scores, query_inputs = predict_model(mt, \n query_inputs, \n answers, \n trigger_phrase=trigger_phrase, \n max_decode_steps=max_decode_steps)\n # record stats\n # first case is when we are generating predictions and extracting answers from them\n if answers is None and extract_answers is not None:\n batch_n_correct, correct_vec = first_appearance_fewshot_accuracy_sum(preds, labels, \n extract_answers=extract_answers, \n trigger_phrase=trigger_phrase,\n return_vec=True)\n else:\n batch_n_correct, correct_vec = fewshot_accuracy_sum(preds, labels, return_vec=True)\n n_correct += batch_n_correct\n n_str_em += em_accuracy_sum(preds, labels)\n n_datapoints += len(batch)\n all_preds.extend(list(preds))\n all_labels.extend(list(labels))\n if (print_examples>0 and batch_num == 0):\n print_idx = np.arange(min(print_examples, len(batch)))\n elif print_all_wrong:\n print_idx = np.argwhere(1-correct_vec).reshape(-1)\n else:\n print_idx = np.array([])\n if len(print_idx) > 0:\n print(f\"\\nExamples from batch {batch_num}...\")\n print(\"--------\")\n for i in print_idx:\n print(f\"Example {i}\")\n print(f\"point: \\n{batch.input.iloc[i]}\")\n print(f\"prompt: \\n{query_inputs[i]}\")\n print(\"pred: \", preds[i])\n print(\"label: \", labels.iloc[i])\n if isinstance(answers, np.ndarray):\n print(\"anwers: \", answers[i])\n print(\"exact scores: \", scores[i])\n print(\"correct: \", correct_vec[i])\n if 'completion' in batch.columns:\n print(\"gpt completion: \", batch.completion.iloc[i])\n print(\"--------\")\n print(f\"Examples acc: {correct_vec[print_idx].mean():.2f}\")\n print(\"--------\\n\")\n del batch, preds, labels, scores\n # calculate EM from Hendrycks et al paper\n group_by = task_name_to_hendrycks_em_group_by[task_name]\n hendrycks_em = get_hendrycks_em(all_preds, all_labels, answers, group_by)\n # make df with results\n results_dict = {\n 'exp_name' : experiment_name,\n 'task_name' : task_name,\n 'k' : k,\n 'n' : n,\n 'seed' : random_seed,\n 'acc' : n_correct / n_datapoints,\n 'acc_em' : n_str_em / n_datapoints,\n 'hendrycks_em': hendrycks_em,\n 'prop_invalid': compute_prop_invalid_preds(all_preds, answers)\n }\n results = pd.DataFrame.from_dict({k : [v] for k,v in results_dict.items()})\n print(\"\\nRESULTS:\")\n 
for k,v in results_dict.items():\n if any([x in k for x in ['acc', 'em', 'prop']]):\n v = f\"{100*v:.2f}\"\n print(f\" {k}: {str(v):10s}\")\n return results", "def create_cub200_task_distribution(path_to_pkl,\n num_training_samples_per_class=10,\n num_test_samples_per_class=15,\n num_training_classes=20,\n meta_batch_size=5):\n\n global cub200_trainX\n global cub200_trainY\n\n global cub200_valX\n global cub200_valY\n\n global cub200_testX\n global cub200_testY\n\n\n with open(path_to_pkl, 'rb') as f:\n d = pickle.load(f)\n cub200_X, cub200_Y = d['dataset']\n\n cub200_X = cub200_X.astype(np.float32) / 255.0\n cub200_X = (cub200_X - np.asarray((0.4914, 0.4822, 0.4465))) / np.asarray((0.2023, 0.1994, 0.2010))\n\n #\n # TODO\n # random horiz flip + normalize by: \n # transforms.Normalize((0.4914, 0.4822, 0.4465),\n # (0.2023, 0.1994, 0.2010)) (mean, std)\n\n\n\n #np.random.seed(0)\n # TODO: shuffle allocation of class indices to train/val/test\n num_train = 100\n num_val = 50\n num_test = 50\n\n classes = list(set(cub200_Y))\n train_classes = classes[:num_train]\n val_classes = classes[num_train:(num_train+num_val)]\n test_classes = classes[(num_train+num_val):]\n\n train_indices = []\n val_indices = []\n test_indices = []\n\n for i in range(len(cub200_Y)):\n if cub200_Y[i] in train_classes:\n train_indices.append(i)\n elif cub200_Y[i] in val_classes:\n val_indices.append(i)\n elif cub200_Y[i] in test_classes:\n test_indices.append(i)\n\n cub200_trainX = cub200_X[train_indices]\n cub200_trainY = cub200_Y[train_indices]\n\n cub200_valX = cub200_X[val_indices]\n cub200_valY = cub200_Y[val_indices]\n\n cub200_testX = cub200_X[test_indices]\n cub200_testY = cub200_Y[test_indices]\n\n\n train_tasks_list = [ClassificationTask(cub200_trainX,\n cub200_trainY,\n num_training_samples_per_class,\n num_test_samples_per_class,\n num_training_classes,\n split_train_test=0.5)]\n\n # TODO: NOTE: HACK -- validation and test tasks use a fixed number of test-set samples, instead of the supplied\n # ones. 
This is because in MAML/FOMAML the test set is used to compute the meta-gradient, and a small number of\n # samples is used (in the philosophy of few-shot learning, where only few samples are available).\n # However, in this case we wish to use a few more test-samples to better estimate the accuracy of the model on the validation\n # and test tasks!\n num_test_samples_per_class = 20\n validation_tasks_list = [ClassificationTask(cub200_valX,\n cub200_valY,\n num_training_samples_per_class,\n num_test_samples_per_class,\n num_training_classes,\n split_train_test=0.5)]\n\n test_tasks_list = [ClassificationTask(cub200_valX,\n cub200_valY,\n num_training_samples_per_class,\n num_test_samples_per_class,\n num_training_classes,\n split_train_test=0.5)]\n\n metatrain_task_distribution = TaskDistribution(tasks=train_tasks_list,\n task_probabilities=[1.0],\n batch_size=meta_batch_size,\n sample_with_replacement=True,\n use_classes_only_once=True)\n\n metaval_task_distribution = TaskDistribution(tasks=validation_tasks_list,\n task_probabilities=[1.0],\n batch_size=meta_batch_size,\n sample_with_replacement=True,\n use_classes_only_once=True)\n\n metatest_task_distribution = TaskDistribution(tasks=test_tasks_list,\n task_probabilities=[1.0],\n batch_size=meta_batch_size,\n sample_with_replacement=True,\n use_classes_only_once=True)\n\n return metatrain_task_distribution, metaval_task_distribution, metatest_task_distribution", "def run_offenseval_task_a(training_data, test_data):\n #grid_search_svm(training_data, test_data)\n compare_classifiers(classifiers(), training_data, test_data, dev_stage=True)\n #compare_classifiers(classifiers(), training_data, test_data, dev_stage=False)", "def train():\n args = arguments_st_train()\n\n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n\n \n if args.use_random_seed:\n tf.set_random_seed(args.random_seed)\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n input_size=input_size,\n random_scale=args.random_scale,\n random_mirror=args.random_mirror,\n random_crop=args.random_crop,\n ignore_label=args.ignore_label,\n img_mean=IMG_MEAN,\n coord=coord,\n task=args.task)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Create network.\n with slim.arg_scope(vgg_arg_scope(weight_decay=args.weight_decay, use_batch_norm=True, is_training=True)):\n if args.network == 'vgg_16_deeplab_st':\n net, end_points = vgg_16_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n elif args.network == 'vgg_16_shortcut_deeplab_st':\n net, end_points = vgg_16_shortcut_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n else:\n raise Exception('network name is not recognized!')\n \n \n # Predictions.\n raw_output = end_points['vgg_16/fc8_voc12']\n\n # gt labels\n raw_gt = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes,\n one_hot=False, task=args.task) # [batch_size, h, w]\n\n # losses\n if args.task == 'normal':\n loss = get_normal_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n elif args.task == 'seg':\n loss = get_seg_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]\n reduced_loss = 
tf.reduce_mean(loss) + tf.add_n(l2_losses)\n \n # Image summary for visualisation.\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, axis=3)\n pred = tf.expand_dims(raw_output_up, dim=3)\n \n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes, args.task], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes, args.task], tf.uint8)\n \n total_summary = tf.summary.image('images', \n tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n \n # Define loss and optimisation parameters.\n train_op, step_ph = create_train_ops_st(reduced_loss, args)\n \n # Set up tf session and initialize variables. \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n\n # Load variables if the checkpoint is provided.\n if args.restore_from is not None:\n load_st(sess, args)\n \n # Saver for storing checkpoints of the model.\n save_op = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=args.max_to_keep)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n # Iterate over training steps.\n for step in range(args.num_steps):\n start_time = time.time()\n feed_dict = { step_ph : step }\n \n if step % args.save_pred_every == 0:\n loss_value, images, labels, preds, summary, _ = sess.run([reduced_loss, image_batch, label_batch, pred, total_summary, train_op], feed_dict=feed_dict)\n summary_writer.add_summary(summary, step)\n save(save_op, sess, args.snapshot_dir, step)\n else:\n loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)\n duration = time.time() - start_time\n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n coord.request_stop()\n coord.join(threads)", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def execute(gpu, exp_batch, exp_alias, suppress_output=True, number_of_workers=12):\n try:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(gpu)\n g_conf.VARIABLE_WEIGHT = {}\n\n # At this point the log file with the correct naming is created.\n # You merge the yaml file with the global configuration structure.\n merge_with_yaml(os.path.join('configs', exp_batch, exp_alias + '.yaml'))\n set_type_of_process('train')\n\n # Set the process into loading status.\n coil_logger.add_message('Loading', {'GPU': gpu})\n\n # Put the output to a separate file if it is the case\n if suppress_output:\n if not os.path.exists('_output_logs'):\n os.mkdir('_output_logs')\n sys.stdout = open(os.path.join('_output_logs', exp_alias + '_' +\n g_conf.PROCESS_NAME + '_' + str(os.getpid()) + \".out\"), \"a\",\n buffering=1)\n sys.stderr = 
open(os.path.join('_output_logs',\n exp_alias + '_err_'+g_conf.PROCESS_NAME + '_'\n + str(os.getpid()) + \".out\"),\n \"a\", buffering=1)\n\n if coil_logger.check_finish('train'):\n coil_logger.add_message('Finished', {})\n return\n\n # Preload option\n if g_conf.PRELOAD_MODEL_ALIAS is not None:\n checkpoint = torch.load(os.path.join('_logs', g_conf.PRELOAD_MODEL_BATCH,\n g_conf.PRELOAD_MODEL_ALIAS,\n 'checkpoints',\n str(g_conf.PRELOAD_MODEL_CHECKPOINT)+'.pth'))\n\n\n # Get the latest checkpoint to be loaded\n # returns none if there are no checkpoints saved for this model\n checkpoint_file = get_latest_saved_checkpoint()\n if checkpoint_file is not None:\n checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,\n 'checkpoints', str(get_latest_saved_checkpoint())))\n iteration = checkpoint['iteration']\n best_loss = checkpoint['best_loss']\n best_loss_iter = checkpoint['best_loss_iter']\n print ('iteration: ', iteration, 'best_loss: ', best_loss)\n else:\n iteration = 0\n best_loss = 10000.0\n best_loss_iter = 0\n\n\n # Define the dataset. This structure is has the __get_item__ redefined in a way\n # that you can access the positions from the root directory as a in a vector.\n full_dataset = os.path.join(os.environ[\"COIL_DATASET_PATH\"], g_conf.TRAIN_DATASET_NAME)\n\n # By instantiating the augmenter we get a callable that augment images and transform them into tensors.\n augmenter = Augmenter(g_conf.AUGMENTATION)\n\n # Instantiate the class used to read the dataset\n dataset = CoILDataset(full_dataset, transform=augmenter, preload_name=str(g_conf.NUMBER_OF_HOURS)+'hours_'+g_conf.TRAIN_DATASET_NAME)\n print (\"Loaded dataset\")\n \n # Creates the sampler, this part is responsible for managing the keys. It divides\n # all keys depending on the measurements and produces a set of keys for each bach.\n # define the sampling strategy for mini-batch, different samplers can be found in 'splitter.py'\n data_loader = select_balancing_strategy(dataset, iteration, number_of_workers)\n\n # Instatiate the network architecture\n model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)\n model.cuda()\n\n optimizer = optim.Adam(model.parameters(), lr=g_conf.LEARNING_RATE) # adabound and adamio can also be used here\n\n if checkpoint_file is not None or g_conf.PRELOAD_MODEL_ALIAS is not None:\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n accumulated_time = checkpoint['total_time']\n loss_window = coil_logger.recover_loss_window('train', iteration)\n else: \n # We accumulate iteration time and keep the average speed\n accumulated_time = 0\n loss_window = []\n\n # freeze the perception module weights if required\n # for m in model.perception.parameters():\n # m.requires_grad = False\n \n # total trainable parameters\n model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n total_params = sum([np.prod(p.size()) for p in model_parameters])\n print ('trainable parameters: ', total_params)\n\n # multi-gpu\n print ('number of gpus: ', torch.cuda.device_count())\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n\n criterion = Loss(g_conf.LOSS_FUNCTION)\n\n print ('Start Training')\n\n st = time.time()\n for data in data_loader:\n\n # use this for early stopping if the validation loss is not coming down\n if g_conf.FINISH_ON_VALIDATION_STALE is not None and \\\n check_loss_validation_stopped(iteration, g_conf.FINISH_ON_VALIDATION_STALE):\n break\n\n \"\"\"\n ####################################\n 
Main optimization loop\n ####################################\n \"\"\"\n\n iteration += 1\n\n if iteration % 1000 == 0:\n adjust_learning_rate_auto(optimizer, loss_window)\n \n # additional learning rate scheduler - cyclic cosine annealing (https://arxiv.org/pdf/1704.00109.pdf)\n # adjust_learning_rate_cosine_annealing(optimizer, loss_window, iteration)\n\n capture_time = time.time()\n controls = data['directions']\n model.zero_grad()\n branches = model(torch.squeeze(data['rgb'].cuda()),\n dataset.extract_inputs(data).cuda())\n loss_function_params = {\n 'branches': branches,\n 'targets': dataset.extract_targets(data).cuda(),\n 'controls': controls.cuda(),\n 'inputs': dataset.extract_inputs(data).cuda(),\n 'branch_weights': g_conf.BRANCH_LOSS_WEIGHT,\n 'variable_weights': g_conf.VARIABLE_WEIGHT\n }\n loss, _ = criterion(loss_function_params)\n loss.backward()\n optimizer.step()\n \"\"\"\n ####################################\n Saving the model if necessary\n ####################################\n \"\"\"\n\n if is_ready_to_save(iteration):\n if torch.cuda.device_count() > 1:\n state_dict_save = model.module.state_dict()\n else:\n state_dict_save = model.state_dict()\n\n state = {\n 'iteration': iteration,\n 'state_dict': state_dict_save,\n 'best_loss': best_loss,\n 'total_time': accumulated_time,\n 'optimizer': optimizer.state_dict(),\n 'best_loss_iter': best_loss_iter\n }\n torch.save(state, os.path.join('_logs', exp_batch, exp_alias\n , 'checkpoints', str(iteration) + '.pth'))\n\n \"\"\"\n ################################################\n Adding tensorboard logs.\n Making calculations for logging purposes.\n These logs are monitored by the printer module.\n #################################################\n \"\"\"\n coil_logger.add_scalar('Loss', loss.data, iteration)\n coil_logger.add_image('Image', torch.squeeze(data['rgb']), iteration)\n if loss.data < best_loss:\n best_loss = loss.data.tolist()\n best_loss_iter = iteration\n\n # Log a random position\n position = random.randint(0, len(data) - 1)\n\n if torch.cuda.device_count() > 1:\n output = model.module.extract_branch(torch.stack(branches[0:4]), controls)\n else:\n output = model.extract_branch(torch.stack(branches[0:4]), controls)\n error = torch.abs(output - dataset.extract_targets(data).cuda())\n\n accumulated_time += time.time() - capture_time\n\n coil_logger.add_message('Iterating',\n {'Iteration': iteration,\n 'Loss': loss.data.tolist(),\n 'Images/s': (iteration * g_conf.BATCH_SIZE) / accumulated_time,\n 'BestLoss': best_loss, 'BestLossIteration': best_loss_iter,\n 'Output': output[position].data.tolist(),\n 'GroundTruth': dataset.extract_targets(data)[\n position].data.tolist(),\n 'Error': error[position].data.tolist(),\n 'Inputs': dataset.extract_inputs(data)[\n position].data.tolist()},\n iteration)\n loss_window.append(loss.data.tolist())\n coil_logger.write_on_error_csv('train', loss.data)\n print(\"Iteration: %d Loss: %f\" % (iteration, loss.data))\n st = time.time()\n\n coil_logger.add_message('Finished', {})\n \n except KeyboardInterrupt:\n coil_logger.add_message('Error', {'Message': 'Killed By User'})\n\n except RuntimeError as e:\n\n coil_logger.add_message('Error', {'Message': str(e)})\n\n except:\n traceback.print_exc()\n coil_logger.add_message('Error', {'Message': 'Something Happened'})", "def run(self, host=None):\n host = self.getFogHost(host)\n num = str(self.getHostNumber(host))\n url = self.baseURL+'host/'+num+'/task'\n try:\n req = requests.post(\n url,\n headers=self.header,\n json={\"taskTypeID\": 
1}\n )\n if req.status_code == 200:\n # self.logger.info(\"%s\", \"Scheduled image task for host\")\n pass\n except Exception:\n # self.logger.warning(\"%s\", \"Failed to schedule host imaging\")\n # self.logger.warning(\"%s\", \"Trying to delete existing image task\")\n self.delTask(num)\n req = requests.post(\n url,\n headers=self.header,\n json={\"taskTypeID\": 1}\n )\n if req.status_code == 200:\n # self.logger.info(\"%s\", \"Scheduled image task for host\")\n pass\n sys.exit(0)", "def main():\n\n args = define_and_process_args()\n print('\\n', 'ARGUMENTS', '\\n\\n', args, '\\n')\n\n log_dir = get_log_dir(args)\n print('\\n', 'LOG DIRECTORY', '\\n\\n', log_dir, '\\n')\n\n standardized_data_path = os.path.join(args.data_dir, args.data_filename)\n if not os.path.exists(standardized_data_path):\n message = '%s does not exist.' % standardized_data_path\n raise ValueError(message)\n\n dataset = data.Dataset(standardized_data_path)\n train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)\n train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]\n test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]\n\n train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)\n test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)\n\n Model = eval('models.' + args.model_type + 'Model')\n input_size = dataset.input_size\n target_size = dataset.num_classes\n\n # This is just to satisfy a low-CPU requirement on our cluster\n # when using GPUs.\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n config = tf.ConfigProto(intra_op_parallelism_threads=2,\n inter_op_parallelism_threads=2)\n else:\n config = None\n\n with tf.Session(config=config) as sess:\n model = Model(input_size, target_size, args.num_layers,\n args.hidden_layer_size, args.init_scale,\n args.dropout_keep_prob)\n optimizer = optimizers.Optimizer(\n model.loss, args.num_train_sweeps, args.initial_learning_rate,\n args.num_initial_sweeps, args.num_sweeps_per_decay,\n args.decay_factor, args.max_global_grad_norm)\n train(sess, model, optimizer, log_dir, args.batch_size,\n args.num_sweeps_per_summary, args.num_sweeps_per_save,\n train_input_seqs, train_reset_seqs, train_label_seqs,\n test_input_seqs, test_reset_seqs, test_label_seqs)", "def test_all_tf_execution_regimes(test_case):\n if BACKEND == 'backend_tensorflow':\n return test_util.test_all_tf_execution_regimes(test_case)\n else:\n return test_case", "def run(args):\n # CONFIG\n run_name = get_run_name(args)\n logger.info(f'*** Starting run {run_name} ***')\n data_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/finetune_data/{args.finetune_data}'\n output_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/runs/{run_name}'\n\n # Get configs\n pretrained_model_config_path = get_model_config_path(args)\n model_config = get_model_config(pretrained_model_config_path)\n\n # Meta data/label mapping\n input_meta_data = get_input_meta_data(data_dir)\n label_mapping = get_label_mapping(data_dir)\n logger.info(f'Loaded training data meta.json file: {input_meta_data}')\n\n # Calculate steps, warmup steps and eval steps\n train_data_size = input_meta_data['train_data_size']\n num_labels = input_meta_data['num_labels']\n max_seq_length = input_meta_data['max_seq_length']\n if args.limit_train_steps is None:\n steps_per_epoch = int(train_data_size / args.train_batch_size)\n else:\n steps_per_epoch = args.limit_train_steps\n warmup_steps = int(args.num_epochs * train_data_size * args.warmup_proportion/ 
args.train_batch_size)\n if args.limit_eval_steps is None:\n eval_steps = int(math.ceil(input_meta_data['eval_data_size'] / args.eval_batch_size))\n else:\n eval_steps = args.limit_eval_steps\n\n # some logging\n if args.init_checkpoint is None:\n logger.info(f'Finetuning on datset {args.finetune_data} using default pretrained model {args.model_class}')\n else:\n logger.info(f'Finetuning on datset {args.finetune_data} using pretrained model in {args.init_checkpoint} of type {args.model_class}')\n logger.info(f'Running {args.num_epochs} epochs with {steps_per_epoch:,} steps per epoch')\n logger.info(f'Using warmup proportion of {args.warmup_proportion}, resulting in {warmup_steps:,} warmup steps')\n logger.info(f'Using learning rate: {args.learning_rate}, training batch size: {args.train_batch_size}, num_epochs: {args.num_epochs}')\n\n # Get model\n classifier_model, core_model = get_model(args, model_config, steps_per_epoch, warmup_steps, num_labels, max_seq_length)\n optimizer = classifier_model.optimizer\n loss_fn = get_loss_fn(num_labels)\n try:\n if ',' in args.validation_freq:\n validation_freq = args.validation_freq.split(',')\n validation_freq = [int(v) for v in validation_freq]\n else:\n validation_freq = int(args.validation_freq)\n except:\n raise ValueError(f'Invalid argument for validation_freq!')\n logger.info(f'Using a validation frequency of {validation_freq}')\n\n # Restore checkpoint\n if args.init_checkpoint:\n checkpoint_path = f'gs://{args.bucket_name}/{args.project_name}/pretrain/runs/{args.init_checkpoint}'\n checkpoint = tf.train.Checkpoint(model=core_model)\n checkpoint.restore(checkpoint_path).assert_existing_objects_matched()\n logger.info(f'Successfully restored checkpoint from {checkpoint_path}')\n\n # Run keras compile\n logger.info(f'Compiling keras model...')\n classifier_model.compile(\n optimizer=optimizer,\n loss=loss_fn,\n metrics=get_metrics())\n logger.info(f'... 
done')\n\n # Create all custom callbacks\n summary_dir = os.path.join(output_dir, 'summaries')\n summary_callback = tf.keras.callbacks.TensorBoard(summary_dir, profile_batch=0)\n time_history_callback = keras_utils.TimeHistory(\n batch_size=args.train_batch_size,\n log_steps=args.time_history_log_steps,\n logdir=summary_dir)\n custom_callbacks = [summary_callback, time_history_callback]\n if args.save_model:\n logger.info('Using save_model option...')\n checkpoint_path = os.path.join(output_dir, 'checkpoint')\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)\n custom_callbacks.append(checkpoint_callback)\n if args.early_stopping_epochs > 0:\n logger.info(f'Using early stopping of after {args.early_stopping_epochs} epochs of val_loss not decreasing')\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(patience=args.early_stopping_epochs, monitor='val_loss')\n custom_callbacks.append(early_stopping_callback)\n\n # Generate dataset_fn\n train_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'train.tfrecords'),\n max_seq_length,\n args.train_batch_size,\n is_training=True)\n eval_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'dev.tfrecords'),\n max_seq_length,\n args.eval_batch_size,\n is_training=False)\n\n # Add mertrics callback to calculate performance metrics at the end of epoch\n performance_metrics_callback = Metrics(\n eval_input_fn,\n label_mapping,\n os.path.join(summary_dir, 'metrics'),\n eval_steps,\n args.eval_batch_size,\n validation_freq)\n custom_callbacks.append(performance_metrics_callback)\n\n # Run keras fit\n time_start = time.time()\n logger.info('Run training...')\n history = classifier_model.fit(\n x=train_input_fn(),\n validation_data=eval_input_fn(),\n steps_per_epoch=steps_per_epoch,\n epochs=args.num_epochs,\n validation_steps=eval_steps,\n validation_freq=validation_freq,\n callbacks=custom_callbacks,\n verbose=1)\n time_end = time.time()\n training_time_min = (time_end-time_start)/60\n logger.info(f'Finished training after {training_time_min:.1f} min')\n\n # Write training log\n all_scores = performance_metrics_callback.scores\n all_predictions = performance_metrics_callback.predictions\n if len(all_scores) > 0:\n final_scores = all_scores[-1]\n logger.info(f'Final eval scores: {final_scores}')\n else:\n final_scores = {}\n full_history = history.history\n if len(full_history) > 0:\n final_val_loss = full_history['val_loss'][-1]\n final_loss = full_history['loss'][-1]\n logger.info(f'Final training loss: {final_loss:.2f}, Final validation loss: {final_val_loss:.2f}')\n else:\n final_val_loss = None\n final_loss = None\n data = {\n 'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'run_name': run_name,\n 'final_loss': final_loss,\n 'final_val_loss': final_val_loss,\n 'max_seq_length': max_seq_length,\n 'num_train_steps': steps_per_epoch * args.num_epochs,\n 'eval_steps': eval_steps,\n 'steps_per_epoch': steps_per_epoch,\n 'training_time_min': training_time_min,\n 'data_dir': data_dir,\n 'output_dir': output_dir,\n 'all_scores': all_scores,\n 'all_predictions': all_predictions,\n 'num_labels': num_labels,\n 'label_mapping': label_mapping,\n **full_history,\n **final_scores,\n **vars(args),\n }\n # Write run_log\n f_path_training_log = os.path.join(output_dir, 'run_logs.json')\n logger.info(f'Writing training log to {f_path_training_log}...')\n save_to_json(data, f_path_training_log)\n # Write bert config\n model_config.id2label = 
label_mapping\n model_config.label2id = {v:k for k, v in label_mapping.items()}\n model_config.max_seq_length = max_seq_length\n model_config.num_labels = num_labels\n f_path_bert_config = os.path.join(output_dir, 'bert_config.json')\n logger.info(f'Writing BERT config to {f_path_bert_config}...')\n save_to_json(model_config.to_dict(), f_path_bert_config)", "def MNIST_experiment():\n tsetlin_machine = TsetlinMachine(number_clauses=1000,\n number_action_states=1000,\n precision=3.0,\n threshold=10)\n\n X, y, val_X, val_y = MNIST()\n\n tsetlin_machine.fit(X, y, val_X, val_y, 300)\n print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))", "def run_experiment(param_grid, n_processes=-1):\n if type(param_grid) is not list:\n param_grid = [param_grid]\n\n for params in param_grid:\n if 'task' not in params:\n raise Exception('Please define a task function in the param_grid '\n 'to execute.')\n\n if 'num_exp' not in params:\n params['run'] = [0]\n else:\n params['run'] = range(params['num_exp'])\n del params['num_exp']\n\n if type(params['task']) is not list:\n params['task'] = [params['task']]\n\n # Convert parameter grid to iterable list\n params = list(ParameterGrid(param_grid))\n for i in range(len(params)):\n params[i]['id'] = i\n\n print(\"\\033[1mNumber of processes: %d\\033[0m\" % len(params))\n\n if n_processes == -1:\n n_processes = cpu_count()\n if n_processes > 1:\n with Pool(n_processes) as p:\n r = list(tqdm(p.imap_unordered(process_task, params), total=len(params)))\n else:\n for single_param in params:\n process_task(single_param)", "def train_test_model_batch():\n train=learning.Train_kmer_clf()\n train.run()", "def test_distributed(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, self.test_sampler = self._get_smi_dl(phase=\"test\", shuffle=False)\n self.test_sampler.set_epoch(0)\n if self.rank == 0:\n test_loader = tqdm(self.test_loader, desc='testing...')\n else:\n test_loader = self.test_loader\n \n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.cuda(non_blocking=True)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].cuda(non_blocking=True)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n test_batch_size = torch.tensor([test_batch_size]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(test_batch_size, dist.ReduceOp.SUM)\n test_batch_size = test_batch_size.item()\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n 
np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n batch_correct_preds = torch.tensor([batch_correct_preds]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(batch_correct_preds, dist.ReduceOp.SUM)\n batch_correct_preds = batch_correct_preds.item()\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1 and self.rank == 0: # overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n \n batch_loss = torch.tensor([batch_loss]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(batch_loss, dist.ReduceOp.SUM)\n batch_loss = batch_loss.item()\n test_loss += batch_loss\n if self.rank == 0:\n test_loader.set_description(f\"testing...loss={test_loss / test_batch_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n \n dist.barrier()\n 
message = f\"{self.args.expt_name}\\n\"\n if self.rank == 0:\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass" ]
[ "0.67484915", "0.63618076", "0.60620654", "0.5980725", "0.5914939", "0.59113973", "0.59051156", "0.58966136", "0.5866718", "0.58654135", "0.5864061", "0.58606243", "0.5860597", "0.58368456", "0.58136004", "0.5806316", "0.57960474", "0.5792729", "0.57621115", "0.5742489", "0.57239044", "0.569961", "0.5672451", "0.5668706", "0.5661857", "0.56564087", "0.5644287", "0.5631706", "0.5630299", "0.5612329", "0.5590765", "0.55883557", "0.5584761", "0.5582133", "0.55783594", "0.5577101", "0.55678415", "0.5559276", "0.5547396", "0.55381685", "0.55189574", "0.551645", "0.55042267", "0.55020875", "0.54991025", "0.5482772", "0.5479434", "0.54767406", "0.54737425", "0.54731154", "0.54608405", "0.54594034", "0.54511577", "0.5441021", "0.5440325", "0.543949", "0.5423866", "0.5418721", "0.5407948", "0.5405914", "0.539233", "0.53855824", "0.5379487", "0.53717524", "0.53656906", "0.53609717", "0.53521115", "0.53507817", "0.5338037", "0.5336815", "0.5334591", "0.5333839", "0.53235865", "0.53233385", "0.53211135", "0.5320059", "0.53178686", "0.5313806", "0.5311581", "0.5310629", "0.52929944", "0.52892596", "0.528833", "0.52874553", "0.5284524", "0.5281343", "0.5273354", "0.5271715", "0.5269219", "0.52614546", "0.5257887", "0.52559125", "0.525254", "0.5249103", "0.5246124", "0.5244158", "0.5243734", "0.5242785", "0.52406144", "0.52377355" ]
0.6672013
1
GPU MNIST test for TF Training. Instance Type: p3.2xlarge. Given the above parameters, registers a task with a family named after this test, runs the task, and waits for the task to be stopped before doing teardown operations of the instance and cluster.
def test_ecs_tensorflow_training_mnist_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd, ecs_cluster_name):
    instance_id, cluster_arn = ecs_container_instance
    num_gpus = ec2_utils.get_instance_num_gpus(instance_id)
    ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id, num_gpus=num_gpus)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def test_ecs_tensorflow_training_mnist_cpu(cpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id)", "def test_ecs_tensorflow_training_fasterrcnn_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n num_gpus = ec2_utils.get_instance_num_gpus(instance_id)\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id,\n num_gpus=num_gpus)", "def run_sm_perf_test(image_uri, num_nodes, region):\n _, framework_version = get_framework_and_version_from_tag(image_uri)\n if framework_version.startswith(\"1.\"):\n pytest.skip(\"Skipping benchmark test on TF 1.x images.\")\n\n processor = \"gpu\" if \"gpu\" in image_uri else \"cpu\"\n device_cuda_str = (\n f\"{processor}-{get_cuda_version_from_tag(image_uri)}\" if processor == \"gpu\" else processor\n )\n\n ec2_instance_type = \"p3.16xlarge\" if processor == \"gpu\" else \"c5.18xlarge\"\n\n py_version = \"py2\" if \"py2\" in image_uri else \"py37\" if \"py37\" in image_uri else \"py3\"\n\n time_str = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n commit_info = os.getenv(\"CODEBUILD_RESOLVED_SOURCE_VERSION\")\n target_upload_location = os.path.join(\n BENCHMARK_RESULTS_S3_BUCKET,\n \"tensorflow\",\n framework_version,\n \"sagemaker\",\n \"training\",\n device_cuda_str,\n py_version,\n )\n training_job_name = f\"tf{framework_version[0]}-tr-bench-{device_cuda_str}-{num_nodes}-node-{py_version}-{commit_info[:7]}-{time_str}\"\n\n # Inserting random sleep because this test starts multiple training jobs around the same time, resulting in\n # a throttling error for SageMaker APIs.\n time.sleep(Random(x=training_job_name).random() * 60)\n\n test_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"resources\")\n venv_dir = os.path.join(test_dir, \"sm_benchmark_venv\")\n\n ctx = Context()\n\n with ctx.cd(test_dir), ctx.prefix(f\"source {venv_dir}/bin/activate\"):\n log_file = f\"results-{commit_info}-{time_str}-{framework_version}-{device_cuda_str}-{py_version}-{num_nodes}-node.txt\"\n run_out = ctx.run(\n f\"timeout 45m python tf_sm_benchmark.py \"\n f\"--framework-version {framework_version} \"\n f\"--image-uri {image_uri} \"\n f\"--instance-type ml.{ec2_instance_type} \"\n f\"--node-count {num_nodes} \"\n f\"--python {py_version} \"\n f\"--region {region} \"\n f\"--job-name {training_job_name}\"\n f\"2>&1 | tee {log_file}\",\n warn=True,\n echo=True,\n )\n\n if not (run_out.ok or run_out.return_code == 124):\n target_upload_location = os.path.join(target_upload_location, \"failure_log\")\n\n ctx.run(\n f\"aws s3 cp {os.path.join(test_dir, log_file)} {os.path.join(target_upload_location, log_file)}\"\n )\n\n LOGGER.info(f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\")\n\n result_statement, throughput = _print_results_of_test(\n os.path.join(test_dir, log_file), processor\n )\n throughput /= num_nodes\n\n assert run_out.ok, (\n f\"Benchmark Test failed with return code {run_out.return_code}. 
\"\n f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\"\n )\n\n threshold_table = (\n (\n TENSORFLOW_SM_TRAINING_CPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_CPU_4NODE_THRESHOLD\n )\n if processor == \"cpu\"\n else TENSORFLOW_SM_TRAINING_GPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_GPU_4NODE_THRESHOLD\n )\n threshold = get_threshold_for_image(framework_version, threshold_table)\n LOGGER.info(\n f\"tensorflow {framework_version} sagemaker training {device_cuda_str} {py_version} \"\n f\"imagenet {num_nodes} nodes Throughput: {throughput} images/sec, threshold: {threshold} images/sec\"\n )\n assert throughput > threshold, (\n f\"tensorflow {framework_version} sagemaker training {processor} {py_version} imagenet {num_nodes} nodes \"\n f\"Benchmark Result {throughput} does not reach the threshold {threshold}\"\n )", "def do_testing(gpu=0):\n # expected environment variables\n os.environ[\"BERT_BASE_DIR\"] = \"pretrained/cased_L-12_H-768_A-12\"\n os.environ[\"DATA_DIR\"] = \"dataset\"\n os.environ[\"OUTPUT_DIR\"] = \"output\"\n assert os.environ.get(\"BERT_BASE_DIR\") is not None\n assert os.environ.get(\"DATA_DIR\") is not None\n assert os.environ.get(\"OUTPUT_DIR\") is not None\n\n # set the gpu index\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu)\n # set the required flags\n FLAGS.task_name = \"topic\"\n FLAGS.do_predict = True\n FLAGS.data_dir = os.environ.get(\"DATA_DIR\")\n FLAGS.vocab_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"vocab.txt\")\n FLAGS.bert_config_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_config.json\")\n FLAGS.init_checkpoint = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_model.ckpt\")\n FLAGS.do_lower_case = False\n FLAGS.max_seq_length = 128\n FLAGS.output_dir = os.environ.get(\"OUTPUT_DIR\")\n\n run_classifier.main(0)", "def run_time_operation(self, learning_option, cluster):\r\n\r\n # whether or not test procedure\r\n is_train = tf.placeholder_with_default(True, shape=())\r\n learning_option['is_train'] = is_train\r\n\r\n # get worker info: worker num, device type, device num\r\n device = self.get_attr('device')\r\n num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n # construct API\r\n def apiConstructor():\r\n # CIFAR-10 images: [224, 224, 3]\r\n # label: [1000]\r\n def train_in():\r\n x, y = learning_option.get('train_imagenet')\r\n return x, y\r\n def test_in():\r\n x, y = learning_option.get('test_imagenet')\r\n return x, y\r\n\r\n images, labels = tf.cond(is_train, train_in, test_in)\r\n # get output dimension\r\n outdim = list(images.get_shape()[i].value for i in xrange(len(images.get_shape())))\r\n\r\n # set output\r\n self.set_output('image', images)\r\n self.set_output('label', labels)\r\n self.set_dimension('image', outdim)\r\n\r\n # set tf summary\r\n tf.summary.image(self.name, images, max_outputs=10)\r\n\r\n with tf.variable_scope(self.name):\r\n # single node, model parallelism: explicit worker mapping\r\n # data parallelism: equally duplicate model\r\n if learning_option.get(\"parallel\", None) != \"DP\":\r\n with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n apiConstructor()\r\n else:\r\n apiConstructor()", "def run_universal_demo(args, use_gpu: bool = True) -> None:\n if \"scannet\" in args.dataset:\n args.img_name_unique = False\n else:\n args.img_name_unique = True\n\n 
args.u_classes = names_utils.get_universal_class_names()\n args.print_freq = 10\n\n args.split = \"test\"\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in args.test_gpu)\n logger.info(args)\n logger.info(\"=> creating model ...\")\n args.num_model_classes = len(args.u_classes)\n\n itask = InferenceTask(\n args,\n base_size=args.base_size,\n crop_h=args.test_h,\n crop_w=args.test_w,\n input_file=args.input_file,\n model_taxonomy=\"universal\",\n eval_taxonomy=\"universal\",\n scales=args.scales,\n )\n itask.execute()", "def train_distributed():\n # Distributed stuff learnt from this repo: https://github.com/GoogleCloudPlatform/cloudml-dist-\n # mnist-example/blob/master/trainer/task.py\n\n # For Distributed TensorFlow\n env = json.loads(os.environ.get('TF_CONFIG', '{}'))\n cluster_info = env.get('cluster')\n cluster_spec = tf.train.ClusterSpec(cluster_info)\n task_info = env.get('task')\n job_name, task_index = task_info['type'], task_info['index']\n\n device_fn = tf.train.replica_device_setter(\n cluster=cluster_spec,\n worker_device='/job:%s/task:%d' % (job_name, task_index))\n\n print(\"Start job:%s, index:%d\" % (job_name, task_index))\n\n server = tf.train.Server(cluster_spec,\n job_name=job_name, task_index=task_index)\n\n # Start a parameter server node\n if job_name == 'ps':\n server.join()\n\n # Start a master/worker node\n if job_name == 'master' or job_name == 'worker':\n is_chief = (job_name == 'master')\n\n with tf.Graph().as_default() as graph: # TODO necessary?\n with tf.device(device_fn):\n # Prepare the data\n train_data, test_data, embeddings_file = prepare_data()\n\n # Create the model\n print(\"(%s,%d) Creating %d layers of %d units.\" %\n (job_name, task_index, FLAGS.num_layers, FLAGS.size))\n model = create_model(False)\n\n # Create train_dir\n if is_chief:\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n\n # TensorBoard summaries\n (test_loss, test_perplexity, bucket_loss_placeholders,\n bucket_perplexity_placeholders, summary, summary_writer) = create_summary_objects(graph)\n\n # Create supervisor\n init_op = tf.global_variables_initializer()\n\n # Create Supervisor. 
Disabling checkpoints and summaries, because we do that manually\n sv = tf.train.Supervisor(is_chief=is_chief, logdir=FLAGS.train_dir, init_op=init_op,\n init_fn=lambda session: after_init(session, model, embeddings_file),\n saver=model.saver, global_step=model.global_step,\n save_model_secs=0, save_summaries_secs=0, summary_op=None,\n summary_writer=None)\n\n with sv.managed_session(server.target) as sess:\n train(sess, model, train_data, test_data, summary, summary_writer, test_loss,\n test_perplexity, bucket_loss_placeholders, bucket_perplexity_placeholders,\n is_chief, job_name, task_index, sv.should_stop)\n sv.stop()", "def run_mnist(flags_obj):\n model_function = model_fn\n config = tf.estimator.RunConfig(protocol='grpc+verbs',\n save_checkpoints_secs=300,\n save_summary_steps=200,\n log_step_count_steps=200)\n data_format = flags_obj.data_format\n if data_format is None:\n data_format = ('channels_first'\n if tf.test.is_built_with_cuda() else 'channels_last')\n mnist_classifier = tf.estimator.Estimator(\n model_fn=model_function,\n model_dir=flags_obj.model_dir,\n config=config,\n params={\n 'data_format': data_format,\n })\n\n # Set up training and evaluation input functions.\n def train_input_fn():\n \"\"\"Prepare data for training.\"\"\"\n # When choosing shuffle buffer sizes, larger sizes result in better\n # randomness, while smaller sizes use less memory. MNIST is a small\n # enough dataset that we can easily shuffle the full epoch.\n ds = dtrain(flags_obj.data_dir)\n ds = ds.cache().shuffle(buffer_size=50000).batch(flags_obj.batch_size)\n\n # Iterate through the dataset a set number (`epochs_between_evals`) of times\n # during each training session.\n ds = ds.repeat()\n return ds\n\n def eval_input_fn():\n return dtest(flags_obj.data_dir).batch(\n 100).make_one_shot_iterator().get_next()\n\n \n train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=flags_obj.train_steps)\n eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn,throttle_secs=300)\n tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)\n \n '''# Train and evaluate model.\n for _ in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):\n mnist_classifier.train(input_fn=train_input_fn, hooks=train_hooks)\n eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)\n print('\\nEvaluation results:\\n\\t%s\\n' % eval_results)\n\n if model_helpers.past_stop_threshold(flags_obj.stop_threshold,\n eval_results['accuracy']):\n break\n '''\n # Export the model\n if flags_obj.export_dir is not None:\n image = tf.placeholder(tf.float32, [None, 28, 28])\n input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({\n 'image': image,\n })\n mnist_classifier.export_savedmodel(flags_obj.export_dir, input_fn)", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in 
enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n\n if benchmark_spec.tpus:\n mnist_benchmark_script = 'mnist_tpu.py'\n mnist_benchmark_cmd = ('cd tpu/models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--iterations={iterations} '\n '--model_dir={model_dir} '\n '--batch_size={batch_size}'.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n iterations=benchmark_spec.iterations,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n else:\n mnist_benchmark_script = 'mnist.py'\n mnist_benchmark_cmd = ('cd models && '\n 'export PYTHONPATH=$(pwd) && '\n 'cd official/mnist && '\n 'python {script} '\n '--data_dir={data_dir} '\n '--model_dir={model_dir} '\n 
'--batch_size={batch_size} '.format(\n script=mnist_benchmark_script,\n data_dir=benchmark_spec.data_dir,\n model_dir=benchmark_spec.model_dir,\n batch_size=benchmark_spec.batch_size))\n\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n mnist_benchmark_cmd = '{env} {cmd}'.format(\n env=tensorflow.GetEnvironmentVars(vm), cmd=mnist_benchmark_cmd)\n samples = []\n metadata = CreateMetadataDict(benchmark_spec)\n\n if benchmark_spec.train_steps > 0:\n if benchmark_spec.tpus:\n tpu = benchmark_spec.tpu_groups['train'].GetName()\n num_shards = '--num_shards={}'.format(\n benchmark_spec.tpu_groups['train'].GetNumShards())\n else:\n tpu = num_shards = ''\n\n if benchmark_spec.tpus:\n mnist_benchmark_train_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --train_steps={train_steps} '\n '{num_shards} --noenable_predict'.format(\n cmd=mnist_benchmark_cmd,\n tpu=tpu,\n use_tpu=bool(benchmark_spec.tpus),\n train_steps=benchmark_spec.train_steps,\n num_shards=num_shards))\n else:\n mnist_benchmark_train_cmd = (\n '{cmd} --train_epochs={train_epochs} '.format(\n cmd=mnist_benchmark_cmd,\n train_epochs=benchmark_spec.train_epochs))\n\n start = time.time()\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_train_cmd)\n elapsed_seconds = (time.time() - start)\n samples.extend(MakeSamplesFromTrainOutput(\n metadata, stdout + stderr, elapsed_seconds, benchmark_spec.train_steps))\n\n if benchmark_spec.eval_steps > 0:\n if benchmark_spec.tpus:\n mnist_benchmark_eval_cmd = (\n '{cmd} --tpu={tpu} --use_tpu={use_tpu} --eval_steps={eval_steps}'\n .format(\n cmd=mnist_benchmark_cmd,\n use_tpu=bool(benchmark_spec.tpus),\n tpu=benchmark_spec.tpu_groups['eval'].GetName(),\n eval_steps=benchmark_spec.eval_steps))\n else:\n mnist_benchmark_eval_cmd = ('{cmd} --eval_steps={eval_steps}'.format(\n cmd=mnist_benchmark_cmd, eval_steps=benchmark_spec.eval_steps))\n\n stdout, stderr = vm.RobustRemoteCommand(mnist_benchmark_eval_cmd)\n samples.extend(MakeSamplesFromEvalOutput(metadata, stdout + stderr,\n elapsed_seconds))\n return samples", "def run(config_file):\n config = load_config(config_file)\n config_global = config['global']\n\n # setup a logger\n logger = logging.getLogger('experiment')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler_stdout = logging.StreamHandler(sys.stdout)\n handler_stdout.setLevel(config['logger']['level'])\n handler_stdout.setFormatter(formatter)\n logger.addHandler(handler_stdout)\n\n if 'path' in config['logger']:\n handler_file = logging.FileHandler(config['logger']['path'])\n handler_file.setLevel(config['logger']['level'])\n handler_file.setFormatter(formatter)\n logger.addHandler(handler_file)\n\n logger.setLevel(config['logger']['level'])\n\n # Allow the gpu to be used in parallel\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n if 'max_threads' in config_global:\n sess_config.intra_op_parallelism_threads = config_global['max_threads']\n\n # we allow to set the random seed in the config file for reproducibility. However, when running on GPU, results\n # will still be nondeterministic (due to nondeterministic behavior of tensorflow)\n if 'random_seed' in config_global:\n seed = config_global['random_seed']\n logger.info('Using fixed random seed'.format(seed))\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\n with tf.Session(config=sess_config) as sess:\n # We are now fetching all relevant modules. 
It is strictly required that these modules contain a variable named\n # 'component' that points to a class which inherits from experiment.Data, experiment.Experiment,\n # experiment.Trainer or experiment.Evaluator\n data_module = config['data-module']\n model_module = config['model-module']\n training_module = config['training-module']\n evaluation_module = config.get('evaluation-module', None)\n\n # The modules are now dynamically loaded\n DataClass = importlib.import_module(data_module).component\n ModelClass = importlib.import_module(model_module).component\n TrainingClass = importlib.import_module(training_module).component\n EvaluationClass = importlib.import_module(evaluation_module).component if evaluation_module else None\n\n # We then wire together all the modules and start training\n data = DataClass(config['data'], config_global, logger)\n model = ModelClass(config['model'], config_global, logger)\n training = TrainingClass(config['training'], config_global, logger)\n\n # setup the data (validate, create generators, load data, or else)\n logger.info('Setting up the data')\n data.setup()\n # build the model (e.g. compile it)\n logger.info('Building the model')\n model.build(data, sess)\n # start the training process\n logger.info('Starting the training process')\n training.start(model, data, sess)\n\n # perform evaluation, if required\n if EvaluationClass:\n logger.info('Evaluating')\n evaluation = EvaluationClass(config['evaluation'], config_global, logger)\n evaluation.start(model, data, sess)\n else:\n logger.info('No evaluation')\n\n logger.info('DONE')", "def main(params):\n params = run_train.prepare_experiment_folder(params, FOLDER_EXPERIMENT)\n\n # run_train.check_pathes_patterns(paths)\n tl_expt.set_experiment_logger(params['path_expt'])\n logging.info('COMPUTER: \\n%r', platform.uname())\n logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))\n\n tl_expt.create_subfolders(params['path_expt'], LIST_SUBFOLDER)\n\n path_csv = os.path.join(params['path_expt'], NAME_CSV_TRIPLES)\n df_paths = get_csv_triplets(\n params['path_list'], path_csv, params['path_images'], params['path_segms'], force_reload=FORCE_RERUN\n )\n\n dict_classif = seg_clf.load_classifier(params['path_classif'])\n params_clf = dict_classif['params']\n params_clf.update(params)\n logging.info(tl_expt.string_dict(params, desc='UPDATED PARAMETERS'))\n\n # perform on new images\n df_stat = pd.DataFrame()\n _wrapper_detection = partial(\n load_compute_detect_centers,\n params=params_clf,\n path_classif=params['path_classif'],\n path_output=params['path_expt'],\n )\n iterate = tl_expt.WrapExecuteSequence(_wrapper_detection, df_paths.iterrows(), nb_workers=params['nb_workers'])\n for dict_center in iterate:\n df_stat = df_stat.append(dict_center, ignore_index=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES_TEMP))\n\n df_stat.set_index(['image'], inplace=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES))\n logging.info('STATISTIC: \\n %r', df_stat.describe())", "def test_multitask(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def main(_):\n\n params = create_params()\n\n assert params[\"train_dataset_path\"]\n 
assert params[\"eval_dataset_path\"]\n\n input_fn = input_fn_from_files(\n params[\"train_dataset_path\"])\n eval_input_fn = input_fn_from_files(\n params[\"eval_dataset_path\"])\n\n feature_columns = create_feature_columns(params)\n\n model_fn = create_model_fn(feature_columns)\n estimator = create_tpu_estimator(model_fn, feature_columns, params)\n\n for cycle_index in range(params[\"train_epochs\"]):\n tf.logging.info(\"Starting a training cycle: {}/{}\".format(\n cycle_index + 1, params[\"train_epochs\"]))\n estimator.train(input_fn=input_fn, steps=params[\"steps_per_epoch\"])\n tf.logging.info(\"Beginning evaluation.\")\n eval_results = estimator.evaluate(eval_input_fn,\n steps=params[\"num_eval_steps\"])\n tf.logging.info(\"Evaluation complete.\")\n\n recall_1 = float(eval_results[\"recall@1\"])\n recall_5 = float(eval_results[\"recall@5\"])\n loss = float(eval_results[\"loss\"])\n tf.logging.info(\n \"Iteration {}: recall@1 = {:.4f}, recall@5 = {:.4f}, Loss = {:.4f}\"\n .format(cycle_index + 1, recall_1, recall_5, loss))", "def test_training():\n config = SmartDict()\n\n config.NETWORK_CLASS = LMBiSeNet\n config.DATASET_CLASS = DummyCamvid\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [128, 160]\n config.BATCH_SIZE = 2\n config.TEST_STEPS = 1\n config.MAX_STEPS = 2\n config.SAVE_CHECKPOINT_STEPS = 1\n config.KEEP_CHECKPOINT_MAX = 5\n config.SUMMARISE_STEPS = 1\n config.IS_PRETRAIN = False\n config.TASK = Tasks.SEMANTIC_SEGMENTATION\n\n # network model config\n config.NETWORK = SmartDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {\"learning_rate\": 0.001}\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = \"NHWC\"\n\n # daasegt config\n config.DATASET = SmartDict()\n config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = \"NHWC\"\n\n environment.init(\"test_lm_bisenet\")\n prepare_dirs(recreate=True)\n start_training(config, profile_step=1)", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n 
\"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def configure_gpu_tf():\n\n try:\n # locate available devices & set required environment variables\n available_device_ids = GPUtil.getFirstAvailable(order='first', maxLoad=0.7, maxMemory=0.7, attempts=1, interval=10)\n available_device_id = available_device_ids[0]\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = str(available_device_id)\n print(f\"\\n GPU Found! running on GPU:{available_device_id}\\n\")\n\n # set GPU configuration (use all GPU memory if device 0, else use <50% of memory)\n tf.debugging.set_log_device_placement(False)\n physical_gpu = tf.config.experimental.list_physical_devices('GPU')[0]\n\n if available_device_id == 0:\n tf.config.experimental.set_memory_growth(physical_gpu, True)\n else:\n tf.config.experimental.set_virtual_device_configuration(\n physical_gpu,\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4500)]\n )\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n assert len(logical_gpus) == 1, \"error creating virtual GPU to fractionally use memory\"\n\n # if we can't find a GPU, or they are all busy, default to using CPU\n except RuntimeError:\n print(\"\\n No GPUs available... running on CPU\\n\")\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'", "def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == \"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = 
imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n 
gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list", "def evaluate(sess, images_ph, labels_ph, softmax, mnist, config, task):\n\n print 'Evaluating on {} task ({}x{}, {} distractors) using {} glimpses (at {} scales)'.format(\n task, config.new_size, config.new_size, config.n_distractors,\n config.num_glimpses, config.n_patches)\n\n # Evaluation\n test_acc = []\n val_acc = []\n\n for k, dataset in enumerate([mnist.validation, mnist.test]):\n\n steps_per_epoch = dataset.num_examples // config.eval_batch_size\n correct_cnt = 0\n num_samples = steps_per_epoch * config.batch_size\n # loc_net.sampling = True\n\n for test_step in tqdm(xrange(steps_per_epoch)):\n\n images, labels = dataset.next_batch(config.batch_size)\n images = images.reshape((-1, config.original_size, config.original_size, 1))\n labels_bak = labels\n\n if task == 'translated':\n images = translate(images, width=config.new_size, height=config.new_size)\n elif task == 'cluttered':\n images = clutter(images,\n dataset.images.reshape((-1, config.original_size, config.original_size, 1)),\n width=config.new_size, height=config.new_size, n_patches=config.n_distractors\n )\n elif task == 'cluttered_var':\n images, _, _, _ = clutter_rnd(images,\n train_data=dataset.images.reshape(\n (-1, config.original_size, config.original_size, 1)),\n lim=config.distractor_range,\n color_digits=config.color_digits,\n color_noise=config.color_noise,\n width=config.new_size, height=config.new_size, norm=True)\n\n # else:\n # print 'original mnist data ({}x{}).'.format(config.original_size,config.original_size)\n\n # Duplicate M times (average prediction over M repeats)\n images = np.tile(images, [config.M, 1, 1, 1])\n labels = np.tile(labels, [config.M])\n\n softmax_val = sess.run(softmax,\n feed_dict={\n images_ph: images,\n labels_ph: labels\n })\n softmax_val = np.reshape(softmax_val,\n [config.M, -1, config.num_classes])\n softmax_val = np.mean(softmax_val, 0)\n\n pred_labels_val = np.argmax(softmax_val, 1)\n correct_cnt += np.sum(pred_labels_val == labels_bak)\n acc = correct_cnt / float(num_samples)\n\n if k == 0:\n print '\\nVal accuracy\\t{:4.4f} ({:4.4f} error)'.format(100 * acc, 100 - 100 * acc)\n val_acc = acc\n else:\n print 'Test accuracy\\t{:4.4f} ({:4.4f} error)\\n'.format(100 * acc, 100 - 100 * acc)\n test_acc = acc\n\n return test_acc, val_acc", "def execute(gpu, exp_batch, exp_alias, suppress_output=True, number_of_workers=12):\n try:\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(gpu)\n g_conf.VARIABLE_WEIGHT = {}\n\n # At this point the log file with the correct naming is created.\n # You merge the yaml file with the global configuration structure.\n merge_with_yaml(os.path.join('configs', exp_batch, exp_alias + '.yaml'))\n set_type_of_process('train')\n\n # Set the process into loading status.\n coil_logger.add_message('Loading', {'GPU': gpu})\n\n # Put the output to a separate file if it is the case\n if suppress_output:\n if not os.path.exists('_output_logs'):\n os.mkdir('_output_logs')\n sys.stdout = open(os.path.join('_output_logs', exp_alias + '_' +\n g_conf.PROCESS_NAME + '_' + str(os.getpid()) + \".out\"), \"a\",\n buffering=1)\n sys.stderr = open(os.path.join('_output_logs',\n 
exp_alias + '_err_'+g_conf.PROCESS_NAME + '_'\n + str(os.getpid()) + \".out\"),\n \"a\", buffering=1)\n\n if coil_logger.check_finish('train'):\n coil_logger.add_message('Finished', {})\n return\n\n # Preload option\n if g_conf.PRELOAD_MODEL_ALIAS is not None:\n checkpoint = torch.load(os.path.join('_logs', g_conf.PRELOAD_MODEL_BATCH,\n g_conf.PRELOAD_MODEL_ALIAS,\n 'checkpoints',\n str(g_conf.PRELOAD_MODEL_CHECKPOINT)+'.pth'))\n\n\n # Get the latest checkpoint to be loaded\n # returns none if there are no checkpoints saved for this model\n checkpoint_file = get_latest_saved_checkpoint()\n if checkpoint_file is not None:\n checkpoint = torch.load(os.path.join('_logs', exp_batch, exp_alias,\n 'checkpoints', str(get_latest_saved_checkpoint())))\n iteration = checkpoint['iteration']\n best_loss = checkpoint['best_loss']\n best_loss_iter = checkpoint['best_loss_iter']\n print ('iteration: ', iteration, 'best_loss: ', best_loss)\n else:\n iteration = 0\n best_loss = 10000.0\n best_loss_iter = 0\n\n\n # Define the dataset. This structure has __getitem__ redefined in a way\n # that you can access the positions from the root directory as in a vector.\n full_dataset = os.path.join(os.environ[\"COIL_DATASET_PATH\"], g_conf.TRAIN_DATASET_NAME)\n\n # By instantiating the augmenter we get a callable that augments images and transforms them into tensors.\n augmenter = Augmenter(g_conf.AUGMENTATION)\n\n # Instantiate the class used to read the dataset\n dataset = CoILDataset(full_dataset, transform=augmenter, preload_name=str(g_conf.NUMBER_OF_HOURS)+'hours_'+g_conf.TRAIN_DATASET_NAME)\n print (\"Loaded dataset\")\n \n # Creates the sampler; this part is responsible for managing the keys. It divides\n # all keys depending on the measurements and produces a set of keys for each batch.\n # define the sampling strategy for mini-batch, different samplers can be found in 'splitter.py'\n data_loader = select_balancing_strategy(dataset, iteration, number_of_workers)\n\n # Instantiate the network architecture\n model = CoILModel(g_conf.MODEL_TYPE, g_conf.MODEL_CONFIGURATION)\n model.cuda()\n\n optimizer = optim.Adam(model.parameters(), lr=g_conf.LEARNING_RATE) # adabound and adamio can also be used here\n\n if checkpoint_file is not None or g_conf.PRELOAD_MODEL_ALIAS is not None:\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n accumulated_time = checkpoint['total_time']\n loss_window = coil_logger.recover_loss_window('train', iteration)\n else: \n # We accumulate iteration time and keep the average speed\n accumulated_time = 0\n loss_window = []\n\n # freeze the perception module weights if required\n # for m in model.perception.parameters():\n # m.requires_grad = False\n \n # total trainable parameters\n model_parameters = filter(lambda p: p.requires_grad, model.parameters())\n total_params = sum([np.prod(p.size()) for p in model_parameters])\n print ('trainable parameters: ', total_params)\n\n # multi-gpu\n print ('number of gpus: ', torch.cuda.device_count())\n if torch.cuda.device_count() > 1:\n model = nn.DataParallel(model)\n\n criterion = Loss(g_conf.LOSS_FUNCTION)\n\n print ('Start Training')\n\n st = time.time()\n for data in data_loader:\n\n # use this for early stopping if the validation loss is not coming down\n if g_conf.FINISH_ON_VALIDATION_STALE is not None and \\\n check_loss_validation_stopped(iteration, g_conf.FINISH_ON_VALIDATION_STALE):\n break\n\n \"\"\"\n ####################################\n Main optimization loop\n 
####################################\n \"\"\"\n\n iteration += 1\n\n if iteration % 1000 == 0:\n adjust_learning_rate_auto(optimizer, loss_window)\n \n # additional learning rate scheduler - cyclic cosine annealing (https://arxiv.org/pdf/1704.00109.pdf)\n # adjust_learning_rate_cosine_annealing(optimizer, loss_window, iteration)\n\n capture_time = time.time()\n controls = data['directions']\n model.zero_grad()\n branches = model(torch.squeeze(data['rgb'].cuda()),\n dataset.extract_inputs(data).cuda())\n loss_function_params = {\n 'branches': branches,\n 'targets': dataset.extract_targets(data).cuda(),\n 'controls': controls.cuda(),\n 'inputs': dataset.extract_inputs(data).cuda(),\n 'branch_weights': g_conf.BRANCH_LOSS_WEIGHT,\n 'variable_weights': g_conf.VARIABLE_WEIGHT\n }\n loss, _ = criterion(loss_function_params)\n loss.backward()\n optimizer.step()\n \"\"\"\n ####################################\n Saving the model if necessary\n ####################################\n \"\"\"\n\n if is_ready_to_save(iteration):\n if torch.cuda.device_count() > 1:\n state_dict_save = model.module.state_dict()\n else:\n state_dict_save = model.state_dict()\n\n state = {\n 'iteration': iteration,\n 'state_dict': state_dict_save,\n 'best_loss': best_loss,\n 'total_time': accumulated_time,\n 'optimizer': optimizer.state_dict(),\n 'best_loss_iter': best_loss_iter\n }\n torch.save(state, os.path.join('_logs', exp_batch, exp_alias\n , 'checkpoints', str(iteration) + '.pth'))\n\n \"\"\"\n ################################################\n Adding tensorboard logs.\n Making calculations for logging purposes.\n These logs are monitored by the printer module.\n #################################################\n \"\"\"\n coil_logger.add_scalar('Loss', loss.data, iteration)\n coil_logger.add_image('Image', torch.squeeze(data['rgb']), iteration)\n if loss.data < best_loss:\n best_loss = loss.data.tolist()\n best_loss_iter = iteration\n\n # Log a random position\n position = random.randint(0, len(data) - 1)\n\n if torch.cuda.device_count() > 1:\n output = model.module.extract_branch(torch.stack(branches[0:4]), controls)\n else:\n output = model.extract_branch(torch.stack(branches[0:4]), controls)\n error = torch.abs(output - dataset.extract_targets(data).cuda())\n\n accumulated_time += time.time() - capture_time\n\n coil_logger.add_message('Iterating',\n {'Iteration': iteration,\n 'Loss': loss.data.tolist(),\n 'Images/s': (iteration * g_conf.BATCH_SIZE) / accumulated_time,\n 'BestLoss': best_loss, 'BestLossIteration': best_loss_iter,\n 'Output': output[position].data.tolist(),\n 'GroundTruth': dataset.extract_targets(data)[\n position].data.tolist(),\n 'Error': error[position].data.tolist(),\n 'Inputs': dataset.extract_inputs(data)[\n position].data.tolist()},\n iteration)\n loss_window.append(loss.data.tolist())\n coil_logger.write_on_error_csv('train', loss.data)\n print(\"Iteration: %d Loss: %f\" % (iteration, loss.data))\n st = time.time()\n\n coil_logger.add_message('Finished', {})\n \n except KeyboardInterrupt:\n coil_logger.add_message('Error', {'Message': 'Killed By User'})\n\n except RuntimeError as e:\n\n coil_logger.add_message('Error', {'Message': str(e)})\n\n except:\n traceback.print_exc()\n coil_logger.add_message('Error', {'Message': 'Something Happened'})", "def test(model_params, dataset_test, testing_params, log_directory, device, cuda_available=True,\n metric_fns=None):\n # DATA LOADER\n test_loader = DataLoader(dataset_test, batch_size=testing_params[\"batch_size\"],\n shuffle=False, 
pin_memory=True,\n collate_fn=imed_loader_utils.imed_collate,\n num_workers=0)\n\n # LOAD TRAIN MODEL\n fname_model = os.path.join(log_directory, \"best_model.pt\")\n print('\\nLoading model: {}'.format(fname_model))\n model = torch.load(fname_model, map_location=device)\n if cuda_available:\n model.cuda()\n model.eval()\n\n # CREATE OUTPUT FOLDER\n path_3Dpred = os.path.join(log_directory, 'pred_masks')\n if not os.path.isdir(path_3Dpred):\n os.makedirs(path_3Dpred)\n\n # METRIC MANAGER\n metric_mgr = imed_metrics.MetricManager(metric_fns)\n\n # UNCERTAINTY SETTINGS\n if (testing_params['uncertainty']['epistemic'] or testing_params['uncertainty']['aleatoric']) and \\\n testing_params['uncertainty']['n_it'] > 0:\n n_monteCarlo = testing_params['uncertainty']['n_it']\n testing_params['uncertainty']['applied'] = True\n print('\\nComputing model uncertainty over {} iterations.'.format(n_monteCarlo))\n else:\n testing_params['uncertainty']['applied'] = False\n n_monteCarlo = 1\n\n for i_monteCarlo in range(n_monteCarlo):\n preds_npy, gt_npy = run_inference(test_loader, model, model_params, testing_params, path_3Dpred,\n cuda_available, i_monteCarlo)\n metric_mgr(preds_npy, gt_npy)\n\n # COMPUTE UNCERTAINTY MAPS\n if n_monteCarlo > 1:\n imed_utils.run_uncertainty(ifolder=path_3Dpred)\n\n metrics_dict = metric_mgr.get_results()\n metric_mgr.reset()\n print(metrics_dict)\n return metrics_dict", "def run_experiment(hparams):\n\n data_file_name = build_data_file_name(hparams.pair, hparams.time_interval, hparams.data_period)\n\n df = data_pre_processing(data_file_name, hparams.path_to_archives, hparams.path_to_data_dir)\n\n rows = df.shape[0]\n\n train, test = prepare_data(df[rows - 100:rows], hparams.feature_window, hparams.label_window)\n\n print(\"train:{}\".format(train))\n print(\"test:{}\".format(test))\n # my_feature_columns = [tf.feature_column.numeric_column('f')]\n # estimator = tf.estimator.DNNClassifier(\n # feature_columns=[],\n # hidden_units=[1024, 512, 256])\n\n # estimator = tf.estimator.DNNRegressor()", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def test_net_on_dataset(args, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0, use_matlab = False, early_stop=False):\n\n \n # print(\"test_net_on_dataset\")\n dataset = JsonDataset(dataset_name)\n test_timer = Timer()\n \n test_timer.tic()\n \n all_boxes = test_net(args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id, early_stop=early_stop)\n test_timer.toc()\n\n logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n\n roidb = dataset.get_roidb()\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES + 1\n final_boxes = empty_results(num_classes, num_images)\n test_corloc = 'train' in dataset_name\n \n\n all_cls_scores = {}\n\n for i, entry in enumerate(roidb):\n\n if early_stop and i > 10: break\n\n boxes = all_boxes[entry['image']]\n \n cls_key = entry['image'].replace('.jpg','').split('/')[-1]\n\n # print(cls_key)\n\n if boxes['scores'] is not None:\n if test_corloc:\n # print(\"corlooking\")\n _, _, cls_boxes_i = box_results_for_corloc(boxes['scores'], boxes['boxes'])\n else:\n _, _, cls_boxes_i = box_results_with_nms_and_limit(boxes['scores'], boxes['boxes'])\n\n extend_results(i, final_boxes, cls_boxes_i)\n else:\n final_boxes = None\n \n results = task_evaluation.evaluate_all(dataset, final_boxes, output_dir, test_corloc, use_matlab = use_matlab)\n 
return results", "def main(seed, filter_, num_classes, setup, model_name, images_dir, precision_mode, test):\n f1, f2 = filter_\n model_name = 'flex_random_seed_{}_resnet_manual_highres_center_only_f1_{}_f2_{}'.format(seed, f1, f2)\n frozen_graph_filepath = './Models/Frozen_graphs/{}_{}/'.format(f1,f2) + model_name + '_frozen_graph.pb'\n frozen_graph, x_tensor, y_tensor = trt_frozen_graph_and_tensors(\n model_name=model_name, \n frozen_graph_filepath=frozen_graph_filepath, \n precision_mode=precision_mode\n )\n\n elapsed_time_full_dataset = []\n sum_of_confusion_matrices = np.zeros((6, 6))\n \n with tf.compat.v1.Session(graph=frozen_graph) as sess:\n for image_file in [img for img in os.listdir(images_dir) if img.endswith('.JPG')]:\n\n img = Image.open(images_dir + image_file)\n sx,sy = img.size\n\n print(\"Image size is %i x %i\" % (sx,sy)) # sx = 4912, sy = 3264\n print(\"Loading image %s\" % image_file)\n\n img_np = np.array(img)/255.0\n del img\n\n print(\"Predicting for image %s (%i x %i pixel)\" % (image_file,sx,sy))\n\n start = time.time()\n predictions_flex = sess.run(y_tensor, feed_dict={x_tensor:np.expand_dims(img_np, 0)})\n elapsed = time.time() - start\n elapsed_time_full_dataset.append(elapsed)\n del img_np #deleting afterwards to not take the deleting time into account\n\n print(\"Prediction took %f seconds (inference on full image)\" % elapsed)\n print(\"Merging predictions\")\n # merge the predictions on the quarter images\n predictions_flex_combined = np.zeros(predictions_flex.shape)\n\n elapsed = time.time()-start\n if embedded_version:\n print(\"Prediction took %f seconds (inference on split up image)\" % elapsed)\n\n if embedded_version:\n predictions_flex = predictions_flex_combined\n\n if save_annotations:\n print(\"Computing annotations...\")\n annotations = []\n d = 4\n for x in range(100, sx-101, d):\n for y in range(100, sy-101, d):\n x0 = int(round(float(x-100)/4) + 15)\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n annotations.append((probs_flex, x, y))\n\n if test: # add a prefix for test to not replace real experiments\n model_name = 'TEST_' + model_name\n\n # saving annotations\n annotation_dir = images_dir.replace('Data', 'Results/seeds/annotations_trt') + image_file\n annotate_and_save(annotations, d, annotation_dir, model_name, precision_mode)\n classes_image = annotate_and_save_per_class(\n annotations, \n d, \n annotation_dir, \n model_name, \n precision_mode\n )\n\n labels = load_labels(annotation_dir)\n confusion_matrix = np.zeros((num_classes, num_classes))\n for (c_name, x, y) in labels:\n if 100 <= x < sx-101 and 100 <= y < sy-101:\n x0 = int(round(float(x-100)/4) + 15 )\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n\n predicted_class = np.argmax(probs_flex)\n c = train_model.get_classes().index(c_name)\n confusion_matrix[c, predicted_class] += 1\n print(confusion_matrix)\n sum_of_confusion_matrices += confusion_matrix\n\n print(sum_of_confusion_matrices)\n sum_of_cm_fp = './Results/seeds/preds_trt/{}/{}_{}/sum_of_cm_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n elapsed_time_fp = './Results/seeds/elapsed_trt/{}/{}_{}/time_taken_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n\n\n np.save(sum_of_cm_fp, sum_of_confusion_matrices)\n np.save(elapsed_time_fp, elapsed_time_full_dataset)\n tf.reset_default_graph()", "def generate_mnist_datasets(\n datapoints_per_task,\n K_list,\n cir_inner_loop_list, 
\n test_task_idx, \n val_task_idx,\n n_finetune_sets):\n\n # arbitrarily chosen, class-imbalance rate in outer and inner training loops\n cir_outer_loop = 0.5\n cir_inner_loop = 0.5\n # class-imbalance rate in the test sets of the test and validation tasks\n cir_test = 0.5\n # arbitrarily chosen, percentage of data that will be used in the inner training loop\n percent_data_inner_loop = 0.5\n\n percent_data_finetune_val = 0.8\n\n n_test_set = 4000\n\n test_task_idx, val_task_idx = test_task_idx, val_task_idx\n\n finetune_sets_per_K_cir = {}\n test_task_test_set, val_task = {}, {}\n \n\n train_task_list_inner, train_task_list_outer = [], []\n\n train_tasks_idxs = [i for i in range(0,10) if i not in [val_task_idx, test_task_idx]]\n\n base_path = '/home/USER/Documents'\n if (not (os.path.exists(base_path))):\n base_path = '/home/ubuntu/Projects'\n train_images, train_labels = loadlocal_mnist(\n images_path= base_path + '/MAML/raw_data/MNIST_data/train-images-idx3-ubyte', \n labels_path= base_path + '/MAML/raw_data/MNIST_data/train-labels-idx1-ubyte')\n\n test_images, test_labels = loadlocal_mnist(\n images_path= base_path + '/MAML/raw_data/MNIST_data/t10k-images-idx3-ubyte', \n labels_path= base_path + '/MAML/raw_data/MNIST_data/t10k-labels-idx1-ubyte')\n\n\n train_images, test_images = train_images.reshape((-1,28,28))/255.0, test_images.reshape((-1,28,28))/255.0\n images = np.concatenate((train_images, test_images))\n labels = np.concatenate((train_labels, test_labels))\n\n test_task_normal_indexes, val_task_normal_indexes = list(np.nonzero(labels == test_task_idx)[0]), list(np.nonzero(train_labels == val_task_idx)[0])\n test_task_X_normal, val_task_X_normal = images[test_task_normal_indexes],train_images[val_task_normal_indexes]\n test_task_Y_normal, val_task_Y_normal = np.zeros_like(labels[test_task_normal_indexes]), np.zeros_like(train_labels[val_task_normal_indexes])\n\n\n # val and test task have anomalies (samples of other numbers) that are not used for training\n # besides the two sets of anomalies (one for val task and one for test task are disjoint)\n test_task_anomalous_indexes = list(np.nonzero(test_labels[:5000] != test_task_idx)[0])\n val_task_anomalous_indexes= [index for index, element in enumerate(list(test_labels[5000:])) if element not in [val_task_idx, test_task_idx]]\n\n\n test_task_X_anomalous, val_task_X_anomalous = test_images[:5000][test_task_anomalous_indexes],test_images[5000:][val_task_anomalous_indexes]\n test_task_Y_anomalous, val_task_Y_anomalous = np.ones_like(test_labels[:5000][test_task_anomalous_indexes]), np.ones_like(test_labels[5000:][val_task_anomalous_indexes])\n\n test_task_X, val_task_X = np.concatenate((test_task_X_normal, test_task_X_anomalous)), np.concatenate((val_task_X_normal, val_task_X_anomalous))\n test_task_Y, val_task_Y = np.expand_dims(np.concatenate((test_task_Y_normal, test_task_Y_anomalous)),-1), np.expand_dims(np.concatenate((val_task_Y_normal, val_task_Y_anomalous)),-1)\n\n\n train_tasks_X_list, train_tasks_Y_list = [], []\n for task_idx in train_tasks_idxs:\n train_task_normal_indexes = list(np.nonzero(train_labels == task_idx)[0]) \n train_task_anomalous_indexes = [index for index, element in enumerate(list(train_labels)) if element not in [task_idx, val_task_idx, test_task_idx]]\n assert(len(np.nonzero(train_labels[train_task_anomalous_indexes] == val_task_idx)[0]) == 0)\n assert(len(np.nonzero(train_labels[train_task_anomalous_indexes] == test_task_idx)[0]) == 0)\n train_task_X_normal, train_task_X_anomalous = 
train_images[train_task_normal_indexes], train_images[train_task_anomalous_indexes]\n train_task_Y_normal, train_task_Y_anomalous = np.zeros_like(train_labels[train_task_normal_indexes]), np.ones_like(train_labels[train_task_anomalous_indexes])\n train_task_X, train_task_Y = np.concatenate((train_task_X_normal, train_task_X_anomalous)), np.concatenate((train_task_Y_normal, train_task_Y_anomalous))\n train_tasks_X_list.append(train_task_X)\n train_tasks_Y_list.append(np.expand_dims(train_task_Y,-1))\n\n\n\n # building test task sets of data\n normal_indexes, anomaly_indexes = list(np.nonzero(test_task_Y == 0)[0]), list(np.nonzero(test_task_Y == 1)[0])\n n_test_set_normal = int(n_test_set*cir_test)\n test_set_normal_indexes = random.sample(normal_indexes, n_test_set_normal)\n test_set_anomaly_indexes = random.sample(anomaly_indexes, n_test_set - n_test_set_normal)\n test_set_indexes = []\n test_set_indexes += test_set_normal_indexes\n test_set_indexes += test_set_anomaly_indexes\n\n test_task_test_set['test_X'], test_task_test_set['test_Y'] = test_task_X[test_set_indexes], test_task_Y[test_set_indexes]\n\n\n #shuffle\n s_test = np.arange(test_task_test_set['test_X'].shape[0])\n np.random.shuffle(s_test)\n test_task_test_set['test_X'], test_task_test_set['test_Y'] = test_task_test_set['test_X'][s_test], test_task_test_set['test_Y'][s_test]\n\n rest_normal_indexes = [index for index in normal_indexes if index not in test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in test_set_anomaly_indexes]\n\n\n for K in K_list:\n finetune_sets_per_cir = {}\n for cir in cir_inner_loop_list:\n\n rest_normal_indexes = [index for index in normal_indexes if index not in test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in test_set_anomaly_indexes]\n \n finetune_sets_list = []\n\n disjoint = False\n if(cir*K*n_finetune_sets<len(rest_normal_indexes)):\n disjoint = True\n\n n_finetune_normal = int(K*cir)\n n_finetune_anomaly = K - n_finetune_normal\n for i in range(n_finetune_sets):\n # if enough for disjoint do that\n # else sample randomly\n # store in a dict with keys cir_K\n finetune_normal_indexes = random.sample(rest_normal_indexes, n_finetune_normal)\n finetune_anomaly_indexes = random.sample(rest_anomaly_indexes, n_finetune_anomaly)\n finetune_indexes = []\n finetune_indexes += finetune_normal_indexes\n finetune_indexes += finetune_anomaly_indexes\n finetune_set = {}\n finetune_set['finetune_X'], finetune_set['finetune_Y'] = test_task_X[finetune_indexes], test_task_Y[finetune_indexes]\n\n #shuffle\n s_finetune = np.arange(finetune_set['finetune_X'].shape[0])\n np.random.shuffle(s_finetune)\n finetune_set['finetune_X'], finetune_set['finetune_Y'] = finetune_set['finetune_X'][s_finetune], finetune_set['finetune_Y'][s_finetune]\n\n finetune_sets_list.append(finetune_set)\n \n if(disjoint):\n rest_normal_indexes = [index for index in rest_normal_indexes if index not in finetune_normal_indexes]\n rest_anomaly_indexes = [index for index in rest_anomaly_indexes if index not in finetune_anomaly_indexes]\n\n finetune_sets_per_cir[str(cir)] = finetune_sets_list\n\n finetune_sets_per_K_cir[str(K)] = finetune_sets_per_cir\n\n\n #building val task sets of data\n normal_indexes, anomaly_indexes = list(np.nonzero(val_task_Y == 0)[0]), list(np.nonzero(val_task_Y == 1)[0])\n n_val_finetune = int(percent_data_finetune_val*datapoints_per_task)\n n_val_test_set = datapoints_per_task - n_val_finetune\n n_val_test_set_normal = 
int(n_val_test_set*cir_test)\n val_test_set_normal_indexes = random.sample(normal_indexes, n_val_test_set_normal)\n\n\n val_test_set_anomaly_indexes = random.sample(anomaly_indexes, n_val_test_set - n_val_test_set_normal)\n val_test_set_indexes = []\n val_test_set_indexes += val_test_set_normal_indexes\n val_test_set_indexes += val_test_set_anomaly_indexes\n val_task['test_X'], val_task['test_Y'] = val_task_X[val_test_set_indexes], val_task_Y[val_test_set_indexes]\n\n\n rest_normal_indexes = [index for index in normal_indexes if index not in val_test_set_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in val_test_set_anomaly_indexes]\n\n n_val_finetune_normal = int(n_val_finetune*cir_inner_loop)\n val_finetune_normal_indexes = random.sample(rest_normal_indexes, n_val_finetune_normal)\n val_finetune_anomaly_indexes = random.sample(rest_anomaly_indexes, n_val_finetune - n_val_finetune_normal)\n val_finetune_indexes = []\n val_finetune_indexes += val_finetune_normal_indexes\n val_finetune_indexes += val_finetune_anomaly_indexes\n\n val_task['finetune_X'], val_task['finetune_Y'] = val_task_X[val_finetune_indexes], val_task_Y[val_finetune_indexes]\n\n #shuffle\n s_val_finetune = np.arange(val_task['finetune_X'].shape[0])\n s_val_test = np.arange(val_task['test_X'].shape[0])\n np.random.shuffle(s_val_finetune)\n np.random.shuffle(s_val_test)\n\n val_task['finetune_X'], val_task['finetune_Y'] = val_task['finetune_X'][s_val_finetune], val_task['finetune_Y'][s_val_finetune]\n val_task['test_X'], val_task['test_Y'] = val_task['test_X'][s_val_test], val_task['test_Y'][s_val_test]\n\n\n\n # building sets of data of the training tasks\n for task_X, task_Y in zip(train_tasks_X_list, train_tasks_Y_list):\n normal_indexes, anomaly_indexes = list(np.nonzero(task_Y == 0)[0]), list(np.nonzero(task_Y == 1)[0])\n\n n_inner_loop = int(percent_data_inner_loop*datapoints_per_task)\n n_inner_loop_normal = int(n_inner_loop*cir_inner_loop)\n n_outer_loop = datapoints_per_task - n_inner_loop\n n_outer_loop_normal = int(n_outer_loop*cir_outer_loop)\n \n inner_loop_normal_indexes = random.sample(normal_indexes, n_inner_loop_normal)\n inner_loop_anomaly_indexes = random.sample(anomaly_indexes, n_inner_loop - n_inner_loop_normal)\n inner_loop_indexes = []\n inner_loop_indexes += inner_loop_normal_indexes\n inner_loop_indexes += inner_loop_anomaly_indexes\n\n train_task_inner_X, train_task_inner_Y = task_X[inner_loop_indexes], task_Y[inner_loop_indexes]\n\n rest_normal_indexes = [index for index in normal_indexes if index not in inner_loop_normal_indexes]\n rest_anomaly_indexes = [index for index in anomaly_indexes if index not in inner_loop_anomaly_indexes]\n\n \n outer_loop_normal_indexes = random.sample(rest_normal_indexes, n_outer_loop_normal)\n outer_loop_anomaly_indexes = random.sample(rest_anomaly_indexes, n_outer_loop - n_outer_loop_normal)\n outer_loop_indexes = []\n outer_loop_indexes += outer_loop_normal_indexes\n outer_loop_indexes += outer_loop_anomaly_indexes\n\n train_task_outer_X, train_task_outer_Y = task_X[outer_loop_indexes], task_Y[outer_loop_indexes]\n\n\n s_inner = np.arange(train_task_inner_X.shape[0])\n s_outer = np.arange(train_task_outer_X.shape[0])\n np.random.shuffle(s_inner)\n np.random.shuffle(s_outer)\n train_task_list_inner.append([train_task_inner_X[s_inner],train_task_inner_Y[s_inner]])\n train_task_list_outer.append([train_task_outer_X[s_outer],train_task_outer_Y[s_outer]])\n\n\n\n train_tasks_inner_X = np.stack([train_task_list_inner[i][0]\n for 
i in range(len(train_task_list_inner))], 0)\n train_tasks_inner_Y = np.stack([train_task_list_inner[i][1]\n for i in range(len(train_task_list_inner))], 0)\n train_tasks_outer_X = np.stack([train_task_list_outer[i][0]\n for i in range(len(train_task_list_outer))], 0)\n train_tasks_outer_Y = np.stack([train_task_list_outer[i][1]\n for i in range(len(train_task_list_outer))], 0)\n\n \n train_tasks = {'X_train_inner': train_tasks_inner_X,\n 'Y_train_inner': train_tasks_inner_Y,\n 'X_train_outer': train_tasks_outer_X,\n 'Y_train_outer': train_tasks_outer_Y\n }\n\n\n return train_tasks, val_task, test_task_test_set, finetune_sets_per_K_cir", "def test_multitask_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )", "def train(hparams, summary_dir, num_gpus, model_type, max_steps, save_step,\n data_dir, num_targets, dataset, validate, seed, shuffled, shift,\n pad, batch_size=128):\n summary_dir += '/train/'\n with tf.Graph().as_default():\n # Build model\n features = get_features('train', batch_size, num_gpus, data_dir,\n num_targets, dataset, validate, evaluate=False,\n seed=seed, shuffled=shuffled, shift=shift,\n pad=pad)\n model = models[model_type](hparams)\n result, _ = model.multi_gpu(features, num_gpus)\n # Print stats\n param_stats = tf.profiler.profile(\n tf.get_default_graph(),\n options=tf.contrib.tfprof.model_analyzer.\n TRAINABLE_VARS_PARAMS_STAT_OPTIONS)\n sys.stdout.write('total_params: %d\\n' % param_stats.total_parameters)\n writer = tf.summary.FileWriter(summary_dir)\n run_experiment(load_training, summary_dir, writer, train_experiment,\n model, result, max_steps, save_step)\n writer.close()", "def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! 
Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract bad samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create good sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract features of the train set \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO: store these with pandas\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)
104.54012653/255.0],\n std=[70.68188272/255.0, 68.27635443/255.0, 72.54505529/255.0],\n )\n train_data_transforms = Compose([\n ToPILImage(),\n RandomCrop(84, padding=8),\n ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n RandomHorizontalFlip(),\n ToTensor(),\n normalize,\n ])\n test_data_transforms = Compose([\n normalize,\n ])\n else:\n raise ValueError('Invalid data_augmentation argument.')\n\n train_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='train',\n download=True,\n )\n valid_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='validation',\n download=True,\n )\n test_dataset = l2l.vision.datasets.MiniImagenet(\n root=root,\n mode='test',\n download=True,\n )\n if device is None:\n train_dataset.transform = train_data_transforms\n valid_dataset.transform = test_data_transforms\n test_dataset.transform = test_data_transforms\n else:\n train_dataset = l2l.data.OnDeviceDataset(\n dataset=train_dataset,\n transform=train_data_transforms,\n device=device,\n )\n valid_dataset = l2l.data.OnDeviceDataset(\n dataset=valid_dataset,\n transform=test_data_transforms,\n device=device,\n )\n test_dataset = l2l.data.OnDeviceDataset(\n dataset=test_dataset,\n transform=test_data_transforms,\n device=device,\n )\n train_dataset = l2l.data.MetaDataset(train_dataset)\n valid_dataset = l2l.data.MetaDataset(valid_dataset)\n test_dataset = l2l.data.MetaDataset(test_dataset)\n\n train_transforms = [\n NWays(train_dataset, train_ways),\n KShots(train_dataset, train_samples),\n LoadData(train_dataset),\n RemapLabels(train_dataset),\n ConsecutiveLabels(train_dataset),\n ]\n valid_transforms = [\n NWays(valid_dataset, test_ways),\n KShots(valid_dataset, test_samples),\n LoadData(valid_dataset),\n ConsecutiveLabels(valid_dataset),\n RemapLabels(valid_dataset),\n ]\n test_transforms = [\n NWays(test_dataset, test_ways),\n KShots(test_dataset, test_samples),\n LoadData(test_dataset),\n RemapLabels(test_dataset),\n ConsecutiveLabels(test_dataset),\n ]\n\n _datasets = (train_dataset, valid_dataset, test_dataset)\n _transforms = (train_transforms, valid_transforms, test_transforms)\n return _datasets, _transforms", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n constants.TRAIN_BATCH_SIZE,\n is_train=True\n )\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n constants.EVAL_BATCH_SIZE,\n is_train=False\n )\n\n # # check for availabe tpu and gpu units\n # try:\n # tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n # tf.config.experimental_connect_to_cluster(tpu)\n # tf.tpu.experimental.initialize_tpu_system(tpu)\n # strategy = tf.distribute.experimental.TPUStrategy(tpu)\n # except ValueError:\n # strategy = tf.distribute.MirroredStrategy()\n\n # with strategy.scope():\n model = get_model(fn_args)\n\n try:\n log_dir = fn_args.model_run_dir\n except KeyError:\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), \"logs\")\n\n absl.logging.info('Tensorboard logging to {}'.format(log_dir))\n\n callbacks = [\n # tf.keras.callbacks.ModelCheckpoint(\"DeepLabV3plus.ckpt\", verbose=1, save_weights_only=True, save_best_only=True),\n tf.keras.callbacks.ReduceLROnPlateau(monitor=\"iou_score\", factor=0.2, patience=6, verbose=1, mode=\"max\"),\n tf.keras.callbacks.EarlyStopping(monitor=\"iou_score\", patience=16, mode=\"max\", verbose=1, restore_best_weights=True),\n 
tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=\"batch\")\n ]\n\n absl.logging.info('Start training the top classifier')\n \n model.fit(\n train_dataset,\n epochs=constants.EPOCHS,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=callbacks\n )\n\n signatures = {\n 'serving_default':\n _get_serve_image_fn(model).get_concrete_function(\n tf.TensorSpec(\n shape=[None, constants.HEIGHT, constants.WIDTH, 3],\n dtype=tf.float32,\n name=_transformed_name(constants.IMAGE_KEY)\n )\n )\n }\n\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def meta_train(tasks, model, args, device, method='random', meta_iters=10000, num_updates=5, meta_batch_size=5):\n # Define logging\n os.makedirs(args.save_path, exist_ok=True)\n writer = SummaryWriter(\n os.path.join(args.save_path, 'runs', '{}'.format(datetime.now()).replace(\":\", \"_\")))\n\n header = ' Time Task Iteration Loss Accuracy'\n log_template = '{:>10} {:>25} {:10.0f} {:10.6f} {:10.6f}'\n test_template = 'Test mean: {}, Test std: {}'\n\n print(header)\n start = time.time()\n\n # Define optimizers, lr schedulers and loss function\n optimizer_bert = AdamW(params=model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n optimizer = optim.Adam(params=chain(model.proto_net.encoder.mlp.parameters(),\n model.output_layer.parameters()),\n lr=args.lr)\n scheduler_bert = get_cosine_schedule_with_warmup(optimizer_bert, 200, meta_iters)\n scheduler = get_cosine_schedule_with_warmup(optimizer, 0, meta_iters)\n # ProtoNets always have CrossEntropy loss due to softmax output\n cross_entropy = nn.CrossEntropyLoss()\n\n print('Loading Tokenizer..')\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n special_tokens_dict = {'additional_special_tokens': [\"[MNT]\", \"[URL]\"]}\n\n num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)\n print('We have added', num_added_toks, 'tokens')\n model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. 
the length of the tokenizer.\n\n # setup task sampler and task model\n sampler = TaskSampler(tasks, method=method, custom_task_ratio=args.custom_task_ratio, supp_query_split=True)\n task_model = type(model)(args)\n task_model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n\n iterations = 0\n # Iterate over the data\n train_iter = sampler.get_iter('train', tokenizer, batch_size=args.batch_size, shuffle=True)\n model.train()\n\n # setup validation task and episodes for evaluation\n val_task = get_validation_task(args)\n episodes = torch.load(args.episodes)\n\n # dummy data to overwrite old values of task model output layer\n dummy_w = torch.randn((args.mlp_dims[-1], 2))\n dummy_b = torch.randn(2)\n\n average_query_loss = 0\n best_query_loss = 1e+9\n best_test_mean = -1\n best_test_last = -1\n convergence_tolerance_cnt = 0\n # outer loop (meta-iterations)\n for i in range(meta_iters):\n grads = []\n task_losses_inner = {}\n task_accuracies_inner = {}\n task_losses_outer = {}\n task_accuracies_outer = {}\n # inner loop (sample different tasks)\n for task_sample in range(meta_batch_size):\n # clone original model\n task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n task_model.to(device)\n task_model.train()\n\n # new optimizer for every new task model\n task_optimizer_bert = optim.SGD(params=task_model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n task_optimizer = optim.SGD(params=chain(task_model.proto_net.encoder.mlp.parameters(),\n task_model.output_layer.parameters()),\n lr=args.inner_lr)\n\n # prepare support and query set\n batch = next(train_iter)\n support = batch[:3]\n query = batch[3:]\n\n # setup output layer (via meta-model's prototype network)\n proto_embeddings = model.proto_net(support[0].to(device), attention_mask=support[2].to(device))\n prototypes = model.proto_net.calculate_centroids((proto_embeddings, support[1]), sampler.get_num_classes())\n W, b = task_model.calculate_output_params(prototypes.detach())\n task_model.initialize_classifier(W, b)\n\n # train some iterations on support set\n for update in range(num_updates):\n task_optimizer_bert.zero_grad()\n task_optimizer.zero_grad()\n predictions = task_model(support[0].to(device), attention_mask=support[2].to(device))\n task_loss = cross_entropy(predictions, support[1].long().squeeze().to(device))\n task_loss.backward()\n task_optimizer.step()\n task_optimizer_bert.step()\n\n # record task losses and accuracies for logging\n task_losses_inner[sampler.get_name()] = task_loss.item()\n task_accuracies_inner[sampler.get_name()] = sampler.calculate_accuracy(predictions, support[1].to(device))\n\n # trick to add prototypes back to computation graph\n W = 2 * prototypes + (W - 2 * prototypes).detach()\n b = -prototypes.norm(dim=1)**2 + (b + prototypes.norm(dim=1)**2).detach()\n task_model.initialize_classifier(W, b, hard_replace=True)\n\n # calculate gradients for meta update on the query set\n predictions = task_model(query[0].to(device), attention_mask=query[2].to(device))\n query_loss = cross_entropy(predictions, query[1].long().squeeze().to(device))\n query_loss.backward()\n\n # record task losses and accuracies for logging\n task_losses_outer[sampler.get_name()] = query_loss.item()\n task_accuracies_outer[sampler.get_name()] = sampler.calculate_accuracy(predictions, query[1].to(device))\n average_query_loss += query_loss.item()\n\n # register W and b parameters again to avoid error 
in weight update\n W = nn.Parameter(W)\n b = nn.Parameter(b)\n task_model.initialize_classifier(W, b, hard_replace=True)\n\n # save gradients of first task model\n if task_sample == 0:\n for param in task_model.parameters():\n if param.requires_grad and param.grad is not None:\n grads.append(param.grad.clone())\n # add the gradients of all task samples\n else:\n p = 0\n for param in task_model.parameters():\n if param.requires_grad and param.grad is not None:\n grads[p] += param.grad.clone()\n p += 1\n\n # perform meta update\n # first load/add the calculated gradients in the meta-model\n # (already contains gradients from prototype calculation)\n p = 0\n for param in model.parameters():\n if param.requires_grad and param.grad is not None:\n param.grad += grads[p]\n p += 1\n # update model parameters according to the gradients from inner loop (clear gradients afterwards)\n optimizer.step()\n optimizer_bert.step()\n scheduler.step()\n scheduler_bert.step()\n optimizer.zero_grad()\n optimizer_bert.zero_grad()\n\n iterations += 1\n if iterations % args.log_every == 0:\n average_query_loss /= (args.log_every*meta_batch_size)\n iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n iter_acc = sum(task_accuracies_outer.values()) / len(task_accuracies_outer.values())\n writer.add_scalar('Meta_Average/Loss/outer'.format(sampler.get_name()), iter_loss, iterations)\n writer.add_scalar('Meta_Average/Accuracy/outer'.format(sampler.get_name()), iter_acc, iterations)\n for t in tasks:\n task_name = t.get_name()\n if task_name in task_losses_inner.keys():\n writer.add_scalar('{}/Loss/inner'.format(task_name), task_losses_inner[task_name], iterations)\n writer.add_scalar('{}/Accuracy/inner'.format(task_name), task_accuracies_inner[task_name], iterations)\n writer.add_scalar('{}/Loss/outer'.format(task_name), task_losses_outer[task_name], iterations)\n writer.add_scalar('{}/Accuracy/outer'.format(task_name), task_accuracies_outer[task_name], iterations)\n print(log_template.format(\n str(timedelta(seconds=int(time.time() - start))),\n sampler.get_name(),\n iterations,\n iter_loss,\n iter_acc))\n\n # save best snapshot\n if average_query_loss < best_query_loss:\n best_query_loss = average_query_loss\n average_query_loss = 0\n snapshot_prefix = os.path.join(args.save_path, 'best_query')\n snapshot_path = (\n snapshot_prefix +\n '_loss_{:.5f}_iter_{}_model.pt'\n ).format(best_query_loss, iterations)\n model.save_model(snapshot_path)\n # Keep only the best snapshot\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n # evaluate in k shot fashion\n if iterations % args.eval_every == 0:\n task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n test_mean, test_std = k_shot_testing(task_model, episodes, val_task, device, num_updates=args.inner_updates,\n num_test_batches=args.num_test_batches)\n writer.add_scalar('{}/Acc'.format(val_task.get_name()), test_mean, iterations)\n writer.add_scalar('{}/STD'.format(val_task.get_name()), test_std, iterations)\n print(test_template.format(test_mean, test_std), flush=True)\n if test_mean > best_test_mean:\n best_test_mean = test_mean\n snapshot_prefix = os.path.join(args.save_path, 'best_test_{}'.format(val_task.get_name()))\n snapshot_path = (\n snapshot_prefix +\n '_acc_{:.5f}_iter_{}_model.pt'\n ).format(best_test_mean, iterations)\n model.save_model(snapshot_path)\n # Keep only the best snapshot\n 
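# (A minimal standalone sketch of the prune-to-one snapshot pattern used below;
# the helper name is illustrative and not part of the original code.)
# import glob, os
# def prune_snapshots(prefix, keep_path):
#     for stale in glob.glob(prefix + '*'):
#         if stale != keep_path:
#             os.remove(stale)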
for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n \n if test_mean > best_test_last:\n best_test_last = best_test_mean\n convergence_tolerance_cnt = 0\n else:\n convergence_tolerance_cnt += 1\n\n if convergence_tolerance_cnt == args.convergence_tolerance:\n break\n\n\n # saving redundant parameters\n # Save model checkpoints.\n if iterations % args.save_every == 0:\n iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n snapshot_prefix = os.path.join(args.save_path, 'snapshot')\n snapshot_path = (\n snapshot_prefix +\n '_iter_{}_loss_{}_model.pt'\n ).format(iterations, iter_loss)\n logging.debug('Saving model...')\n model.save_model(snapshot_path)\n # Keep only the last snapshot\n for f in glob.glob(snapshot_prefix + '*'):\n if f != snapshot_path:\n os.remove(f)\n\n writer.close()", "def setup(params):\n os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'\n os.environ['TF_SYNC_ON_FINISH'] = str(int(params.sync_on_finish))\n argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n\n # Sets GPU thread settings\n params = params._replace(gpu_thread_mode=params.gpu_thread_mode.lower())\n os.environ['TF_GPU_THREAD_MODE'] = params.gpu_thread_mode\n\n # Default to two threads. One for the device compute and the other for\n # memory copies.\n per_gpu_thread_count = params.per_gpu_thread_count or 2\n total_gpu_thread_count = per_gpu_thread_count * params.num_gpus\n\n os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)\n\n if not params.num_inter_threads and params.gpu_thread_mode in [\n 'gpu_private', 'gpu_shared'\n ]:\n cpu_count = multiprocessing.cpu_count()\n main_thread_count = max(cpu_count - total_gpu_thread_count, 1)\n params = params._replace(num_inter_threads=main_thread_count)\n\n platforms_util.initialize(params, create_config_proto(params))\n\n return params", "def run_test(**kwargs):\n cmd = 'python yolov3/test.py'\n pms_list = [\n 'batch_size', 'model_def',\n 'data_config', 'weights_path',\n 'class_path', 'iou_thres',\n 'nms_thres', 'conf_thres',\n 'n_cpu', 'img_size'\n ]\n call_command(pms_list, cmd, kwargs)", "def test_image_task_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def main():\n\n # Experiment Start\n start_time = datetime.now()\n logger.info(\n '################ Bergson Team Experiment Start #################')\n logger.info(\n f'Starting Bergson Astro Pi team experiment at {start_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n\n '''\n # Load simple Conv2D AI Model\n logger.info(\"Loading AI Convolutional Model\")\n conv2D_model = load_model(\"Conv2D_TF114\")\n '''\n\n # Load TFLite Model\n logger.info(\"Loading TFLite Mobilenetv2 Model\")\n mobilenetv2_interpreter = load_tflite_model(\"./Mobilenetv2_TF114.tflite\")\n\n # Create Log File\n logger.info(f'Creating Log file at {str(data_file)}')\n with open(data_file, 'w') as f:\n writer = csv.writer(f)\n header = (\"Date/time\", \"Location\", \"Picture Name\", \"Predicted NO2\")\n writer.writerow(header)\n\n # Start Loop over 3 hours\n\n now_time = datetime.now()\n i = 0\n # run the capture loop for up to 175 minutes (just under 3 hours)\n while (now_time < start_time + timedelta(minutes=175)):\n\n # Take Earth Picture\n timestamp = datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n pic_name = f'bergson_img_{timestamp}.jpg'\n capture(rpi_cam, str(dir_path/pic_name))\n 
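# (A minimal sketch of the time-bounded loop that drives this block, assuming only
# the standard library; names are illustrative, not part of the original run:)
# from datetime import datetime, timedelta
# deadline = datetime.now() + timedelta(minutes=175)  # just under the 3-hour window
# while datetime.now() < deadline:
#     pass  # capture an image, run inference, append a CSV row, then loop again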
logger.info(f'Experiment Pipeline {i} on picture {pic_name}')\n\n # NDVI Preprocessing\n ndvi_image = get_ndvi(str(dir_path/pic_name))\n ndvi_image = np.expand_dims(ndvi_image, axis=2)\n\n # RGB preprocessing for the shape expected by Mobilenetv2 - comment out the line below when using the simple Conv2D model\n ndvi_rgb_image = get_ndvi_rgb(ndvi_image)\n\n '''\n # Do Inference with simple Conv2D AI Model\n prediction = make_inference(ndvi_image,conv2D_model)\n '''\n \n # Do Inference with TFLite Model\n ndvi_rgb_image = ndvi_rgb_image.astype('float32')\n prediction = make_tflite_inference(\n ndvi_rgb_image, mobilenetv2_interpreter)\n\n # Get Decoded Inference results\n decoded_prediction = decode_prediction(prediction)\n\n # Write Prediction as CSV to disk\n logger.info(\n f'Logging NO2 prediction \\\"{decoded_prediction}\\\" for {pic_name}')\n exif_data = get_img_exif(pic_name, iss, decoded_prediction)\n row = (exif_data['Date/Time'], exif_data['Location'],\n pic_name, exif_data['NO2'])\n with open(data_file, mode='a') as f:\n writer = csv.writer(f)\n writer.writerow(row)\n\n # update the current time\n now_time = datetime.now()\n i = i+1\n\n # End Loop over 3 hours\n\n # Experiment End\n end_time = datetime.now()\n logger.info(\n f'Finishing Bergson Astro Pi team experiment at {end_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n experiment_time = end_time - start_time\n logger.info(f'Bergson Astro Pi team experiment run time {experiment_time}')\n logger.info('################ Bergson Team Experiment End #################')", "def main():\n\n dataset = ConvMNIST(64)\n print(dataset.get_train().x.shape)\n\n\n inputs = Value(type=tf.float32, shape=(None, 28, 28, 1), cls = None)\n targets = Value(type=tf.int64, shape=(None), cls = 10)\n learning_rate = 0.0001\n\n fc_hidden = [1024, 500]\n c_h = [\n (3, 3, 1, 32),\n (3, 3, 32, 64)\n ]\n conv_hidden = ConvHidden(conv_weights=c_h, fc_weights=fc_hidden)\n\n config = Config(inputs, targets, conv_hidden, learning_rate)\n\n network = ConvNetworkBuilder(config)\n hidden = FFConvHiddenBuilder()\n _ = network.build_network(hidden)\n\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n\n trainer = Trainer(network, train_config)\n trainer.train(dataset)", "def construct_test_model(self):\n # Set the placeholder for the input episode\n self.inputa = tf.placeholder(tf.float32)\n self.inputb = tf.placeholder(tf.float32)\n self.labela = tf.placeholder(tf.float32)\n self.labelb = tf.placeholder(tf.float32)\n\n with tf.variable_scope('meta-test-model', reuse=None) as training_scope: \n # construct the model weights \n self.ss_weights = ss_weights = self.construct_resnet_ss_weights()\n self.weights = weights = self.construct_resnet_weights()\n self.fc_weights = fc_weights = self.construct_fc_weights()\n\n # Load test base epoch number from FLAGS\n num_updates = FLAGS.test_base_epoch_num\n\n def task_metalearn(inp, reuse=True):\n \"\"\"The function to process one episode in a meta-batch.\n Args:\n inp: the input episode.\n reuse: whether to reuse the variables for the normalization.\n Returns:\n A series of outputs such as losses and accuracies.\n \"\"\"\n # Separate inp into different variables\n inputa, inputb, labela, labelb = inp\n # Generate empty list to record accuracies\n accb_list = []\n\n # Embed the input images to embeddings with ss weights\n emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse)\n emb_outputb = self.forward_resnet(inputb, weights, 
ss_weights, reuse=True)\n\n # This part is similar to the meta-train function, you may refer to the comments above\n outputa = self.forward_fc(emb_outputa, fc_weights)\n lossa = self.loss_func(outputa, labela)\n grads = tf.gradients(lossa, list(fc_weights.values()))\n gradients = dict(zip(fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fc_weights.keys()]))\n outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n accb_list.append(accb)\n \n for j in range(num_updates - 1):\n lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)\n grads = tf.gradients(lossa, list(fast_fc_weights.values()))\n gradients = dict(zip(fast_fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))\n outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n accb_list.append(accb)\n\n lossb = self.loss_func(outputb, labelb)\n\n task_output = [lossb, accb, accb_list]\n\n return task_output\n\n if FLAGS.norm != 'None':\n unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)\n\n out_dtype = [tf.float32, tf.float32, [tf.float32]*num_updates]\n\n result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), \\\n dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)\n lossesb, accsb, accsb_list = result\n\n self.metaval_total_loss = total_loss = tf.reduce_sum(lossesb)\n self.metaval_total_accuracy = total_accuracy = tf.reduce_sum(accsb)\n self.metaval_total_accuracies = total_accuracies = [tf.reduce_sum(accsb_list[j]) for j in range(num_updates)]", "def benchmark_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, data_dir=self.fake_data_dir, data_name='imagenet')\n self._run_benchmark(params)", "def finetune(ft_ds, model, task, epochs=10, eval_ds=None):\n\n print('==========FINETUNE==========')\n\n # Filter out undesired examples with excluded_label\n ds = ft_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.finetune_preprocess)\n ds = ds.shuffle(1000)\n ds = ds.batch(FLAGS.finetune_bs)\n\n # loss, metrics, optimizers\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_sup_acc = tf.keras.metrics.Accuracy(name='train_supervised_accuracy')\n criterion_sup = tf.nn.softmax_cross_entropy_with_logits\n optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)\n for epoch in range(epochs):\n train_loss.reset_states()\n train_sup_acc.reset_states()\n for x in ds:\n with tf.GradientTape() as tape:\n image = x['image']\n labels = x[task['name']]\n out = model(image, mode='supervised', sup_layers=1, training=True)\n # print(tf.math.argmax(out, axis=-1))\n metrics.update_supervised_accuracy(train_sup_acc, labels, out)\n loss = criterion_sup(tf.one_hot(labels, depth=task['num_classes']), out)\n loss = tf.math.reduce_mean(loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(\n filter(lambda gv: gv[0] is not None, zip(gradients, model.trainable_variables))\n )\n train_loss.update_state(loss)\n print('supervised loss')\n print(train_loss.result())\n print('supervised accuracy')\n 
print(train_sup_acc.result())\n\n # Evaluate results on eval_ds if possible\n if eval_ds: \n evaluate(eval_ds, model, task)", "def run_test(filepath):\n num_class = 120 # dogbreeds class\n model = Resnet50MO(num_class, checkpoint_path=None)\n\n # image settings\n crop_size = model.input_size\n scale_size = model.input_size\n input_size = model.input_size\n input_mean = model.input_mean\n input_std = model.input_std\n\n # hyperparams settings\n epochs = 1\n batch_size = 32 # mini-batch-size\n learning_rate = 0.01\n momentum = 0.5\n decay_factor = 10\n eval_freq = 5 # in epochs\n\n # data generator settings: dataset and dataloader\n train_dataset = DogImageset(filepath, input_size,\n input_mean=input_mean, input_std=input_std)\n val_dataset = DogImageset(filepath, input_size,\n input_mean=input_mean, input_std=input_std)\n \n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)\n\n # Loss and backprop settings\n model.cuda()\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(\n model.parameters(),\n lr=learning_rate,\n momentum=momentum\n )\n\n run_model_train_test(model, train_loader, criterion, optimizer)", "def main(_):\n\n spec = cluster_spec(args.num_workers, 1)\n cluster = tf.train.ClusterSpec(spec).as_cluster_def()\n\n def shutdown(signal, frame):\n logger.warn(\"Received signal {}: exiting\".format(signal))\n sys.exit(128+signal)\n signal.signal(signal.SIGHUP, shutdown)\n signal.signal(signal.SIGINT, shutdown)\n signal.signal(signal.SIGTERM, shutdown)\n\n if args.job_name == 'worker':\n config = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=2)\n\n server = tf.train.Server(cluster, job_name='worker',\n task_index=args.task,\n config=config)\n run(args, server)\n else:\n config = tf.ConfigProto(device_filters=['/job:ps'])\n server = tf.train.Server(cluster, job_name='ps', task_index=args.task,\n config=config)\n while True:\n time.sleep(1000)", "def main():\n dataset = MNIST(BATCH_SIZE)\n \n inputs = Value(type=tf.float32, shape=(None, 784), cls=None)\n targets = Value(type=tf.int64, shape=(None), cls=10)\n fc_hidden = FCHidden(weights=[300, 150])\n\n config = Config(inputs, targets, fc_hidden, LEARNING_RATE)\n\n network_builder = FFNetworkBuilder(config)\n hidden_builder = FFHiddenBuilder()\n _ = network_builder.build_network(hidden_builder)\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n trainer = Trainer(network_builder, train_config)\n trainer.train(dataset)", "def run_experiment(param_grid, n_processes=-1):\n if type(param_grid) is not list:\n param_grid = [param_grid]\n\n for params in param_grid:\n if 'task' not in params:\n raise Exception('Please define a task function in the param_grid '\n 'to execute.')\n\n if 'num_exp' not in params:\n params['run'] = [0]\n else:\n params['run'] = range(params['num_exp'])\n del params['num_exp']\n\n if type(params['task']) is not list:\n params['task'] = [params['task']]\n\n # Convert parameter grid to iterable list\n params = list(ParameterGrid(param_grid))\n for i in range(len(params)):\n params[i]['id'] = i\n\n print(\"\\033[1mNumber of processes: %d\\033[0m\" % len(params))\n\n if n_processes == -1:\n n_processes = cpu_count()\n if n_processes > 1:\n with Pool(n_processes) as p:\n r = list(tqdm(p.imap_unordered(process_task, params), total=len(params)))\n 
else:\n for single_param in params:\n process_task(single_param)", "def _tpu_build(self):\n def _define_model(features, labels, mode, params):\n data_source = (features, labels)\n self.outputs = {}\n self.losses = {}\n self.otters = {}\n outputs, losses, others = self.define_model(data_source, mode)\n\n if mode == tf.estimator.ModeKeys.EVAL:\n return tpu.TPUEstimatorSpec(\n mode=mode, loss=losses, eval_metrics=others)\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tpu.TPUEstimatorSpec(\n mode=mode, predictions=outputs\n )\n if mode == tf.estimator.ModeKeys.TRAIN:\n self.losses['train'] = losses\n self._build_optimizer(tpu_support=True)\n if not len(self.optimize_ops) == 1:\n logging.error('Implementation Error: More than one optimizer defined')\n logging.warning(' [*] Selecting only the first optimizer')\n return tpu.TPUEstimatorSpec(\n mode=mode, loss=losses[0], train_op=self.optimize_ops[0]\n )\n\n tpu_name = ['node-1'] # TODO Bring outside\n tpu_iterations = 500 # TODO Bring outside\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu_name)\n\n run_config = tf.contrib.tpu.RunConfig(\n model_dir=self.output_path,\n cluster=tpu_cluster_resolver,\n session_config=tf.ConfigProto(\n allow_soft_placement=True, log_device_placement=True),\n tpu_config=tpu.TPUConfig(tpu_iterations),\n )\n\n self.estimator = tpu.TPUEstimator(\n model_fn=_define_model,\n use_tpu=True,\n train_batch_size=32*4, #self.dataset['train'].batch_size,\n eval_batch_size=32*4, #self.dataset['validation'].batch_size,\n config=run_config,\n params={\"data_dir\": self.data_dir}\n )", "def train():\n args = arguments_st_train()\n\n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n\n \n if args.use_random_seed:\n tf.set_random_seed(args.random_seed)\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n input_size=input_size,\n random_scale=args.random_scale,\n random_mirror=args.random_mirror,\n random_crop=args.random_crop,\n ignore_label=args.ignore_label,\n img_mean=IMG_MEAN,\n coord=coord,\n task=args.task)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Create network.\n with slim.arg_scope(vgg_arg_scope(weight_decay=args.weight_decay, use_batch_norm=True, is_training=True)):\n if args.network == 'vgg_16_deeplab_st':\n net, end_points = vgg_16_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n elif args.network == 'vgg_16_shortcut_deeplab_st':\n net, end_points = vgg_16_shortcut_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n else:\n raise Exception('network name is not recognized!')\n \n \n # Predictions.\n raw_output = end_points['vgg_16/fc8_voc12']\n\n # gt labels\n raw_gt = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes,\n one_hot=False, task=args.task) # [batch_size, h, w]\n\n # losses\n if args.task == 'normal':\n loss = get_normal_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n elif args.task == 'seg':\n loss = get_seg_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]\n reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\n \n # Image summary for visualisation.\n raw_output_up = 
tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, axis=3)\n pred = tf.expand_dims(raw_output_up, dim=3)\n \n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes, args.task], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes, args.task], tf.uint8)\n \n total_summary = tf.summary.image('images', \n tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n \n # Define loss and optimisation parameters.\n train_op, step_ph = create_train_ops_st(reduced_loss, args)\n \n # Set up tf session and initialize variables. \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n\n # Load variables if the checkpoint is provided.\n if args.restore_from is not None:\n load_st(sess, args)\n \n # Saver for storing checkpoints of the model.\n save_op = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=args.max_to_keep)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n # Iterate over training steps.\n for step in range(args.num_steps):\n start_time = time.time()\n feed_dict = { step_ph : step }\n \n if step % args.save_pred_every == 0:\n loss_value, images, labels, preds, summary, _ = sess.run([reduced_loss, image_batch, label_batch, pred, total_summary, train_op], feed_dict=feed_dict)\n summary_writer.add_summary(summary, step)\n save(save_op, sess, args.snapshot_dir, step)\n else:\n loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)\n duration = time.time() - start_time\n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n coord.request_stop()\n coord.join(threads)", "def run_task(data_dir, task_id):\n print(\"Train and test for task %d ...\" % task_id)\n\n # Parse data\n train_files = glob.glob('%s/qa%d_*_train.txt' % (data_dir, task_id))\n test_files = glob.glob('%s/qa%d_*_test.txt' % (data_dir, task_id))\n\n dictionary = {\"nil\": 0}\n train_story, train_questions, train_qstory = parse_babi_task(train_files, dictionary, False)\n test_story, test_questions, test_qstory = parse_babi_task(test_files, dictionary, False)\n\n general_config = BabiConfig(train_story, train_questions, dictionary)\n\n\n # #### R: this line builds an empty model to train\n # memory, model, loss = build_model(general_config)\n\n # if general_config.linear_start:\n # train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n # else:\n # train(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n\n\n\n # memory, model, loss = build_model(general_config)\n\n # this line\n test(test_story, test_questions, test_qstory, memory, model, loss, general_config)", "def main():\n # lr_decay = 0.5\n # decay_every = 100\n args = get_arguments()\n \n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n \n tf.set_random_seed(args.random_seed)\n \n coord = tf.train.Coordinator()\n \n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_list,\n input_size,\n 
args.random_scale,\n args.random_mirror,\n args.ignore_label,\n IMG_MEAN,\n coord)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Set up tf session and initialize variables. \n config = tf.ConfigProto()\n # config.gpu_options.allow_growth = True\n # config.allow_soft_placement = True\n # config.intra_op_parallelism_threads = 1\n sess = tf.Session(config = config)\n net = unext(image_batch, is_train = True, reuse = False, n_out = NUM_CLASSES)\n \n # Predictions: ignoring all predictions with labels greater or equal than n_classes\n raw_output = net.outputs\n raw_prediction = tf.reshape(raw_output, [-1, args.num_classes])\n label_proc = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes, one_hot=False) # [batch_size, h, w]\n raw_gt = tf.reshape(label_proc, [-1,])\n indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, args.num_classes - 1)), 1)\n gt = tf.cast(tf.gather(raw_gt, indices), dtype = tf.int32)\n prediction = tf.gather(raw_prediction, indices)\n \n main_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = prediction, labels = gt)\n\n t_vars = tf.trainable_variables()\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in t_vars if 'kernel' in v.name]\n #reduced_loss = 0.5 * tf.reduce_mean(main_loss) + generalised_dice_loss(prediction, gt) + tf.add_n(l2_losses)\n reduced_loss = tf.reduce_mean(main_loss) + tf.add_n(l2_losses)\n\n # Processed predictions: for visualisation.\n raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, dimension = 3)\n pred = tf.expand_dims(raw_output_up, dim = 3)\n \n # Image summary.\n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes], tf.uint8)\n \n total_summary = tf.summary.image('images', \n tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n loss_summary = tf.summary.scalar('TotalLoss', reduced_loss)\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n\n # Using Poly learning rate policy \n base_lr = tf.constant(args.learning_rate)\n step_ph = tf.placeholder(dtype=tf.float32, shape=())\n learning_rate = tf.train.exponential_decay(base_lr, step_ph, args.num_steps, args.power)\n\n lr_summary = tf.summary.scalar('LearningRate', learning_rate)\n #train_op = tf.train.MomentumOptimizer(learning_rate, args.momentum).minimize(reduced_loss, var_list = t_vars)\n train_op = tf.train.AdamOptimizer(learning_rate).minimize(reduced_loss, var_list = t_vars)\n init = tf.global_variables_initializer()\n sess.run(init)\n \n # Saver for storing checkpoints of the model.\n saver = tf.train.Saver(var_list = tf.global_variables(), max_to_keep = 10)\n\n ckpt = tf.train.get_checkpoint_state(SNAPSHOT_DIR)\n if ckpt and ckpt.model_checkpoint_path:\n #restore_vars = list([t for t in tf.global_variables() if not 'uconv1' in t.name])\n loader = tf.train.Saver(var_list = tf.global_variables())\n load_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])\n load(loader, sess, ckpt.model_checkpoint_path)\n else:\n print('No checkpoint file found.')\n load_step = 0\n\n # Start queue threads.\n threads = tf.train.start_queue_runners(coord = coord, 
sess = sess)\n\n # Iterate over training steps.\n save_summary_every = 10\n for step in range(args.num_steps):\n start_time = time.time()\n \n feed_dict = {step_ph: step}\n if not step % args.save_pred_every == 0:\n loss_value, _, l_summary, lr_summ = sess.run([reduced_loss, train_op, loss_summary, lr_summary], feed_dict=feed_dict)\n duration = time.time() - start_time\n elif step % args.save_pred_every == 0:\n loss_value, _, summary, l_summary, lr_summ = sess.run([reduced_loss, train_op, total_summary, loss_summary, lr_summary], feed_dict=feed_dict)\n duration = time.time() - start_time\n save(saver, sess, args.snapshot_dir, step)\n summary_writer.add_summary(summary, step)\n\n if step % save_summary_every == 0:\n \n summary_writer.add_summary(l_summary, step)\n summary_writer.add_summary(lr_summ, step)\n \n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n \n coord.request_stop()\n coord.join(threads)", "def run(args):\n # CONFIG\n run_name = get_run_name(args)\n logger.info(f'*** Starting run {run_name} ***')\n data_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/finetune_data/{args.finetune_data}'\n output_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/runs/{run_name}'\n\n # Get configs\n pretrained_model_config_path = get_model_config_path(args)\n model_config = get_model_config(pretrained_model_config_path)\n\n # Meta data/label mapping\n input_meta_data = get_input_meta_data(data_dir)\n label_mapping = get_label_mapping(data_dir)\n logger.info(f'Loaded training data meta.json file: {input_meta_data}')\n\n # Calculate steps, warmup steps and eval steps\n train_data_size = input_meta_data['train_data_size']\n num_labels = input_meta_data['num_labels']\n max_seq_length = input_meta_data['max_seq_length']\n if args.limit_train_steps is None:\n steps_per_epoch = int(train_data_size / args.train_batch_size)\n else:\n steps_per_epoch = args.limit_train_steps\n warmup_steps = int(args.num_epochs * train_data_size * args.warmup_proportion/ args.train_batch_size)\n if args.limit_eval_steps is None:\n eval_steps = int(math.ceil(input_meta_data['eval_data_size'] / args.eval_batch_size))\n else:\n eval_steps = args.limit_eval_steps\n\n # some logging\n if args.init_checkpoint is None:\n logger.info(f'Finetuning on datset {args.finetune_data} using default pretrained model {args.model_class}')\n else:\n logger.info(f'Finetuning on datset {args.finetune_data} using pretrained model in {args.init_checkpoint} of type {args.model_class}')\n logger.info(f'Running {args.num_epochs} epochs with {steps_per_epoch:,} steps per epoch')\n logger.info(f'Using warmup proportion of {args.warmup_proportion}, resulting in {warmup_steps:,} warmup steps')\n logger.info(f'Using learning rate: {args.learning_rate}, training batch size: {args.train_batch_size}, num_epochs: {args.num_epochs}')\n\n # Get model\n classifier_model, core_model = get_model(args, model_config, steps_per_epoch, warmup_steps, num_labels, max_seq_length)\n optimizer = classifier_model.optimizer\n loss_fn = get_loss_fn(num_labels)\n try:\n if ',' in args.validation_freq:\n validation_freq = args.validation_freq.split(',')\n validation_freq = [int(v) for v in validation_freq]\n else:\n validation_freq = int(args.validation_freq)\n except:\n raise ValueError(f'Invalid argument for validation_freq!')\n logger.info(f'Using a validation frequency of {validation_freq}')\n\n # Restore checkpoint\n if args.init_checkpoint:\n checkpoint_path = 
f'gs://{args.bucket_name}/{args.project_name}/pretrain/runs/{args.init_checkpoint}'\n checkpoint = tf.train.Checkpoint(model=core_model)\n checkpoint.restore(checkpoint_path).assert_existing_objects_matched()\n logger.info(f'Successfully restored checkpoint from {checkpoint_path}')\n\n # Run keras compile\n logger.info(f'Compiling keras model...')\n classifier_model.compile(\n optimizer=optimizer,\n loss=loss_fn,\n metrics=get_metrics())\n logger.info(f'... done')\n\n # Create all custom callbacks\n summary_dir = os.path.join(output_dir, 'summaries')\n summary_callback = tf.keras.callbacks.TensorBoard(summary_dir, profile_batch=0)\n time_history_callback = keras_utils.TimeHistory(\n batch_size=args.train_batch_size,\n log_steps=args.time_history_log_steps,\n logdir=summary_dir)\n custom_callbacks = [summary_callback, time_history_callback]\n if args.save_model:\n logger.info('Using save_model option...')\n checkpoint_path = os.path.join(output_dir, 'checkpoint')\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)\n custom_callbacks.append(checkpoint_callback)\n if args.early_stopping_epochs > 0:\n logger.info(f'Using early stopping of after {args.early_stopping_epochs} epochs of val_loss not decreasing')\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(patience=args.early_stopping_epochs, monitor='val_loss')\n custom_callbacks.append(early_stopping_callback)\n\n # Generate dataset_fn\n train_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'train.tfrecords'),\n max_seq_length,\n args.train_batch_size,\n is_training=True)\n eval_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'dev.tfrecords'),\n max_seq_length,\n args.eval_batch_size,\n is_training=False)\n\n # Add mertrics callback to calculate performance metrics at the end of epoch\n performance_metrics_callback = Metrics(\n eval_input_fn,\n label_mapping,\n os.path.join(summary_dir, 'metrics'),\n eval_steps,\n args.eval_batch_size,\n validation_freq)\n custom_callbacks.append(performance_metrics_callback)\n\n # Run keras fit\n time_start = time.time()\n logger.info('Run training...')\n history = classifier_model.fit(\n x=train_input_fn(),\n validation_data=eval_input_fn(),\n steps_per_epoch=steps_per_epoch,\n epochs=args.num_epochs,\n validation_steps=eval_steps,\n validation_freq=validation_freq,\n callbacks=custom_callbacks,\n verbose=1)\n time_end = time.time()\n training_time_min = (time_end-time_start)/60\n logger.info(f'Finished training after {training_time_min:.1f} min')\n\n # Write training log\n all_scores = performance_metrics_callback.scores\n all_predictions = performance_metrics_callback.predictions\n if len(all_scores) > 0:\n final_scores = all_scores[-1]\n logger.info(f'Final eval scores: {final_scores}')\n else:\n final_scores = {}\n full_history = history.history\n if len(full_history) > 0:\n final_val_loss = full_history['val_loss'][-1]\n final_loss = full_history['loss'][-1]\n logger.info(f'Final training loss: {final_loss:.2f}, Final validation loss: {final_val_loss:.2f}')\n else:\n final_val_loss = None\n final_loss = None\n data = {\n 'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'run_name': run_name,\n 'final_loss': final_loss,\n 'final_val_loss': final_val_loss,\n 'max_seq_length': max_seq_length,\n 'num_train_steps': steps_per_epoch * args.num_epochs,\n 'eval_steps': eval_steps,\n 'steps_per_epoch': steps_per_epoch,\n 'training_time_min': training_time_min,\n 'data_dir': data_dir,\n 
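# (The keys below, together with the **full_history, **final_scores and **vars(args)
# splats, flatten the Keras training history, the final evaluation metrics and every
# CLI argument into a single run-log record.)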
'output_dir': output_dir,\n 'all_scores': all_scores,\n 'all_predictions': all_predictions,\n 'num_labels': num_labels,\n 'label_mapping': label_mapping,\n **full_history,\n **final_scores,\n **vars(args),\n }\n # Write run_log\n f_path_training_log = os.path.join(output_dir, 'run_logs.json')\n logger.info(f'Writing training log to {f_path_training_log}...')\n save_to_json(data, f_path_training_log)\n # Write bert config\n model_config.id2label = label_mapping\n model_config.label2id = {v:k for k, v in label_mapping.items()}\n model_config.max_seq_length = max_seq_length\n model_config.num_labels = num_labels\n f_path_bert_config = os.path.join(output_dir, 'bert_config.json')\n logger.info(f'Writing BERT config to {f_path_bert_config}...')\n save_to_json(model_config.to_dict(), f_path_bert_config)", "def one_shot_test(self, model, support_set_size, number_of_tasks_per_alphabet,\n is_validation):\n\n # Set some variables that depend on dataset\n if is_validation:\n alphabets = self._validation_alphabets\n print('\\nMaking One Shot Task on validation alphabets:')\n else:\n alphabets = self._evaluation_alphabets\n print('\\nMaking One Shot Task on evaluation alphabets:')\n\n mean_global_accuracy = 0\n\n for alphabet in alphabets:\n mean_alphabet_accuracy = 0\n for _ in range(number_of_tasks_per_alphabet):\n images, _ = self.get_one_shot_batch(\n support_set_size, is_validation=is_validation)\n probabilities = model.predict_on_batch(images)\n\n # Added this condition because we noticed that sometimes the outputs\n # of the classifier were almost the same for all images, meaning that\n # the argmax would always be 0 by definition.\n if np.argmax(probabilities) == 0 and probabilities.std()>0.01:\n accuracy = 1.0\n else:\n accuracy = 0.0\n\n mean_alphabet_accuracy += accuracy\n mean_global_accuracy += accuracy\n\n mean_alphabet_accuracy /= number_of_tasks_per_alphabet\n\n print(alphabet + ' alphabet' + ', accuracy: ' +\n str(mean_alphabet_accuracy))\n if is_validation:\n self._current_validation_alphabet_index += 1\n else:\n self._current_evaluation_alphabet_index += 1\n\n mean_global_accuracy /= (len(alphabets) *\n number_of_tasks_per_alphabet)\n\n print('\\nMean global accuracy: ' + str(mean_global_accuracy))\n\n # reset counter\n if is_validation:\n self._current_validation_alphabet_index = 0\n else:\n self._current_evaluation_alphabet_index = 0\n\n return mean_global_accuracy", "def main():\n\n args = define_and_process_args()\n print('\\n', 'ARGUMENTS', '\\n\\n', args, '\\n')\n\n log_dir = get_log_dir(args)\n print('\\n', 'LOG DIRECTORY', '\\n\\n', log_dir, '\\n')\n\n standardized_data_path = os.path.join(args.data_dir, args.data_filename)\n if not os.path.exists(standardized_data_path):\n message = '%s does not exist.' % standardized_data_path\n raise ValueError(message)\n\n dataset = data.Dataset(standardized_data_path)\n train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)\n train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]\n test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]\n\n train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)\n test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)\n\n Model = eval('models.' 
+ args.model_type + 'Model')\n input_size = dataset.input_size\n target_size = dataset.num_classes\n\n # This is just to satisfy a low-CPU requirement on our cluster\n # when using GPUs.\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n config = tf.ConfigProto(intra_op_parallelism_threads=2,\n inter_op_parallelism_threads=2)\n else:\n config = None\n\n with tf.Session(config=config) as sess:\n model = Model(input_size, target_size, args.num_layers,\n args.hidden_layer_size, args.init_scale,\n args.dropout_keep_prob)\n optimizer = optimizers.Optimizer(\n model.loss, args.num_train_sweeps, args.initial_learning_rate,\n args.num_initial_sweeps, args.num_sweeps_per_decay,\n args.decay_factor, args.max_global_grad_norm)\n train(sess, model, optimizer, log_dir, args.batch_size,\n args.num_sweeps_per_summary, args.num_sweeps_per_save,\n train_input_seqs, train_reset_seqs, train_label_seqs,\n test_input_seqs, test_reset_seqs, test_label_seqs)", "def run_sequence(seq: Sequence, tracker: Tracker, debug=False, num_gpu=8):\n '''2021.1.2 Add multiple gpu support'''\n try:\n worker_name = multiprocessing.current_process().name\n worker_id = int(worker_name[worker_name.find('-') + 1:]) - 1\n gpu_id = worker_id % num_gpu\n torch.cuda.set_device(gpu_id)\n except:\n pass\n\n def _results_exist():\n if seq.object_ids is None:\n if seq.dataset in ['trackingnet', 'got10k']:\n base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)\n bbox_file = '{}.txt'.format(base_results_path)\n else:\n bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name)\n return os.path.isfile(bbox_file)\n else:\n bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids]\n missing = [not os.path.isfile(f) for f in bbox_files]\n return sum(missing) == 0\n\n if _results_exist() and not debug:\n print('FPS: {}'.format(-1))\n return\n\n print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))\n\n if debug:\n output = tracker.run_sequence(seq, debug=debug)\n else:\n try:\n output = tracker.run_sequence(seq, debug=debug)\n except Exception as e:\n print(e)\n return\n\n sys.stdout.flush()\n\n if isinstance(output['time'][0], (dict, OrderedDict)):\n exec_time = sum([sum(times.values()) for times in output['time']])\n num_frames = len(output['time'])\n else:\n exec_time = sum(output['time'])\n num_frames = len(output['time'])\n\n print('FPS: {}'.format(num_frames / exec_time))\n\n if not debug:\n _save_tracker_output(seq, tracker, output)", "def run_task(snapshot_config, *_):\n with LocalTFRunner(snapshot_config) as runner:\n env = TfEnv(gym.make('InvertedDoublePendulum-v2'))\n\n action_noise = GaussianStrategy(env.spec, max_sigma=0.1, min_sigma=0.1)\n\n policy = ContinuousMLPPolicy(env_spec=env.spec,\n hidden_sizes=[400, 300],\n hidden_nonlinearity=tf.nn.relu,\n output_nonlinearity=tf.nn.tanh)\n\n qf = ContinuousMLPQFunction(name='ContinuousMLPQFunction',\n env_spec=env.spec,\n hidden_sizes=[400, 300],\n action_merge_layer=0,\n hidden_nonlinearity=tf.nn.relu)\n\n qf2 = ContinuousMLPQFunction(name='ContinuousMLPQFunction2',\n env_spec=env.spec,\n hidden_sizes=[400, 300],\n action_merge_layer=0,\n hidden_nonlinearity=tf.nn.relu)\n\n replay_buffer = SimpleReplayBuffer(env_spec=env.spec,\n size_in_transitions=int(1e6),\n time_horizon=250)\n\n td3 = TD3(env_spec=env.spec,\n policy=policy,\n policy_lr=1e-4,\n qf_lr=1e-3,\n qf=qf,\n qf2=qf2,\n replay_buffer=replay_buffer,\n target_update_tau=1e-2,\n n_epoch_cycles=20,\n 
n_train_steps=1,\n smooth_return=False,\n discount=0.99,\n buffer_batch_size=100,\n min_buffer_size=1e4,\n exploration_strategy=action_noise,\n policy_optimizer=tf.train.AdamOptimizer,\n qf_optimizer=tf.train.AdamOptimizer)\n\n runner.setup(td3, env)\n runner.train(n_epochs=500, n_epoch_cycles=20, batch_size=250)", "def run_mnist_test():\n mnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n train_x, train_y = mnist.train.images, mnist.train.labels,\n test_x, test_y = mnist.test.images, mnist.test.labels\n # Reshape right off the bat to save some time.\n train_x = train_x.reshape(-1, 28, 28, 1)\n test_x = test_x.reshape(-1, 28, 28, 1)\n\n conv1 = LeNetClassifier.ConvLayer(kernel_width=5, kernel_height=5,\n feature_maps=1)\n conv2 = LeNetClassifier.ConvLayer(kernel_width=3, kernel_height=3,\n feature_maps=32)\n conv3 = LeNetClassifier.ConvLayer(kernel_width=3, kernel_height=3,\n feature_maps=64)\n network = LeNetClassifier((28, 28, 1), [conv1, conv2, conv3],\n [4 * 4 * 128, 625], 10, batch_size=128)\n\n saver = tf.train.Saver()\n\n sess = tf.Session()\n init = tf.initialize_all_variables()\n sess.run(init)\n\n writer = tf.train.SummaryWriter(\"mnist_logs\", sess.graph_def)\n\n print(\"Tensorflow: Starting MNIST test...\")\n\n accuracy = 0\n start_time = time.time()\n iterations = 0\n while iterations < 2000:\n if iterations % 500 == 0:\n test_batch = mnist.test.next_batch(128)\n result = sess.run(network.predict(),\n feed_dict={network.inputs(): test_batch[0],\n network.expected_outputs(): test_batch[1]})\n argmax = np.argmax(test_batch[1], axis=1)\n accuracy = np.mean(argmax == result)\n print(\"Tensorflow: step %d, testing accuracy %s\" % \\\n (iterations, accuracy))\n\n batch = mnist.train.next_batch(128)\n sess.run(network.train(), feed_dict={network.inputs(): batch[0],\n network.expected_outputs(): batch[1]})\n iterations += 1\n\n # Save the network at the end.\n #saver.save(sess, \"Variables/test.ckpt\")\n\n elapsed = time.time() - start_time\n speed = iterations / elapsed\n print(\"Tensorflow: Ran %d training iterations. 
(%f iter/s)\" % \\\n (iterations, speed))\n print(\"Tensorflow: MNIST test completed in %f seconds.\" % (elapsed))\n return (elapsed, speed)", "def warmup_resnet_imagenet_128_gpu_8_real(self):\n test_id = 'warmup_resnet_imagenet.gpu_8.128.real'\n args = {}\n config = self.build_resnet_test_config(test_id, args, batch_size=128, gpus=8,\n repeat=1, total_batches=1300)\n self.run_test_suite(config)", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def TpuTrainStep(self, *args):\n with tf.name_scope('tpu_train'):\n with py_utils.OpportunisticVariableReuseScope(True):\n with contextlib.ExitStack() as stack:\n if py_utils.IsEagerMode():\n stack.enter_context(py_utils.GradientTape(persistent=True))\n self._model.ConstructFPropBPropGraph()\n per_step_eval_metrics = self._eval_metrics.PackStepMetricsForAccumulation(\n self.task.eval_metrics, args)\n outfeed_op = self._OutfeedEnqueue(self.task.per_example_tensors)\n summed_metrics = []\n assert len(per_step_eval_metrics) == len(args)\n with tf.control_dependencies([outfeed_op]):\n for x, y in zip(per_step_eval_metrics, args):\n summed_metrics.append(x + y)\n return summed_metrics + [self.task.train_op]", "def main():\n args = get_arguments()\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n None, # No defined input size.\n False, # No random scale.\n False, # No random mirror.\n args.ignore_label,\n IMG_MEAN,\n coord)\n image, label = reader.image, reader.label\n image_batch, label_batch = tf.expand_dims(image, dim=0), tf.expand_dims(label, dim=0) # Add one batch dimension.\n\n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=False, num_classes=args.num_classes)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n \n # Predictions.\n raw_output = net.layers['fc_out']\n raw_output = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output = tf.argmax(raw_output, dimension=3)\n pred = tf.expand_dims(raw_output, dim=3) # Create 4-d tensor.\n\n # mIoU\n\n pred_flatten = tf.reshape(pred, [-1,])\n gt = tf.reshape(label_batch, [-1,])\n weights = tf.cast(tf.less_equal(gt, args.num_classes - 1), tf.int32) # Ignoring all labels greater than or equal to n_classes.\n mIoU, update_op = tf.contrib.metrics.streaming_mean_iou(predictions=pred_flatten, labels=gt, num_classes=args.num_classes, weights=weights)\n \n # Set up tf session and initialize variables. 
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True \n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n sess.run(tf.local_variables_initializer())\n \n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n\n ckpt = tf.train.get_checkpoint_state(SNAPSHOT_DIR)\n\n if ckpt and ckpt.model_checkpoint_path:\n loader = tf.train.Saver(var_list=restore_var)\n load_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])\n load(loader, sess, ckpt.model_checkpoint_path)\n else:\n print('No checkpoint file found.')\n load_step = 0\n\n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n if not os.path.exists(SAVE_DIR):\n os.makedirs(SAVE_DIR)\n\n for step in range(args.num_steps):\n preds, _ = sess.run([pred, update_op])\n\n if IS_SAVE == True:\n msk = decode_labels(preds, num_classes=args.num_classes)\n im = Image.fromarray(msk[0])\n filename = 'mask' + str(step) + '.png'\n im.save(SAVE_DIR + filename)\n\n if step % 10 == 0:\n print('step {0} mIoU: {1}'.format(step, mIoU.eval(session=sess)))\n\n coord.request_stop()\n coord.join(threads)", "def test_is_trainable(estimator_fn, machine_settings):\n # Setup\n batch_size = 128 # Must be divisible by number of replicas (8 for TPU v2)\n crop_size = 24\n eval_count = 1024\n eval_steps = int(eval_count / batch_size)\n assert eval_steps * batch_size == eval_count\n estimator = estimator_fn(\n micronet.cifar.linear_model.create_model, batch_size, batch_size)\n\n # Replace with lambda?\n def input_fn(params):\n # Only the TPUEstimator needs to pass batch_size to the input_fn.\n if 'batch_size' in params:\n assert params['batch_size'] == batch_size\n del params\n mini_ds = cifar_ds.train_dataset(\n cloud_storage=machine_settings.is_cloud)\n mini_ds = mini_ds.map(\n cifar_ds.preprocess_fn(augment=False, crop_to=crop_size))\n # Take a small amount and repeat so that the test can show training\n # in a smaller amount of steps (so the test runs quickly).\n mini_ds.take(500).repeat()\n return mini_ds.batch(batch_size, drop_remainder=True)\n\n # Test\n # 1. Check that the untrained model predicts randomly.\n #\n # I want the test to pass 99% of the time.\n # For a 1000 trial experiment with success probability of 1% (100 classes),\n # CDF_inverse(0.01) ~= 3\n # CDF_inverse(0.99) ~= 19\n # (from binomial dist calculator:\n # https://www.di-mgt.com.au/binomial-calculator.html)\n # TODO: is it valid to assume a random output from the untrained model?\n results = estimator.evaluate(input_fn, steps=eval_steps)\n assert 3/eval_count < results[micronet.estimator.TOP_1_ACCURACY_KEY] \\\n <= 19/eval_count\n\n # 2. Check that the model can be trained.\n # Using the eval_steps as the max training steps. Could use something else.\n estimator.train(input_fn, max_steps=eval_steps)\n\n # 3. Check that the training has increased the model's accuracy.\n # Results is a dict containing the metrics defined by the model_fn.\n # FIXME 4: I should encapsulate/separate the metric creation so that it\n # is easy to assume that certain metrics are present.\n results = estimator.evaluate(input_fn, steps=eval_steps)\n # We should expect some improvement over the random case, 1/100. 
Running\n # it a few times gave ~4.5%, so using a value a little lower to make sure\n # the test reliably passes (while still being useful).\n assert results[micronet.estimator.TOP_1_ACCURACY_KEY] >= 0.040", "def backend_train_test_loop(e=None, timeout=30,\n is_compute_contributivity='True',\n is_parallelize=''):\n if is_parallelize == '':\n is_parallelize = None\n else:\n is_parallelize = strtobool(is_parallelize)\n\n from databoard.db_tools import backend_train_test_loop\n is_compute_contributivity = strtobool(is_compute_contributivity)\n backend_train_test_loop(\n e, timeout, is_compute_contributivity, is_parallelize)", "def codeepneat_mnist_example(_):\n # Set standard configuration specific to TFNE but not the neuroevolution process\n logging_level = logging.INFO\n config_file_path = './codeepneat_mnist_example_config.cfg'\n backup_dir_path = './tfne_state_backups/'\n max_generations = 20\n max_fitness = None\n\n # Read in optionally supplied flags, changing the just set standard configuration\n if flags.FLAGS.logging_level is not None:\n logging_level = flags.FLAGS.logging_level\n if flags.FLAGS.config_file is not None:\n config_file_path = flags.FLAGS.config_file\n if flags.FLAGS.backup_dir is not None:\n backup_dir_path = flags.FLAGS.backup_dir\n if flags.FLAGS.max_generations is not None:\n max_generations = flags.FLAGS.max_generations\n if flags.FLAGS.max_fitness is not None:\n max_fitness = flags.FLAGS.max_fitness\n\n # Set logging, parse config\n logging.set_verbosity(logging_level)\n config = tfne.parse_configuration(config_file_path)\n\n # Initialize the environment and the specific NE algorithm\n environment = tfne.environments.MNISTEnvironment(weight_training=True, config=config, verbosity=logging_level)\n ne_algorithm = tfne.algorithms.CoDeepNEAT(config)\n\n # Initialize evolution engine and supply config as well as initialized NE algorithm and evaluation environment.\n engine = tfne.EvolutionEngine(ne_algorithm=ne_algorithm,\n environment=environment,\n backup_dir_path=backup_dir_path,\n max_generations=max_generations,\n max_fitness=max_fitness)\n\n # Start training process, returning the best genome when training ends\n best_genome = engine.train()\n print(\"Best genome returned by evolution:\\n\")\n print(best_genome)\n\n # Increase epoch count in environment for a final training of the best genome. 
Train the genome and then replay it.\n    print("Training best genome for 20 epochs...\\n")\n    environment.epochs = 20\n    environment.eval_genome_fitness(best_genome)\n    environment.replay_genome(best_genome)\n\n    # Serialize and save genotype and Tensorflow model to demonstrate serialization\n    best_genome.save_genotype(save_dir_path='./best_genome_genotype/')\n    best_genome.save_model(file_path='./best_genome_model/')", "def run(model_dir,\n        schedule,\n        problem_class=gin.REQUIRED,\n        optimizer_class=gin.REQUIRED,\n        dataset_name=gin.REQUIRED,\n        batch_size=gin.REQUIRED,\n        eval_batch_size=64,\n        train_steps=gin.REQUIRED,\n        eval_steps=gin.REQUIRED,\n        base_optimizer_class=gin.REQUIRED,\n        base_optimizer_conditioning_class=None,\n        iterations_per_loop=gin.REQUIRED,\n        eval_weights=None,\n        training_params_class=gin.REQUIRED,\n        training_params_conditioning_class=None,\n        preprocess="",\n        preprocess_eval="",\n        save_checkpoints_steps=None,\n        keep_checkpoint_max=0,\n        eval_on_test=False):\n  assert schedule in ("train", "eval")\n\n  if save_checkpoints_steps:\n    kwargs = {"save_checkpoints_steps": save_checkpoints_steps}\n  else:\n    kwargs = {"save_checkpoints_secs": 60*10} # Every 10 minutes.\n\n  run_config = tf_estimator.tpu.RunConfig(\n      keep_checkpoint_max=keep_checkpoint_max,\n      master=FLAGS.master,\n      evaluation_master=FLAGS.master,\n      tpu_config=tf_estimator.tpu.TPUConfig(\n          iterations_per_loop=iterations_per_loop),\n      **kwargs)\n  # We use one estimator (potentially on TPU) for training and evaluation.\n  problem = problem_class()\n  model_fn = construct_model_fn(\n      problem, optimizer_class, base_optimizer_class,\n      eval_weights=eval_weights,\n      base_optimizer_conditioning_class=base_optimizer_conditioning_class,\n      training_params_class=training_params_class,\n      training_params_conditioning_class=training_params_conditioning_class)\n  tpu_estimator = tf_estimator.tpu.TPUEstimator(\n      use_tpu=FLAGS.use_tpu,\n      model_fn=model_fn,\n      model_dir=model_dir,\n      train_batch_size=batch_size,\n      eval_batch_size=eval_batch_size,\n      config=run_config)\n\n\n  def input_fn_train(params):\n    preprocess_fn = preprocessing.get_preprocess_fn(preprocess)\n    return data.get_dataset(dataset_name, data.DatasetSplit.TRAIN,\n                            FLAGS.validation_percent, params["batch_size"],\n                            preprocess_fn)\n\n  def input_fn_eval(params, split):\n    preprocess_fn = preprocessing.get_preprocess_fn(preprocess_eval)\n    return data.get_dataset(dataset_name, split, FLAGS.validation_percent,\n                            params["batch_size"], preprocess_fn).repeat()\n\n  path_to_finished_file = os.path.join(model_dir, "FINISHED")\n  if schedule == "train":\n    gin_hook = gin.tf.GinConfigSaverHook(model_dir, summarize_config=True)\n    tpu_estimator.train(input_fn=input_fn_train,\n                        hooks=[gin_hook],\n                        max_steps=train_steps)\n    with tf.gfile.GFile(path_to_finished_file, "w") as finished_file:\n      finished_file.write("1")\n  else:\n    for checkpoint in iterate_checkpoints_until_file_exists(\n        model_dir, path_to_finished_file):\n      if eval_on_test:\n        train_split = data.DatasetSplit.TRAIN_FULL\n        test_split = data.DatasetSplit.TEST\n        test_summary_name = "test"\n      else:\n        train_split = data.DatasetSplit.TRAIN\n        test_split = data.DatasetSplit.VALID\n        test_summary_name = "valid"\n\n      eval_train = tpu_estimator.evaluate(\n          input_fn=functools.partial(input_fn_eval, split=train_split),\n          checkpoint_path=checkpoint,\n          steps=eval_steps,\n          name="train")\n      eval_test = tpu_estimator.evaluate(\n          input_fn=functools.partial(input_fn_eval, split=test_split),\n          checkpoint_path=checkpoint,\n          steps=eval_steps,\n          
name=\"test\")\n\n current_step = eval_train[\"global_step\"]\n\n\n hub_modules_dir = os.path.join(model_dir, \"hub_modules\")\n if not tf.gfile.Exists(hub_modules_dir):\n tf.gfile.MkDir(hub_modules_dir)\n else:\n if not tf.gfile.IsDirectory(hub_modules_dir):\n raise ValueError(\"{0} exists and is not a directory\".format(\n hub_modules_dir))\n\n hub_module_path = os.path.join(hub_modules_dir,\n \"step-{:0>9}\".format(current_step))\n if not tf.gfile.Exists(hub_module_path):\n problem.module_spec.export(hub_module_path,\n checkpoint_path=checkpoint)\n else:\n logging.info(\"Not saving the hub module, since the path\"\n \" %s already exists\", hub_module_path)", "def train(args):\r\n print('Create generators')\r\n generators = train_valid_test_generators(\r\n valid_proportion=args.valid_proportion,\r\n test_proportion=args.test_proportion,\r\n seed=args.seed,\r\n shape=(args.height, args.width),\r\n batch_size=args.batch_size,\r\n shuffle=True\r\n )\r\n print('Create model')\r\n model = create_mobilenetv2(\r\n input_shape=(args.height, args.width, 3),\r\n alpha=args.alpha,\r\n depth_multiplier=args.depth_multiplier,\r\n l2_reg=args.l2_reg,\r\n seed=args.seed\r\n )\r\n\r\n print('Training freezed model')\r\n freeze_model(model, 'global_max_pooling2d_1')\r\n callbacks = callbacks_factory(\r\n callbacks_list=[\r\n 'early_stopping',\r\n 'tensorboard',\r\n ],\r\n model_mask='mobilenetv2_multiclassification_freezed'\r\n )\r\n model = train_pipeline(\r\n model,\r\n generators['hard_train_generator'],\r\n generators['valid_generator'],\r\n callbacks,\r\n optimizer_lr=args.optimizer_lr,\r\n optimizer_decay=args.optimizer_decay,\r\n epochs=args.epochs\r\n )\r\n\r\n print('Training unfreezed model')\r\n unfreeze_model(model)\r\n callbacks = callbacks_factory(\r\n callbacks_list=[\r\n 'best_model_checkpoint',\r\n 'early_stopping',\r\n 'tensorboard',\r\n 'learning_rate_scheduler'\r\n ],\r\n model_mask='mobilenetv2_multiclassification'\r\n )\r\n model = train_pipeline(\r\n model,\r\n generators['easy_train_generator'],\r\n generators['valid_generator'],\r\n callbacks,\r\n optimizer_lr=args.optimizer_lr,\r\n optimizer_decay=args.optimizer_decay,\r\n epochs=3 * args.epochs\r\n )\r\n\r\n print('Save test evaluation')\r\n results = model.evaluate_generator(generators['test_generator'])\r\n pd.DataFrame({\r\n 'MetricsNames': model.metrics_names,\r\n 'Results': results\r\n }).to_csv(os.path.join('../logs/solution_1_test_generator_evaluation.csv'), index=False)", "def test():\n image_reader = reader.ImageReader(FLAGS.test_dir,FLAGS.image_size,FLAGS.channels)\n num_images = image_reader.num_images()\n model = tf_model.GanModel(FLAGS.batch_size,FLAGS.image_size,FLAGS.gen_arch,FLAGS.batch_norm,training=False)\n\n graph = tf.Graph()\n with graph.as_default():\n placeholder = get_placeholder(FLAGS.batch_size,FLAGS.channels)\n test_fetch = get_test_fetch(placeholder, model)\n\n with tf.Session() as session:\n if FLAGS.load is not None:\n saver=tf.train.Saver()\n saver.restore(session, FLAGS.load)\n else:\n print(\"Need to specify a valid model to load: --load=path\")\n return\n\n #need to loop based on the size of the test set\n for i in range(0,num_images,FLAGS.batch_size):\n\n synth_batch = image_reader.next(min(FLAGS.batch_size,num_images-1))\n feed_dict = {placeholder:synth_batch}\n generated_images = session.run(test_fetch,feed_dict=feed_dict)\n\n #write generated_images to file\n print_images(generated_images,FLAGS.output_dir,i,FLAGS.pair_images,synth_batch)", "def run_single_experiment_reg(method='sgd', 
batch_size = 1024, learning_rate = 0.001, ratio = 32, num_iterations = 400, fix_batch = False, data = input_data.read_data_sets('MNIST_data', one_hot=True)):\r\n tf.reset_default_graph()\r\n #mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\r\n \r\n ## Create the Dataframe that we outputs to plot some results\r\n columns = [\"iteration\", \"num_used_data\", \"batch_size\", \"update_time\", \"training_loss\", \"val_loss\"]\r\n export_data = pd.DataFrame(data = np.zeros(shape =(num_iterations, len(columns))), columns =columns)\r\n \r\n\t# Build placeholders and networks.\r\n x = tf.placeholder(tf.float32, shape=[None, data.input_shape])\r\n y_ = tf.placeholder(tf.float32, shape=[None, 1])\r\n logits = build_network_reg(x)\r\n batch_loss = reg_loss(logits, y_)\r\n loss_op = tf.reduce_mean(batch_loss) \r\n \r\n if method == 'sgd':\r\n train_op = training(loss_op, learning_rate)\r\n elif method == 'adam':\r\n train_op = training_ADAM(loss_op, learning_rate)\r\n elif method == 'scsg':\r\n optimizer = SCSGOptimizer(loss_op, learning_rate)\r\n elif method == 'scsg_v2':\r\n optimizer = SCSGOptimizer_v2(loss_op, learning_rate)\r\n \r\n num_used_data = 0\r\n\r\n with tf.Session() as sess:\r\n # sess = tf.Session()\r\n sess.run(tf.global_variables_initializer())\r\n ttqqddmm = tqdm(range(num_iterations))\r\n \r\n for j in ttqqddmm:\r\n \r\n export_data.loc[j,\"iteration\"] = j\r\n # j=0\r\n if not fix_batch: \r\n batch_size = int((j+1) ** 1.5) \r\n\r\n mini_batchsize = max(1,int(batch_size / float(ratio))) # mini-batch size. \r\n batch = data.next_batch_train(batch_size)\r\n feed_dict = {x:batch[0], y_:batch[1]}\r\n \r\n t_start_j = time.time()\r\n if method == 'scsg' or method == 'scsg_v2':\r\n optimizer.batch_update(sess, feed_dict, batch_size, mini_batchsize, lr = 1.0/(j+1) if not fix_batch else None) \r\n elif method == 'sgd' or method == 'adam':\r\n sess.run(train_op, feed_dict = feed_dict) \r\n t_stop_j = time.time()\r\n \r\n num_used_data += batch_size\r\n export_data.loc[j,\"num_used_data\"] = num_used_data\r\n export_data.loc[j,\"batch_size\"] = batch_size\r\n export_data.loc[j,\"update_time\"] = t_stop_j - t_start_j\r\n \r\n samples = np.random.choice(range(data.train_num_examples), size = 10000, replace = False)\r\n train_loss_val = sess.run(loss_op, feed_dict = {x: data.train_x.iloc[samples], y_: data.train_y.iloc[samples]})\r\n val_loss_val = sess.run(loss_op, feed_dict = {x: data.validation_x, y_: data.validation_y}) \r\n \r\n export_data.loc[j,\"training_loss\"] = train_loss_val\r\n export_data.loc[j,\"val_loss\"] = val_loss_val\r\n \r\n train_loss_val, val_loss_val = round(train_loss_val,3), round(val_loss_val,3)\r\n \r\n \r\n ttqqddmm.set_description('# data used: {} Train loss: {} Val loss: {}'.format(num_used_data, train_loss_val, val_loss_val))\r\n if j == num_iterations //2 :\r\n print(\"\\n\")\r\n return export_data", "def evaluate(eval_ds, model, task):\n\n print('==========EVAL==========')\n # Testing contrastive accuracy\n if task['name'] == 'contrastive_accuracy':\n ds = eval_ds.map(data_utils.pretrain_preprocess)\n ds = ds.batch(128)\n test_contrast_acc = tf.keras.metrics.Accuracy(name='test_constrastive_accuracy')\n for x in ds:\n image = x['image']\n image = tf.transpose(image, [1, 0, 2, 3, 4])\n image = tf.reshape(\n image, \n (image.shape[0]*image.shape[1], image.shape[2], image.shape[3], image.shape[4])\n )\n out = model(image, mode='unsupervised', training=False)\n metrics.update_contrastive_accuracy2(test_contrast_acc, out, TEMP)\n print('test contrastive 
accuracy')\n print(test_contrast_acc.result())\n return \n\n # Testing classification accuracy \n ds = eval_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.eval_preprocess)\n ds = ds.batch(FLAGS.eval_bs)\n test_class_acc = tf.keras.metrics.Accuracy(name='test_class_accuracy')\n for x in ds:\n image = x['image']\n labels = x[task['name']]\n if task['name'] == 'extr':\n out = model(image, mode='eval', sup_layers=2, training=False)\n else:\n out = model(image, mode='eval', sup_layers=1, training=False)\n metrics.update_supervised_accuracy(test_class_acc, labels, out)\n \n if FLAGS.debug:\n print(tf.math.argmax(out, axis=-1))\n print('test classification accuracy')\n print(test_class_acc.result())", "def discovery():\n launch_training_on_all_splits(experiment='discovery', splits=DISCOVERY_SPLIT, base_model='pretrained', dropout=0.7987, learning_rate=0.00009659)", "def main():\n\n experiment_config_path = _parse_input()\n all_experiments = read_experiments_config(experiment_config_path)\n\n for experiment_name, experiment_config in all_experiments.items():\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n results, model = perform_experiment(experiment_config)\n weights_file_name = save_model_weights(experiment_name, model)\n testing_layers_files = save_layers_logs(results['Layers Testing Output'], 'Testing')\n training_layers_files = save_layers_logs(results['Layers Training Output'], 'Training')\n\n results.pop('Layers Training Output')\n results.pop('Layers Testing Output')\n print(\"Testing Data Confusion Matrix\")\n print(np.array2string(results['Confusion Matrix']))\n results['Confusion Matrix'] = str(results['Confusion Matrix'].tolist())\n print(\"Experiment Results:\")\n print(json.dumps(results, indent=2, sort_keys=True))\n\n results_file = save_experiment_log(results, experiment_name)\n upload_to_s3([], [], [results_file], [weights_file_name], testing_layers_files + training_layers_files)", "def step(self):\n # Fast learning\n task_embedding = self._ilp.infer_task()\n\n # Posterior update\n #self._skip_flag = self._is_graph_same(task_embedding, self._prev_task_embedding)\n self._skip_flag = False # XXX do not skip test\n if not self._skip_flag:\n self._grprop.observe_task(task_embedding)\n self._prev_task_embedding = task_embedding\n else:\n print(\"skipping!\")", "def run(run_py, func):\n args = eval(\"test_args.{}\".format(func))\n print(args)\n\n res = {}\n\n default_args = {}\n for arg, value in args.items():\n default_args[arg] = value[0]\n\n current_args = dict2argstr(default_args)\n cmd = \"export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7; \\\n python -m paddle.distributed.launch --selected_gpus=0,1,2,3 {} {}\".format(\n run_py, current_args)\n status = os.system(cmd)\n if status != 0:\n res[cmd] = \"FAIL\"\n else:\n res[cmd] = \"SUCCESS\"\n cmd = \"rm -rf checkpoints\"\n os.system(cmd)\n\n for arg, value in args.items():\n if len(value) <= 1:\n continue\n current_args_dict = copy.deepcopy(default_args)\n for item in value[1:]:\n current_args_dict[arg] = item\n current_args = dict2argstr(current_args_dict)\n cmd = \"export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7; \\\n python -m paddle.distributed.launch --selected_gpus=0,1,2,3 {} {}\".format(\n run_py, current_args)\n status = os.system(cmd)\n if status != 0:\n res[cmd] = \"FAIL\"\n else:\n res[cmd] = \"SUCCESS\"\n cmd = \"rm -rf checkpoints\"\n os.system(cmd)\n\n total_num = len(res)\n fail_num = 0\n for cmd, status in res.items():\n if status == \"FAIL\":\n fail_num += 1\n 
print(\"-\" * 30)\n print(\"Failure Rate: {} / {}\".format(str(fail_num), str(total_num)))\n print(\"-\" * 30)\n print(\"Detail:\")\n for cmd, status in res.items():\n print(\"{} : {}\".format(status, cmd))", "def run_time_operation(self, learning_option, cluster):\r\n # get input\r\n # TODO: to be change \"inputs\" : ['input'] to \"inputs\": ['input', 'cell']\r\n cells_ = self.get_input('input')\r\n input_ = learning_option.get('static_rnn_input')\r\n #input_ = self.get_input('input')\r\n #indim = self.get_dimension('input')\r\n\r\n # get attr\r\n # optional field\r\n init = self.get_attr('initial_state', default=None)\r\n length = self.get_attr('length', default=None)\r\n scope = self.get_attr('scope', default='default')\r\n # TODO: tmp\r\n if scope is None:\r\n scope = self.name\r\n\r\n num_steps = learning_option.get('num_steps')\r\n is_train = learning_option.get('is_train')\r\n\r\n # get worker info: worker num, device type, device num\r\n device = self.get_attr('device')\r\n num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n # construct API\r\n def apiConstructor():\r\n batch_size = tf.cond(is_train, lambda: tf.constant(learning_option.get('batch_size'), dtype=tf.int32),\r\n lambda: tf.constant(learning_option.get('test_batch_size'), dtype=tf.int32))\r\n if init == 'ZERO': # WARNING: only support zero initial state in this version\r\n initial_state = cells_.zero_state(batch_size, tf.float32)\r\n else:\r\n initial_state = None\r\n learning_option['initial_state'] = initial_state\r\n\r\n input_unstack = tf.unstack(input_, num=num_steps, axis=1)\r\n output, state = tf.contrib.rnn.static_rnn(cells_, input_unstack, initial_state=initial_state,\r\n dtype=tf.float32, sequence_length=length)\r\n\r\n # set output\r\n self.set_output('output', output)\r\n self.set_output('state', state)\r\n\r\n with tf.variable_scope(self.name):\r\n # single node, model parallelism: explicit worker mapping\r\n # data parallelism: equally duplicate model\r\n if learning_option.get(\"parallel\", None) != \"DP\":\r\n with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n apiConstructor()\r\n else:\r\n apiConstructor()", "def multi_gpu_test_retinanet_on_dataset(num_images, output_dir, dataset):\n # Retrieve the test_net binary path\n binary_dir = envu.get_runtime_dir()\n binary_ext = envu.get_py_bin_ext()\n binary = os.path.join(binary_dir, 'test_net' + binary_ext)\n assert os.path.exists(binary), 'Binary \\'{}\\' not found'.format(binary)\n\n # Run inference in parallel in subprocesses\n outputs = subprocess_utils.process_in_parallel(\n 'retinanet_detections', num_images, binary, output_dir)\n\n # Combine the results from each subprocess now\n boxes, scores, classes, image_ids = [], [], [], []\n for det_data in outputs:\n boxes.extend(det_data['boxes'])\n scores.extend(det_data['scores'])\n classes.extend(det_data['classes'])\n image_ids.extend(det_data['ids'])\n return boxes, scores, classes, image_ids,", "def compute_taskemb(args, train_dataset, model):\n tb_writer = SummaryWriter()\n\n train_sampler = RandomSampler(train_dataset)\n train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size)\n\n t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_epochs\n\n if args.finetune_feature_extractor and not args.finetune_classifier:\n raise ValueError(\"finetune_classifier should be True when finetune_feature_extractor is True.\")\n\n if 
args.finetune_classifier:\n model.train()\n\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = ['bias', 'LayerNorm.weight']\n\n if args.finetune_feature_extractor:\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n 'weight_decay': 0.0}\n ]\n else:\n optimizer_grouped_parameters = [\n {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)\n and args.model_type not in n],\n 'weight_decay': args.weight_decay},\n {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)\n and args.model_type not in n],\n 'weight_decay': 0.0}\n ]\n\n optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,\n num_training_steps=t_total)\n\n else:\n model.eval()\n optimizer = None\n scheduler = None\n\n logger.info(\"***** Compute TaskEmb *****\")\n logger.info(\"Num examples = %d\", len(train_dataset))\n logger.info(\"Batch size = %d\", args.batch_size)\n\n total_num_examples = 0\n model.zero_grad()\n train_iterator = trange(int(args.num_epochs), desc=\"Epoch\", disable=False)\n set_seed(args) # Added here for reproductibility (even between python 2 and 3)\n global_feature_dict = {}\n for _ in train_iterator:\n num_examples = 0\n epoch_iterator = tqdm(train_dataloader, desc=\"Iteration\", disable=False)\n for step, batch in enumerate(epoch_iterator):\n model.train()\n batch = tuple(t.to(args.device) for t in batch)\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2],\n 'start_positions': batch[3],\n 'end_positions': batch[4]}\n\n outputs = model(**inputs)\n\n loss, start_logits, end_logits = outputs[0], outputs[1], outputs[2]\n\n input_mask = inputs['attention_mask']\n\n if not args.use_labels:\n feature_dict = compute_Fisher_no_labels(args, model, input_mask, start_logits, end_logits)\n else:\n feature_dict = compute_Fisher_with_labels(args, model, input_mask, loss)\n ###\n if len(global_feature_dict) == 0:\n for key in feature_dict:\n global_feature_dict[key] = feature_dict[key].detach().cpu().numpy()\n else:\n for key in feature_dict:\n global_feature_dict[key] += feature_dict[key].detach().cpu().numpy()\n\n if ((not args.use_labels) and args.finetune_classifier):\n model.zero_grad()\n loss.backward()\n\n if args.finetune_classifier:\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)\n scheduler.step() # Update learning rate schedule\n optimizer.step()\n\n model.zero_grad()\n num_examples += inputs['input_ids'].size(0)\n total_num_examples += num_examples\n\n # Normalize\n for key in global_feature_dict:\n global_feature_dict[key] = global_feature_dict[key] / total_num_examples\n\n # Save features\n for key in global_feature_dict:\n np.save(os.path.join(args.output_dir, '{}.npy'.format(key)), global_feature_dict[key])", "def test_run(self):\n sut = ExperimentEmail()\n train = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n val = os.path.join(os.path.dirname(__file__), \"data\", \"sample.csv\")\n outdir = tempfile.mkdtemp()\n\n # Act\n sut.run(train, val, outdir, batch_size=32, epochs=2)", "def Run(benchmark_spec):\n _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n vm = benchmark_spec.vms[0]\n if 
benchmark_spec.tpus:\n # For MLPerf 1.0, the benchmake code of different hardware are different.\n if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or\n benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):\n run_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/tpu-{tpus}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n code_path = (\n '$HOME/training_results_{version}/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'\n .format(\n version=VERSION.value,\n model=benchmark_spec.benchmark,\n tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n\n if MASK in benchmark_spec.benchmark:\n model = 'mask_rcnn'\n elif GNMT in benchmark_spec.benchmark:\n model = 'nmt'\n else:\n model = benchmark_spec.benchmark\n\n mlperf_benchmark_cmd = (\n 'cd {code_path} && '\n 'export PYTHONPATH=$(pwd):$(pwd)/{model} && '\n 'cd {model} && '\n '{run_path}/run_and_time.sh'.format(\n code_path=code_path,\n model=model,\n run_path=run_path))\n\n if SSD in benchmark_spec.benchmark:\n mlperf_benchmark_cmd = (\n 'export '\n 'MLP_GCS_RESNET_CHECKPOINT={checkpoint}'\n ' && {cmd}'.format(\n checkpoint=FLAGS.mlperf_gcs_resnet_checkpoint,\n cmd=mlperf_benchmark_cmd))\n else:\n raise ValueError(\n 'MLPerf configurations do not support the hardware in PKB. PKB may '\n 'need to be updated if this is a new TPU type.')\n\n else:\n run_sub_paths = {RESNET: 'resnet/implementations/mxnet',\n TRANSFORMER: 'transformer/implementations/pytorch',\n MINIGO: 'minigo/implementations/tensorflow',\n MASK: 'maskrcnn/implementations/pytorch',\n GNMT: 'gnmt/implementations/pytorch',\n SSD: 'ssd/implementations/pytorch',\n BERT: 'bert/implementations/pytorch',}\n benchmark_path = f'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks'\n run_path = posixpath.join(benchmark_path,\n run_sub_paths[benchmark_spec.benchmark])\n env = {\n 'DGXSYSTEM': DGXSYSTEM,\n 'NEXP': 1,\n 'PULL': 0,\n 'LOGDIR': f'/tmp/{benchmark_spec.benchmark}',\n }\n envs = {\n RESNET: {},\n TRANSFORMER: {'DATADIR': '/data/wmt/utf8'},\n MINIGO: {'CONT': 'mlperf-nvidia:minigo'},\n MASK: {},\n GNMT: {'DATADIR': '/data/gnmt'},\n SSD: {'DATADIR': '/data'},\n BERT: {}\n }\n env.update(envs[benchmark_spec.benchmark])\n\n run_script = posixpath.join(run_path, 'run_with_docker.sh')\n vm_util.ReplaceText(vm, 'SYSLOGGING=1', 'SYSLOGGING=0', run_script)\n vm_util.ReplaceText(vm, 'docker exec -it', 'docker exec -t', run_script)\n if benchmark_spec.benchmark == RESNET:\n vm_util.ReplaceText(vm, r'mpirun.*run_and_time\\.sh',\n r'.\\/run_and_time.sh', run_script)\n\n env = ' '.join(f'{key}={value}' for key, value in env.items())\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n env = f'{tensorflow.GetEnvironmentVars(vm)} {env}'\n\n mlperf_benchmark_cmd = (\n f'chmod 755 {run_script} && '\n f'cd {run_path} && '\n f'{env} {run_script}')\n\n samples = []\n metadata = _CreateMetadataDict(benchmark_spec)\n stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd)\n if NONE in FLAGS.mlperf_profiler:\n samples.extend(\n MakeSamplesFromOutput(\n metadata,\n stdout,\n use_tpu=bool(benchmark_spec.tpus),\n model=benchmark_spec.benchmark))\n return 
samples", "def main():\n TOTAL_NUM = 500\n IMG_NAME = 'img'\n LABEL_NAME = 'label'\n\n img = fluid.layers.data(name=IMG_NAME, shape=[1, 28, 28], dtype='float32')\n # gradient should flow\n img.stop_gradient = False\n label = fluid.layers.data(name=LABEL_NAME, shape=[1], dtype='int64')\n logits = mnist_cnn_model(img)\n cost = fluid.layers.cross_entropy(input=logits, label=label)\n avg_cost = fluid.layers.mean(x=cost)\n\n # use CPU\n place = fluid.CPUPlace()\n # use GPU\n # place = fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n BATCH_SIZE = 1\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n paddle.dataset.mnist.train(), buf_size=128 * 10),\n batch_size=BATCH_SIZE)\n\n test_reader = paddle.batch(\n paddle.reader.shuffle(\n paddle.dataset.mnist.test(), buf_size=128 * 10),\n batch_size=BATCH_SIZE)\n\n fluid.io.load_params(\n exe, \"./mnist/\", main_program=fluid.default_main_program())\n\n # advbox demo\n m = PaddleModel(\n fluid.default_main_program(),\n IMG_NAME,\n LABEL_NAME,\n logits.name,\n avg_cost.name, (-1, 1),\n channel_axis=1)\n attack = JSMA(m)\n attack_config = {\n \"max_iter\": 2000,\n \"theta\": 0.1,\n \"max_perturbations_per_pixel\": 7\n }\n\n # use train data to generate adversarial examples\n total_count = 0\n fooling_count = 0\n for data in train_reader():\n total_count += 1\n adversary = Adversary(data[0][0], data[0][1])\n\n # JSMA non-targeted attack\n adversary = attack(adversary, **attack_config)\n\n # JSMA targeted attack\n # tlabel = 0\n # adversary.set_target(is_targeted_attack=True, target_label=tlabel)\n # adversary = attack(adversary, **attack_config)\n\n # JSMA may return None\n if adversary is not None and adversary.is_successful():\n fooling_count += 1\n print(\n 'attack success, original_label=%d, adversarial_label=%d, count=%d'\n % (data[0][1], adversary.adversarial_label, total_count))\n # plt.imshow(adversary.target, cmap='Greys_r')\n # plt.show()\n # np.save('adv_img', adversary.target)\n else:\n print('attack failed, original_label=%d, count=%d' %\n (data[0][1], total_count))\n\n if total_count >= TOTAL_NUM:\n print(\n \"[TRAIN_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f\"\n % (fooling_count, total_count,\n float(fooling_count) / total_count))\n break\n\n # use test data to generate adversarial examples\n total_count = 0\n fooling_count = 0\n for data in test_reader():\n total_count += 1\n adversary = Adversary(data[0][0], data[0][1])\n\n # JSMA non-targeted attack\n adversary = attack(adversary, **attack_config)\n\n # JSMA targeted attack\n # tlabel = 0\n # adversary.set_target(is_targeted_attack=True, target_label=tlabel)\n # adversary = attack(adversary, **attack_config)\n\n # JSMA may return None\n if adversary is not None and adversary.is_successful():\n fooling_count += 1\n print(\n 'attack success, original_label=%d, adversarial_label=%d, count=%d'\n % (data[0][1], adversary.adversarial_label, total_count))\n # plt.imshow(adversary.target, cmap='Greys_r')\n # plt.show()\n # np.save('adv_img', adversary.target)\n else:\n print('attack failed, original_label=%d, count=%d' %\n (data[0][1], total_count))\n\n if total_count >= TOTAL_NUM:\n print(\n \"[TEST_DATASET]: fooling_count=%d, total_count=%d, fooling_rate=%f\"\n % (fooling_count, total_count,\n float(fooling_count) / total_count))\n break\n print(\"jsma attack done\")", "def benchmark_xla_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n variable_update='parameter_server',\n 
xla=True)\n self._run_benchmark(params)", "def benchmark_xla_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)", "def benchmark_xla_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)", "def main(unused_argv):\n # Load data\n (train_adj, full_adj, train_feats, test_feats, y_train, y_val, y_test,\n train_mask, val_mask, test_mask, _, val_data, test_data, num_data,\n visible_data) = load_data(FLAGS.data_prefix, FLAGS.dataset, FLAGS.precalc)\n\n # Partition graph and do preprocessing\n if FLAGS.bsize > 1:\n _, parts = partition_utils.partition_graph(train_adj, visible_data,\n FLAGS.num_clusters)\n parts = [np.array(pt) for pt in parts]\n else:\n (parts, features_batches, support_batches, y_train_batches,\n train_mask_batches) = utils.preprocess(train_adj, train_feats, y_train,\n train_mask, visible_data,\n FLAGS.num_clusters,\n FLAGS.diag_lambda)\n\n (_, val_features_batches, val_support_batches, y_val_batches,\n val_mask_batches) = utils.preprocess(full_adj, test_feats, y_val, val_mask,\n np.arange(num_data),\n FLAGS.num_clusters_val,\n FLAGS.diag_lambda)\n\n (_, test_features_batches, test_support_batches, y_test_batches,\n test_mask_batches) = utils.preprocess(full_adj, test_feats, y_test,\n test_mask, np.arange(num_data),\n FLAGS.num_clusters_test,\n FLAGS.diag_lambda)\n idx_parts = list(range(len(parts)))\n\n # Some preprocessing\n model_func = models.GCN\n\n # Define placeholders\n placeholders = {\n 'support':\n tf.sparse_placeholder(tf.float32),\n 'features':\n tf.placeholder(tf.float32),\n 'labels':\n tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),\n 'labels_mask':\n tf.placeholder(tf.int32),\n 'dropout':\n tf.placeholder_with_default(0., shape=()),\n 'num_features_nonzero':\n tf.placeholder(tf.int32) # helper variable for sparse dropout\n }\n\n # Create model\n model = model_func(\n placeholders,\n input_dim=test_feats.shape[1],\n logging=True,\n multilabel=FLAGS.multilabel,\n norm=FLAGS.layernorm,\n precalc=FLAGS.precalc,\n num_layers=FLAGS.num_layers)\n\n # Initialize session\n sess = tf.Session()\n tf.set_random_seed(seed)\n\n # Init variables\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n cost_val = []\n total_training_time = 0.0\n # Train model\n for epoch in range(FLAGS.epochs):\n t = time.time()\n np.random.shuffle(idx_parts)\n if FLAGS.bsize > 1:\n (features_batches, support_batches, y_train_batches,\n train_mask_batches) = utils.preprocess_multicluster(\n train_adj, parts, train_feats, y_train, train_mask,\n FLAGS.num_clusters, FLAGS.bsize, FLAGS.diag_lambda)\n for pid in range(len(features_batches)):\n # Use preprocessed batch data\n features_b = features_batches[pid]\n support_b = support_batches[pid]\n y_train_b = y_train_batches[pid]\n train_mask_b = train_mask_batches[pid]\n # Construct feed dictionary\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_train_b,\n train_mask_b, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n else:\n np.random.shuffle(idx_parts)\n for pid in idx_parts:\n # Use preprocessed batch data\n features_b = features_batches[pid]\n 
support_b = support_batches[pid]\n y_train_b = y_train_batches[pid]\n train_mask_b = train_mask_batches[pid]\n # Construct feed dictionary\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_train_b,\n train_mask_b, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n\n total_training_time += time.time() - t\n print_str = 'Epoch: %04d ' % (epoch + 1) + 'training time: {:.5f} '.format(\n total_training_time) + 'train_acc= {:.5f} '.format(outs[2])\n\n # Validation\n if FLAGS.validation:\n cost, acc, micro, macro = evaluate(sess, model, val_features_batches,\n val_support_batches, y_val_batches,\n val_mask_batches, val_data,\n placeholders)\n cost_val.append(cost)\n print_str += 'val_acc= {:.5f} '.format(\n acc) + 'mi F1= {:.5f} ma F1= {:.5f} '.format(micro, macro)\n\n tf.logging.info(print_str)\n\n if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(\n cost_val[-(FLAGS.early_stopping + 1):-1]):\n tf.logging.info('Early stopping...')\n break\n\n tf.logging.info('Optimization Finished!')\n\n # Save model\n saver.save(sess, FLAGS.save_name)\n\n # Load model (using CPU for inference)\n with tf.device('/cpu:0'):\n sess_cpu = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))\n sess_cpu.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess_cpu, FLAGS.save_name)\n # Testing\n test_cost, test_acc, micro, macro = evaluate(\n sess_cpu, model, test_features_batches, test_support_batches,\n y_test_batches, test_mask_batches, test_data, placeholders)\n print_str = 'Test set results: ' + 'cost= {:.5f} '.format(\n test_cost) + 'accuracy= {:.5f} '.format(\n test_acc) + 'mi F1= {:.5f} ma F1= {:.5f}'.format(micro, macro)\n tf.logging.info(print_str)", "def tf_mlp_multiclass_clf(X_train , y_train, Gazes_train,\n X_val, y_val, Gazes_val,\n X_test, y_test, Gazes_test,\n SAVE_PATH, MAX_NB_ITER, BATCH_SIZE, trainbool, category, category_number):\n with tf.Graph().as_default(): \n batch_X_train, batch_y_train, batch_Gazes_train, batch_X_val, batch_y_val, batch_Gazes_val = \\\n myio.batch_batch(X_train, y_train, Gazes_train, X_val, y_val, Gazes_val, BATCH_SIZE) \n #total number of instances for one example\n instance_number = np.shape(X_test)[1]\n def save_model(saver,sess,save_path):\n path = saver.save(sess, save_path)\n print 'model save in %s'%path\n def model(x, w_h, w_o, b_h, b_o):\n h = tf.nn.relu(tf.matmul(x, w_h)+b_h)\n output = tf.nn.softmax(tf.matmul(h, w_o) + b_o)\n \n return output\n def evaluation(X, y, gaze_pred, sess):\n X = np.reshape(X, [-1, 2048])\n gaze_pred = sess.run(gaze_pred, feed_dict={x:X})\n gaze_pred = np.reshape(gaze_pred, [-1, instance_number*category_number])\n# ap=metric.getAP(zip(y, np.min(gaze_pred,axis=1)))\n label_pred = np.argmax(gaze_pred,axis=1)%category_number\n y = np.where(y==1)[1]\n\n if np.shape(y)[0] == np.shape(label_pred)[0] * instance_number:\n \"\"\"The training dataset needs to reshape label vector,\n ,but the testset (organised as a bag) does not need this.\"\"\"\n y = y[::instance_number]\n return accuracy_score(label_pred, y)\n # lr is just X*w so this model line is pretty simple\n W1 = tf.Variable(tf.random_normal([2048, 1000], stddev=0.01,dtype=tf.float64))\n W2 = tf.Variable(tf.random_normal([1000, category_number],stddev=0.01,dtype=tf.float64))\n b1 = tf.Variable(tf.zeros([1, 1000],dtype=tf.float64))\n b2 = tf.Variable(tf.zeros([1, category_number],dtype=tf.float64))\n # w = 
tf.get_variable(\"w1\", [28*28, 10])\n x = tf.placeholder(tf.float64, shape=[None, 2048],name=\"input\")\n y_ = tf.placeholder(tf.float64, shape=[None, category_number],name=\"gt_output\")\n gaze_ = tf.placeholder(tf.float64, shape=[None, category_number],name=\"gaze\")\n gaze_pred = model(x, W1, W2, b1, b2)\n \n# regularizers = (tf.nn.l2_loss(W1) + tf.nn.l2_loss(W2) +\n# tf.nn.l2_loss(b1) + tf.nn.l2_loss(b2))\n# cross entropy \n \n loss_mean = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(gaze_pred), reduction_indices=[1]))\n# L2-loss\n# loss_mean = tf.reduce_mean(tf.pow(gaze_-gaze_pred, 2))\n# loss_mean += regularizers*5e-4\n train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(loss_mean)\n\n best_val_acc= -1\n EARLY_STOP_PATIENCE=10\n current_epoch=0\n min_nb_iter=50\n saver = tf.train.Saver()\n init_op = tf.initialize_all_variables()\n # Launch the graph in a session\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)\n with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:\n with tf.device('/cpu:0'):\n if trainbool:\n sess.run(init_op)\n print \"begin training\"\n for i in range(MAX_NB_ITER):\n mean_train_lm=0\n batches = range(np.shape(batch_X_train)[0])\n random.shuffle(batches)\n for j in batches:\n _, loss_train = sess.run([train_op, loss_mean], feed_dict={x:batch_X_train[j],\n gaze_:batch_Gazes_train[j],\n y_:batch_y_train[j]})\n mean_train_lm += loss_train\n \n print \"epoch:%d, mean_train_loss:%f\"%(i, mean_train_lm/np.shape(batch_X_train)[0])\n loss_val = sess.run(loss_mean, feed_dict={x:batch_X_val[0],\n gaze_:batch_Gazes_val[0],\n y_:batch_y_val[0]})\n print \"epoch:%d, mean_val_loss:%f\"%(i, loss_val)\n train_acc = evaluation(X_train, y_train, gaze_pred, sess)\n print \"epoch:%d, acc_train:%f\"%(i, train_acc,)\n val_acc = evaluation(X_val, y_val, gaze_pred, sess)\n print \"epoch:%d, acc_val:%f\"%(i, val_acc)\n test_acc = evaluation(X_test, y_test, gaze_pred, sess)\n print \"epoch:%d, acc_test:%f\"%(i, test_acc,)\n \n if val_acc > best_val_acc:\n print \"save model of epoch:%d\"%i\n current_epoch = i\n best_val_acc = val_acc\n save_model(saver, sess, SAVE_PATH)\n elif i > min_nb_iter and (i - current_epoch) >= EARLY_STOP_PATIENCE:\n print 'early stopping at epoch %d'%i\n break \n \n f= open(\"/local/wangxin/results/upmc_food/tf_mlp_multiclass_clf/ap_res.txt\",\"a+\")\n f.write(\" \".join([SAVE_PATH, category, str(train_acc), str(val_acc), str(test_acc)])+'\\n')\n f.close()\n if not trainbool:\n print \"load model from %s\"%SAVE_PATH\n saver.restore(sess, SAVE_PATH)", "def create_cub200_task_distribution(path_to_pkl,\n num_training_samples_per_class=10,\n num_test_samples_per_class=15,\n num_training_classes=20,\n meta_batch_size=5):\n\n global cub200_trainX\n global cub200_trainY\n\n global cub200_valX\n global cub200_valY\n\n global cub200_testX\n global cub200_testY\n\n\n with open(path_to_pkl, 'rb') as f:\n d = pickle.load(f)\n cub200_X, cub200_Y = d['dataset']\n\n cub200_X = cub200_X.astype(np.float32) / 255.0\n cub200_X = (cub200_X - np.asarray((0.4914, 0.4822, 0.4465))) / np.asarray((0.2023, 0.1994, 0.2010))\n\n #\n # TODO\n # random horiz flip + normalize by: \n # transforms.Normalize((0.4914, 0.4822, 0.4465),\n # (0.2023, 0.1994, 0.2010)) (mean, std)\n\n\n\n #np.random.seed(0)\n # TODO: shuffle allocation of class indices to train/val/test\n num_train = 100\n num_val = 50\n num_test = 50\n\n classes = list(set(cub200_Y))\n train_classes = classes[:num_train]\n val_classes = classes[num_train:(num_train+num_val)]\n test_classes 
= classes[(num_train+num_val):]\n\n    train_indices = []\n    val_indices = []\n    test_indices = []\n\n    for i in range(len(cub200_Y)):\n        if cub200_Y[i] in train_classes:\n            train_indices.append(i)\n        elif cub200_Y[i] in val_classes:\n            val_indices.append(i)\n        elif cub200_Y[i] in test_classes:\n            test_indices.append(i)\n\n    cub200_trainX = cub200_X[train_indices]\n    cub200_trainY = cub200_Y[train_indices]\n\n    cub200_valX = cub200_X[val_indices]\n    cub200_valY = cub200_Y[val_indices]\n\n    cub200_testX = cub200_X[test_indices]\n    cub200_testY = cub200_Y[test_indices]\n\n\n    train_tasks_list = [ClassificationTask(cub200_trainX,\n                                           cub200_trainY,\n                                           num_training_samples_per_class,\n                                           num_test_samples_per_class,\n                                           num_training_classes,\n                                           split_train_test=0.5)]\n\n    # TODO: NOTE: HACK -- validation and test tasks use a fixed number of test-set samples, instead of the supplied\n    # ones. This is because in MAML/FOMAML the test set is used to compute the meta-gradient, and a small number of\n    # samples is used (in the philosophy of few-shot learning, where only few samples are available).\n    # However, in this case we wish to use a few more test-samples to better estimate the accuracy of the model on the validation\n    # and test tasks!\n    num_test_samples_per_class = 20\n    validation_tasks_list = [ClassificationTask(cub200_valX,\n                                                cub200_valY,\n                                                num_training_samples_per_class,\n                                                num_test_samples_per_class,\n                                                num_training_classes,\n                                                split_train_test=0.5)]\n\n    test_tasks_list = [ClassificationTask(cub200_testX,\n                                          cub200_testY,\n                                          num_training_samples_per_class,\n                                          num_test_samples_per_class,\n                                          num_training_classes,\n                                          split_train_test=0.5)]\n\n    metatrain_task_distribution = TaskDistribution(tasks=train_tasks_list,\n                                                   task_probabilities=[1.0],\n                                                   batch_size=meta_batch_size,\n                                                   sample_with_replacement=True,\n                                                   use_classes_only_once=True)\n\n    metaval_task_distribution = TaskDistribution(tasks=validation_tasks_list,\n                                                 task_probabilities=[1.0],\n                                                 batch_size=meta_batch_size,\n                                                 sample_with_replacement=True,\n                                                 use_classes_only_once=True)\n\n    metatest_task_distribution = TaskDistribution(tasks=test_tasks_list,\n                                                  task_probabilities=[1.0],\n                                                  batch_size=meta_batch_size,\n                                                  sample_with_replacement=True,\n                                                  use_classes_only_once=True)\n\n    return metatrain_task_distribution, metaval_task_distribution, metatest_task_distribution", "def benchmark_xla_fakedistort_1gpu_gpuparams(self):\n    params = self._shared_params()._replace(\n        num_gpus=1,\n        data_dir=self.fake_data_dir,\n        data_name='imagenet',\n        distortions=True,\n        variable_update='parameter_server',\n        xla=True)\n    self._run_benchmark(params)", "def finetuned():\n    launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='ft', dropout=0.7304, learning_rate=0.0000976)", "def test_pyt_multitask(self):\n\n        def run_display_test(defaults, ep_and_ex_counts):\n            with testing_utils.capture_output() as f:\n                parser = display_setup_args()\n                parser.set_defaults(**defaults)\n                opt = parser.parse_args()\n                display_data(opt)\n            str_output = f.getvalue()\n            self.assertTrue(\n                '[ loaded {} episodes with a total of {} examples ]'.format(\n                    ep_and_ex_counts[0], ep_and_ex_counts[1]\n                ) in str_output,\n                'PytorchDataTeacher multitasking failed with '\n                'following args: {}'.format(opt)\n            )\n\n        task1 = 'babi:task1k:1'\n        task2 = 'babi:task1k:2'\n        dataset1 = 'flickr30k'\n        dataset2 = 'vqa_v1'\n\n        # Expected example and episode counts\n        eps_and_exs_counts = [\n            (1800, 1800),\n            (1080, 1800),\n            (29900, 29900),\n            (29180, 29900),\n            (277349, 277349)\n        ]\n        defaults = parser_defaults.copy()\n\n        # 1.
defaults['pytorch_teacher_task'] = '{},{}'.format(task1, task2)\n run_display_test(defaults, eps_and_exs_counts[0])\n\n # 2.\n defaults['pytorch_teacher_task'] = task1\n defaults['task'] = task2\n run_display_test(defaults, eps_and_exs_counts[1])\n\n # 3.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = dataset1\n run_display_test(defaults, eps_and_exs_counts[2])\n\n # 4.\n del defaults['pytorch_teacher_task']\n defaults['task'] = task1\n run_display_test(defaults, eps_and_exs_counts[3])\n\n # 5.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = '{},{}'.format(dataset1, dataset2)\n run_display_test(defaults, eps_and_exs_counts[4])", "def Experiment1(train_x,train_y,test_x,test_y,task):\r\n lambda_r = np.array(np.arange(0,151,1))\r\n if(task=='1'):\r\n #Task1: Effects of regularization parameters\r\n Exp1_task1(lambda_r,train_x,train_y,test_x,test_y)\r\n if(task=='2'):\r\n #Task2: Effects of No.of examples\r\n Exp1_task2(lambda_r,train_x,train_y,test_x,test_y)", "def test(neuralnet, dataloader):\n neuralnet.eval()\n batch_transform = data.BatchTransform()\n\n idx = 0\n for iteration, batch in enumerate(dataloader):\n with torch.no_grad():\n im = batch[0].requires_grad_(False).to(DEVICE)\n keypts = batch[1].requires_grad_(False).to(DEVICE)\n\n deformed_batch = batch_transform.exe(im, landmarks=keypts)\n im, future_im, mask = deformed_batch['image'], deformed_batch['future_image'], deformed_batch['mask']\n\n future_im_pred, gauss_mu, _ = neuralnet(im, future_im)\n\n predict = future_im_pred.data.cpu().numpy().transpose(0, 2, 3, 1)\n gauss_mu = gauss_mu.data.cpu().numpy()\n # gauss_map = gauss_map.data.cpu().numpy()\n future_im = future_im.data.cpu().numpy().transpose(0, 2, 3, 1)\n\n os.makedirs('testcheck', exist_ok=True)\n fig_path = path.join('testcheck', 'fig_{}.png'.format(iteration))\n utils.savegrid(fig_path, future_im, predict, gauss_mu=gauss_mu, name='deform')\n\n idx += im.shape[0]\n\n neuralnet.train()\n return idx", "def test_all_tf_execution_regimes(test_case):\n if BACKEND == 'backend_tensorflow':\n return test_util.test_all_tf_execution_regimes(test_case)\n else:\n return test_case", "def simulate_run(run, maker, all_data, train_mask, test_mask, instances, independent, mixture):\n\n train_data = all_data.masked(train_mask)\n test_data = all_data.masked(test_mask)\n\n if instances is not None:\n ids = sorted(train_data.run_lists, key = lambda _: numpy.random.rand())[:instances]\n train_data = train_data.filter(*ids)\n\n if independent:\n train_data = train_data.collect_independent(mixture).only_nonempty()\n else:\n train_data = train_data.collect_systematic(mixture).only_nonempty()\n\n budget = test_data.common_budget\n #budget = test_data.common_budget / 2 # XXX\n suite = borg.fake.FakeSuite(test_data)\n\n if maker.subname == \"preplanning-dir\":\n model_kwargs = {\"K\": 64}\n\n if \"set_alpha\" in maker.variants:\n model_kwargs[\"alpha\"] = 1e-2\n else:\n model_kwargs = {}\n\n solver = maker(suite, train_data, model_kwargs = model_kwargs)\n successes = []\n\n for (i, instance_id) in enumerate(test_data.run_lists):\n logger.info(\"simulating run %i/%i on %s\", i, len(test_data), instance_id)\n\n with suite.domain.task_from_path(instance_id) as instance:\n with borg.accounting() as accountant:\n answer = solver.start(instance).run_then_stop(budget)\n\n succeeded = suite.domain.is_final(instance, answer)\n\n logger.info(\n \"%s %s on %s (%.2f CPU s)\",\n maker.name,\n \"succeeded\" if succeeded else \"failed\",\n os.path.basename(instance),\n 
accountant.total.cpu_seconds,\n )\n\n if succeeded:\n successes.append(accountant.total.cpu_seconds)\n\n logger.info(\n \"%s had %i successes over %i instances\",\n maker.name,\n len(successes),\n len(test_data),\n )\n\n description = \"{0} ({1})\".format(mixture, \"Sep.\" if independent else \"Sys.\")\n\n return (\n description,\n maker.name,\n instances,\n len(successes),\n numpy.mean(successes),\n numpy.median(successes),\n )", "def main(config_file):\n \n # Load the configuration from json file\n assert os.path.isfile(\n config_file), \"No json configuration file found at {}\".format(config_file)\n config = utils.LoadConfig(config_file)\n\n # use GPU if available\n config.cuda = torch.cuda.is_available()\n\n # Set the random seed for reproducible experiments\n torch.manual_seed(config.general['seed'])\n if config.cuda:\n torch.cuda.manual_seed(config.general['seed'])\n \n #Generate output path if it does not exist\n out_dir = config.general['out_dir']\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n \n #Save config file\n config.save(os.path.join(out_dir, 'experiment_config.json'))\n\n # Set the logger\n utils.set_logger(os.path.join(out_dir, 'train.log'))\n\n # Create the input data pipeline\n logging.info(\"Loading the datasets...\")\n\n # Load data\n train, test = read_and_format_full_dataset()\n train_kaggle, test_kaggle = read_and_format_kaggle_dataset()\n \n #Using kaggle's training data for training\n train, val = split_train_val_partition(train_kaggle, config.data['split_train_percentage'],config.general['seed'])\n \n #Adding data augmentation to training\n # train = MNISTDatasetLabels(train,\n # transform=transforms.Compose([\n # Normalization(),\n # transforms.RandomHorizontalFlip(0.5),\n # transforms.RandomVerticalFlip(0.5),\n # transforms.RandomPerspective(),\n # transforms.RandomRotation(30)])) \n \n train = MNISTDatasetLabels(train,\n transform=transforms.Compose([\n Normalization(),\n transforms.RandomRotation(15)])) \n \n val = MNISTDatasetLabels(val,\n transform=transforms.Compose([Normalization()])) \n \n test = MNISTDatasetLabels(test,\n transform=transforms.Compose([Normalization()])) \n \n test_kaggle = MNISTDatasetNoLabels(test_kaggle,\n transform=transforms.Compose([Normalization()])) \n \n train_dataloader = DataLoader(train, batch_size=config.CNN_train['batch_size'], shuffle=True, num_workers=config.CNN_train['num_workers'])\n val_dataloader = DataLoader(val, batch_size=config.CNN_train['batch_size'], shuffle=True, num_workers=config.CNN_train['num_workers'])\n test_dataloader = DataLoader(test, batch_size=config.CNN_train['batch_size'], shuffle=False, num_workers=config.CNN_train['num_workers'])\n test_kaggle_dataloader = DataLoader(test_kaggle, batch_size=config.CNN_train['batch_size'], shuffle=False, num_workers=config.CNN_train['num_workers'])\n\n logging.info(\"- done.\")\n \n # Train the model\n logging.info(\"Starting training for {} epoch(s)\".format(config.CNN_train['num_epochs']))\n train_wraper(train_dataloader, val_dataloader, config)\n logging.info(\"- done.\")\n \n #Evaluate the model test set \n # Using Kaggle's test set unknown labels (can have true labels or not (Kaggle's case))\n logging.info(\"Starting the model evaluation on Kaggle's test data\")\n eval_out_kaggle = evaluate_return_labels(test_kaggle_dataloader, config)\n #Save the results\n eval_out_kaggle.to_csv(os.path.join(out_dir, 'test_result_kaggle.csv'),index=False)\n logging.info(\"- done.\")\n \n # Using test set with known labels\n logging.info(\"Starting the model 
evaluation on test data\")\n eval_out = evaluate_return_labels(test_dataloader, config)\n #Save the results\n eval_out.to_csv(os.path.join(out_dir, 'test_result.csv'),index=False)\n logging.info(\"- done.\")\n \n # Compute metrics\n if 'TrueLabel' in eval_out:\n #Evaluate the model with test set (known labels)\n logging.info(\"Calculating final metrics\")\n # Get unique true labels in dataset\n classes = eval_out.TrueLabel.unique()\n # Sort them\n classes.sort()\n # Calculate accuracy\n accuracy_total = accuracy(eval_out)\n # Calculate error rate\n error_rate_total = error_rate(eval_out)\n # Confussion matrix\n c_matrix = confusion_matrix(eval_out, classes)\n plot_confusion_matrix(c_matrix, classes, 'CNN', out_dir)\n # Overall metrics\n metrics_per_class, metrics_overall = confusion_matrix_metrics(c_matrix)\n metrics_overall['accuracy_percent'] = accuracy_total\n metrics_overall['error_rate_percent'] = error_rate_total\n \n metrics_per_class.to_csv(os.path.join(out_dir, 'CNN_results_per_class.csv'))\n metrics_overall.to_csv(os.path.join(out_dir, 'CNN_results_overall.csv'))\n \n logging.info(\"- done.\")", "def fewshot_eval_model(experiment_name, task_name, mt, eval_data, batch_size, \n k=0, random_seed=0, n=None, prompt_data=None, \n instructions=None, answers=None, template_id=0, cot_reasons=None,\n max_decode_steps=128, extract_answers=None,\n trigger_phrase=None,\n print_examples=0, print_all_wrong=False):\n # argument checks\n if k > 0 and prompt_data is None: \n assert len(prompt_data) >= 1, f\"need to provide prompt data of at least len {k}\"\n # define stats\n n_correct = 0\n n_str_em = 0\n n_datapoints = 0\n all_preds = []\n all_labels = []\n # task specific info\n task_name_to_hendrycks_em_group_by = {\n 'commonsense': 1,\n 'deontology': 4,\n 'justice': 4,\n 'utilitarianism': 1,\n 'virtue': 1, # we treat as multiple choice\n 'trolley' : 1,\n 'factual' : 1,\n 'counterfact' : 1,\n }\n if 'virtue' in task_name:\n assert answers is None, \"do not use answers with virtue subset\"\n if answers and not extract_answers:\n extract_answers = answers\n # subsample eval data if requested\n if n is not None:\n eval_data_loop = eval_data.sample(n=n, random_state=random_seed, replace=False)\n else:\n eval_data_loop = eval_data\n # begin eval loop\n # calculate query batch size based on if len(inputs) * len(answers) can fit in BATCH_SIZE query to model\n effective_batch_size = batch_size if not answers else batch_size // len(extract_answers)\n n_chunks = np.ceil(len(eval_data_loop) / effective_batch_size)\n for batch_num, batch in enumerate(np.array_split(eval_data_loop, n_chunks)):\n if batch_num > 0:\n running_acc = n_correct / n_datapoints \n check_answers = extract_answers if answers is None else answers\n prop_invalid_preds = compute_prop_invalid_preds(all_preds, check_answers)\n start = '\\r' # '\\n' if batch_num < 3 else \n print(f\"{start}Batch {batch_num-1} | Acc: {100*running_acc:.2f} | Invalid: {100*prop_invalid_preds:.2f}\", end=\"\")\n # make inputs and labels:\n query_inputs = []\n for test_input in batch.input:\n query_input = format_prompt_from_df(prompt_data, test_input, answers=answers, instructions=instructions, cot_reasons=cot_reasons, separator='\\n', template_id=template_id)\n query_inputs.append(query_input)\n labels = batch.label_str\n # make multiple choice answers for virtue\n if 'virtue' in task_name:\n answers = []\n for answer_list in batch.answers:\n answers.append(answer_list.split(','))\n answers = np.array(answers)\n # query model. 
query inputs may be editing when doing chain_of_thought multiple choice\n with torch.no_grad():\n preds, scores, query_inputs = predict_model(mt, \n query_inputs, \n answers, \n trigger_phrase=trigger_phrase, \n max_decode_steps=max_decode_steps)\n # record stats\n # first case is when we are generating predictions and extracting answers from them\n if answers is None and extract_answers is not None:\n batch_n_correct, correct_vec = first_appearance_fewshot_accuracy_sum(preds, labels, \n extract_answers=extract_answers, \n trigger_phrase=trigger_phrase,\n return_vec=True)\n else:\n batch_n_correct, correct_vec = fewshot_accuracy_sum(preds, labels, return_vec=True)\n n_correct += batch_n_correct\n n_str_em += em_accuracy_sum(preds, labels)\n n_datapoints += len(batch)\n all_preds.extend(list(preds))\n all_labels.extend(list(labels))\n if (print_examples>0 and batch_num == 0):\n print_idx = np.arange(min(print_examples, len(batch)))\n elif print_all_wrong:\n print_idx = np.argwhere(1-correct_vec).reshape(-1)\n else:\n print_idx = np.array([])\n if len(print_idx) > 0:\n print(f\"\\nExamples from batch {batch_num}...\")\n print(\"--------\")\n for i in print_idx:\n print(f\"Example {i}\")\n print(f\"point: \\n{batch.input.iloc[i]}\")\n print(f\"prompt: \\n{query_inputs[i]}\")\n print(\"pred: \", preds[i])\n print(\"label: \", labels.iloc[i])\n if isinstance(answers, np.ndarray):\n print(\"anwers: \", answers[i])\n print(\"exact scores: \", scores[i])\n print(\"correct: \", correct_vec[i])\n if 'completion' in batch.columns:\n print(\"gpt completion: \", batch.completion.iloc[i])\n print(\"--------\")\n print(f\"Examples acc: {correct_vec[print_idx].mean():.2f}\")\n print(\"--------\\n\")\n del batch, preds, labels, scores\n # calculate EM from Hendrycks et al paper\n group_by = task_name_to_hendrycks_em_group_by[task_name]\n hendrycks_em = get_hendrycks_em(all_preds, all_labels, answers, group_by)\n # make df with results\n results_dict = {\n 'exp_name' : experiment_name,\n 'task_name' : task_name,\n 'k' : k,\n 'n' : n,\n 'seed' : random_seed,\n 'acc' : n_correct / n_datapoints,\n 'acc_em' : n_str_em / n_datapoints,\n 'hendrycks_em': hendrycks_em,\n 'prop_invalid': compute_prop_invalid_preds(all_preds, answers)\n }\n results = pd.DataFrame.from_dict({k : [v] for k,v in results_dict.items()})\n print(\"\\nRESULTS:\")\n for k,v in results_dict.items():\n if any([x in k for x in ['acc', 'em', 'prop']]):\n v = f\"{100*v:.2f}\"\n print(f\" {k}: {str(v):10s}\")\n return results", "def main():\n grid_tester_cpu = GridTesterCPU()\n\n # parse args, load configuration and create all required objects.\n grid_tester_cpu.setup_grid_experiment()\n\n # GO!\n grid_tester_cpu.run_grid_experiment()", "def test_net(args, dataset_name, proposal_file, output_dir, ind_range=None, gpu_id=0, early_stop=False):\n # print('test_net')\n roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(dataset_name, proposal_file, ind_range)\n model = initialize_model_from_cfg(args, gpu_id=gpu_id)\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES\n all_boxes = {}\n\n timers = defaultdict(Timer)\n \n \n\n\n if 'train' in dataset_name:\n if ind_range is not None:\n det_name = 'discovery_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'discovery.pkl'\n else:\n if ind_range is not None:\n det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'detections.pkl'\n \n det_file = os.path.join(output_dir, det_name)\n if os.path.exists(det_file):\n print('the file', 
det_file, 'exists. I am loading detections from it...')\n return load_object(det_file)['all_boxes']\n\n for i, entry in enumerate(roidb):\n if early_stop and i > 10: break\n\n box_proposals = entry['boxes']\n if len(box_proposals) == 0:\n continue\n \n im = cv2.imread(entry['image'])\n # print(entry['image'])\n cls_boxes_i = im_detect_all(model, im, box_proposals, timers)\n\n all_boxes[entry['image']] = cls_boxes_i\n\n if i % 10 == 0: # Reduce log file size\n ave_total_time = np.sum([t.average_time for t in timers.values()])\n eta_seconds = ave_total_time * (num_images - i - 1)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n \n det_time = (timers['im_detect_bbox'].average_time)\n \n logger.info(('im_detect: range [{:d}, {:d}] of {:d}:{:d}/{:d} {:.3f}s (eta: {})').format(\n start_ind + 1, end_ind, total_num_images, start_ind + i + 1, start_ind + num_images, det_time, eta))\n\n cfg_yaml = yaml.dump(cfg)\n\n save_object(\n dict(\n all_boxes=all_boxes,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n return all_boxes", "def setup_gpu_and_random(config):\n random.seed(config.general.manualSeed)\n np.random.seed(config.general.manualSeed)\n torch.manual_seed(config.general.manualSeed)\n torch.cuda.manual_seed(config.general.manualSeed)\n\n cudnn.benchmark = True\n cudnn.deterministic = True\n config.num_gpu = torch.cuda.device_count()\n\n if config.num_gpu > 1:\n print('------ Use multi-GPU setting ------')\n print('if you stuck too long time with multi-GPU setting, try to set --workers 0')\n # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1\n config.workers = config.workers * config.num_gpu\n config.batch_size = config.batch_size * config.num_gpu\n\n \"\"\" previous version\n print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)\n opt.batch_size = opt.batch_size * opt.num_gpu\n print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')\n If you dont care about it, just commnet out these line.)\n opt.num_iter = int(opt.num_iter / opt.num_gpu)\n \"\"\"", "def main(_) -> None:\n params = train_utils.parse_configuration(FLAGS)\n mode = FLAGS.mode\n model_dir = FLAGS.model_dir\n if 'train' in FLAGS.mode:\n # Pure eval modes do not output yaml files. 
Otherwise continuous eval job\n # may race against the train job for writing the same file.\n train_utils.serialize_config(params, model_dir)\n\n if FLAGS.seed is not None:\n logging.info('Setting tf seed.')\n tf.random.set_seed(FLAGS.seed)\n\n task = RankingTask(\n params=params.task,\n optimizer_config=params.trainer.optimizer_config,\n logging_dir=model_dir,\n steps_per_execution=params.trainer.steps_per_loop,\n name='RankingTask')\n\n enable_tensorboard = params.trainer.callbacks.enable_tensorboard\n\n strategy = distribute_utils.get_distribution_strategy(\n distribution_strategy=params.runtime.distribution_strategy,\n all_reduce_alg=params.runtime.all_reduce_alg,\n num_gpus=params.runtime.num_gpus,\n tpu_address=params.runtime.tpu)\n\n with strategy.scope():\n model = task.build_model()\n\n def get_dataset_fn(params):\n return lambda input_context: task.build_inputs(params, input_context)\n\n train_dataset = None\n if 'train' in mode:\n train_dataset = strategy.distribute_datasets_from_function(\n get_dataset_fn(params.task.train_data),\n options=tf.distribute.InputOptions(experimental_fetch_to_device=False))\n\n validation_dataset = None\n if 'eval' in mode:\n validation_dataset = strategy.distribute_datasets_from_function(\n get_dataset_fn(params.task.validation_data),\n options=tf.distribute.InputOptions(experimental_fetch_to_device=False))\n\n if params.trainer.use_orbit:\n with strategy.scope():\n checkpoint_exporter = train_utils.maybe_create_best_ckpt_exporter(\n params, model_dir)\n trainer = RankingTrainer(\n config=params,\n task=task,\n model=model,\n optimizer=model.optimizer,\n train='train' in mode,\n evaluate='eval' in mode,\n train_dataset=train_dataset,\n validation_dataset=validation_dataset,\n checkpoint_exporter=checkpoint_exporter)\n\n train_lib.run_experiment(\n distribution_strategy=strategy,\n task=task,\n mode=mode,\n params=params,\n model_dir=model_dir,\n trainer=trainer)\n\n else: # Compile/fit\n checkpoint = tf.train.Checkpoint(model=model, optimizer=model.optimizer)\n\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n if latest_checkpoint:\n checkpoint.restore(latest_checkpoint)\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint,\n directory=model_dir,\n max_to_keep=params.trainer.max_to_keep,\n step_counter=model.optimizer.iterations,\n checkpoint_interval=params.trainer.checkpoint_interval)\n checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager)\n\n time_callback = keras_utils.TimeHistory(\n params.task.train_data.global_batch_size,\n params.trainer.time_history.log_steps,\n logdir=model_dir if enable_tensorboard else None)\n callbacks = [checkpoint_callback, time_callback]\n\n if enable_tensorboard:\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=model_dir,\n update_freq=min(1000, params.trainer.validation_interval),\n profile_batch=FLAGS.profile_steps)\n callbacks.append(tensorboard_callback)\n\n num_epochs = (params.trainer.train_steps //\n params.trainer.validation_interval)\n current_step = model.optimizer.iterations.numpy()\n initial_epoch = current_step // params.trainer.validation_interval\n\n eval_steps = params.trainer.validation_steps if 'eval' in mode else None\n\n if mode in ['train', 'train_and_eval']:\n logging.info('Training started')\n history = model.fit(\n train_dataset,\n initial_epoch=initial_epoch,\n epochs=num_epochs,\n steps_per_epoch=params.trainer.validation_interval,\n validation_data=validation_dataset,\n 
validation_steps=eval_steps,\n callbacks=callbacks,\n )\n model.summary()\n logging.info('Train history: %s', history.history)\n elif mode == 'eval':\n logging.info('Evaluation started')\n validation_output = model.evaluate(validation_dataset, steps=eval_steps)\n logging.info('Evaluation output: %s', validation_output)\n else:\n raise NotImplementedError('The mode is not implemented: %s' % mode)", "def test(cfg):\n # Set up environment.\n distributed.init_distributed_training(cfg)\n\n # Set random seed from configs.\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n\n # Print config.\n if distributed.is_master_proc():\n print(\"Test with config:\")\n print(cfg)\n\n # Build the model and print model statistics.\n # Use cuda if available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # Construct the model\n model = PanopticNarrativeGroundingBaseline(cfg, device=device)\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device,\n find_unused_parameters=True\n )\n if cfg.LOG_MODEL_INFO and distributed.is_master_proc():\n print(\"Model:\\n{}\".format(model))\n print(\"Params: {:,}\".format(np.sum([p.numel() for p in model.parameters()]).item()))\n print(\"Mem: {:,} MB\".format(torch.cuda.max_memory_allocated() / 1024 ** 3))\n print(\"nvidia-smi\")\n os.system(\"nvidia-smi\")\n\n # Load a checkpoint to test if applicable.\n checkpoint_path = osp.join(cfg.OUTPUT_DIR, 'model_final.pth')\n if cfg.TEST.CHECKPOINT_FILE_PATH != \"\":\n checkpoint_path = cfg.TEST.CHECKPOINT_FILE_PATH\n if osp.exists(checkpoint_path):\n if distributed.is_master_proc():\n print('Loading model from: {0}'.format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n elif cfg.TRAIN.CHECKPOINT_FILE_PATH != \"\":\n # If no checkpoint found in TEST.CHECKPOINT_FILE_PATH or in the current\n # checkpoint folder, try to load checkpoint from\n # TRAIN.CHECKPOINT_FILE_PATH and test it.\n checkpoint_path = cfg.TRAIN.CHECKPOINT_FILE_PATH\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n else:\n if distributed.is_master_proc():\n print(\"Testing with random initialization. Only for debugging.\")\n\n # Create testing loaders.\n test_dataset = PanopticNarrativeGroundingDataset(cfg, cfg.DATA.VAL_SPLIT, train=False)\n test_loader = DataLoader(\n test_dataset,\n batch_size=int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS)),\n shuffle=False,\n sampler=(DistributedSampler(test_dataset) if cfg.NUM_GPUS > 1 else None),\n num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n pin_memory=cfg.DATA_LOADER.PIN_MEMORY\n )\n \n if distributed.is_master_proc():\n print(\"Testing model for {} iterations\".format(len(test_loader)))\n\n # Perform test on the entire dataset.\n perform_test(test_loader, model, cfg)" ]
[ "0.6628726", "0.6221861", "0.6108858", "0.6095782", "0.60324526", "0.60173756", "0.6013705", "0.60083187", "0.59039736", "0.58947945", "0.58934516", "0.5819561", "0.5818858", "0.5764458", "0.5733247", "0.5716348", "0.57152057", "0.5713483", "0.5712434", "0.569594", "0.56877816", "0.5667342", "0.56591594", "0.56332433", "0.5602469", "0.5599965", "0.5593883", "0.55924153", "0.55826396", "0.557186", "0.55692583", "0.55621403", "0.55589426", "0.55447763", "0.5541929", "0.55370706", "0.55331475", "0.5521408", "0.5520935", "0.5511216", "0.54991925", "0.5498184", "0.549106", "0.548503", "0.5464865", "0.5449779", "0.54482794", "0.54384583", "0.54340583", "0.5434047", "0.54328823", "0.5429811", "0.54243875", "0.5414304", "0.5410132", "0.54068446", "0.53913516", "0.5387298", "0.538339", "0.5381017", "0.5365513", "0.53607386", "0.5357645", "0.5348885", "0.5347391", "0.5347208", "0.53440857", "0.53378314", "0.5333555", "0.5332153", "0.53274757", "0.53261286", "0.5323791", "0.53208005", "0.53194535", "0.5316483", "0.5314356", "0.53132945", "0.53067136", "0.52997583", "0.5298849", "0.5298849", "0.5298849", "0.5297167", "0.52916986", "0.5289454", "0.52886015", "0.5285072", "0.528224", "0.5275417", "0.5270499", "0.52663606", "0.52486134", "0.5242211", "0.52412134", "0.5221763", "0.5219365", "0.5213369", "0.52120477", "0.52087396" ]
0.6573886
1
GPU Faster RCNN test for TF Training. Instance Type: g3.8xlarge. Given the above parameters, registers a task with a family named after this test, runs the task, and waits for the task to be stopped before doing teardown operations on the instance and cluster.
def test_ecs_tensorflow_training_fasterrcnn_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd,
                                                ecs_cluster_name):
    instance_id, cluster_arn = ecs_container_instance

    num_gpus = ec2_utils.get_instance_num_gpus(instance_id)

    ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id,
                                         num_gpus=num_gpus)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_testing(gpu=0):\n # expected environment variables\n os.environ[\"BERT_BASE_DIR\"] = \"pretrained/cased_L-12_H-768_A-12\"\n os.environ[\"DATA_DIR\"] = \"dataset\"\n os.environ[\"OUTPUT_DIR\"] = \"output\"\n assert os.environ.get(\"BERT_BASE_DIR\") is not None\n assert os.environ.get(\"DATA_DIR\") is not None\n assert os.environ.get(\"OUTPUT_DIR\") is not None\n\n # set the gpu index\n os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpu)\n # set the required flags\n FLAGS.task_name = \"topic\"\n FLAGS.do_predict = True\n FLAGS.data_dir = os.environ.get(\"DATA_DIR\")\n FLAGS.vocab_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"vocab.txt\")\n FLAGS.bert_config_file = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_config.json\")\n FLAGS.init_checkpoint = os.path.join(os.environ.get(\"BERT_BASE_DIR\"), \"bert_model.ckpt\")\n FLAGS.do_lower_case = False\n FLAGS.max_seq_length = 128\n FLAGS.output_dir = os.environ.get(\"OUTPUT_DIR\")\n\n run_classifier.main(0)", "def test_mnist():\n env = os.environ.copy()\n if not \"CUDA_VISIBLE_DEVICES\" in env:\n env[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n subprocess.run(\n \"edflow -b template_tfe/config.yaml -t --max_batcher_per_epoch --num_epochs 1\",\n shell=True,\n check=True,\n env=env,\n )", "def test_ecs_tensorflow_training_mnist_gpu(gpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n num_gpus = ec2_utils.get_instance_num_gpus(instance_id)\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id,\n num_gpus=num_gpus)", "def run_sm_perf_test(image_uri, num_nodes, region):\n _, framework_version = get_framework_and_version_from_tag(image_uri)\n if framework_version.startswith(\"1.\"):\n pytest.skip(\"Skipping benchmark test on TF 1.x images.\")\n\n processor = \"gpu\" if \"gpu\" in image_uri else \"cpu\"\n device_cuda_str = (\n f\"{processor}-{get_cuda_version_from_tag(image_uri)}\" if processor == \"gpu\" else processor\n )\n\n ec2_instance_type = \"p3.16xlarge\" if processor == \"gpu\" else \"c5.18xlarge\"\n\n py_version = \"py2\" if \"py2\" in image_uri else \"py37\" if \"py37\" in image_uri else \"py3\"\n\n time_str = time.strftime(\"%Y-%m-%d-%H-%M-%S\")\n commit_info = os.getenv(\"CODEBUILD_RESOLVED_SOURCE_VERSION\")\n target_upload_location = os.path.join(\n BENCHMARK_RESULTS_S3_BUCKET,\n \"tensorflow\",\n framework_version,\n \"sagemaker\",\n \"training\",\n device_cuda_str,\n py_version,\n )\n training_job_name = f\"tf{framework_version[0]}-tr-bench-{device_cuda_str}-{num_nodes}-node-{py_version}-{commit_info[:7]}-{time_str}\"\n\n # Inserting random sleep because this test starts multiple training jobs around the same time, resulting in\n # a throttling error for SageMaker APIs.\n time.sleep(Random(x=training_job_name).random() * 60)\n\n test_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"resources\")\n venv_dir = os.path.join(test_dir, \"sm_benchmark_venv\")\n\n ctx = Context()\n\n with ctx.cd(test_dir), ctx.prefix(f\"source {venv_dir}/bin/activate\"):\n log_file = f\"results-{commit_info}-{time_str}-{framework_version}-{device_cuda_str}-{py_version}-{num_nodes}-node.txt\"\n run_out = ctx.run(\n f\"timeout 45m python tf_sm_benchmark.py \"\n f\"--framework-version {framework_version} \"\n f\"--image-uri {image_uri} \"\n f\"--instance-type ml.{ec2_instance_type} \"\n f\"--node-count {num_nodes} \"\n 
f\"--python {py_version} \"\n f\"--region {region} \"\n f\"--job-name {training_job_name}\"\n f\"2>&1 | tee {log_file}\",\n warn=True,\n echo=True,\n )\n\n if not (run_out.ok or run_out.return_code == 124):\n target_upload_location = os.path.join(target_upload_location, \"failure_log\")\n\n ctx.run(\n f\"aws s3 cp {os.path.join(test_dir, log_file)} {os.path.join(target_upload_location, log_file)}\"\n )\n\n LOGGER.info(f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\")\n\n result_statement, throughput = _print_results_of_test(\n os.path.join(test_dir, log_file), processor\n )\n throughput /= num_nodes\n\n assert run_out.ok, (\n f\"Benchmark Test failed with return code {run_out.return_code}. \"\n f\"Test results can be found at {os.path.join(target_upload_location, log_file)}\"\n )\n\n threshold_table = (\n (\n TENSORFLOW_SM_TRAINING_CPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_CPU_4NODE_THRESHOLD\n )\n if processor == \"cpu\"\n else TENSORFLOW_SM_TRAINING_GPU_1NODE_THRESHOLD\n if num_nodes == 1\n else TENSORFLOW_SM_TRAINING_GPU_4NODE_THRESHOLD\n )\n threshold = get_threshold_for_image(framework_version, threshold_table)\n LOGGER.info(\n f\"tensorflow {framework_version} sagemaker training {device_cuda_str} {py_version} \"\n f\"imagenet {num_nodes} nodes Throughput: {throughput} images/sec, threshold: {threshold} images/sec\"\n )\n assert throughput > threshold, (\n f\"tensorflow {framework_version} sagemaker training {processor} {py_version} imagenet {num_nodes} nodes \"\n f\"Benchmark Result {throughput} does not reach the threshold {threshold}\"\n )", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = 
label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)", "def run_time_operation(self, learning_option, cluster):\r\n\r\n # whether or not test procedure\r\n is_train = tf.placeholder_with_default(True, shape=())\r\n learning_option['is_train'] = is_train\r\n\r\n # get worker info: worker num, device type, device num\r\n device = self.get_attr('device')\r\n num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n # construct API\r\n def apiConstructor():\r\n # CIFAR-10 images: [224, 224, 3]\r\n # label: [1000]\r\n def train_in():\r\n x, y = learning_option.get('train_imagenet')\r\n return x, y\r\n def test_in():\r\n x, y = learning_option.get('test_imagenet')\r\n return x, y\r\n\r\n images, labels = tf.cond(is_train, train_in, test_in)\r\n # get output dimension\r\n outdim = list(images.get_shape()[i].value for i in xrange(len(images.get_shape())))\r\n\r\n # set output\r\n self.set_output('image', images)\r\n self.set_output('label', labels)\r\n self.set_dimension('image', outdim)\r\n\r\n # set tf summary\r\n tf.summary.image(self.name, images, max_outputs=10)\r\n\r\n with tf.variable_scope(self.name):\r\n # single node, model parallelism: explicit worker mapping\r\n # data parallelism: equally duplicate model\r\n if learning_option.get(\"parallel\", None) != \"DP\":\r\n with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n apiConstructor()\r\n else:\r\n apiConstructor()", "def run_universal_demo(args, use_gpu: bool = True) -> None:\n if \"scannet\" in args.dataset:\n args.img_name_unique = False\n else:\n args.img_name_unique = True\n\n args.u_classes = names_utils.get_universal_class_names()\n args.print_freq = 10\n\n args.split = \"test\"\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = ','.join(str(x) for x in args.test_gpu)\n logger.info(args)\n logger.info(\"=> creating model ...\")\n args.num_model_classes = len(args.u_classes)\n\n itask = InferenceTask(\n args,\n base_size=args.base_size,\n crop_h=args.test_h,\n crop_w=args.test_w,\n input_file=args.input_file,\n model_taxonomy=\"universal\",\n eval_taxonomy=\"universal\",\n scales=args.scales,\n )\n itask.execute()", "def test_training():\n config = SmartDict()\n\n config.NETWORK_CLASS = LMBiSeNet\n config.DATASET_CLASS = DummyCamvid\n\n config.IS_DEBUG = False\n config.IMAGE_SIZE = [128, 160]\n config.BATCH_SIZE = 2\n config.TEST_STEPS = 1\n config.MAX_STEPS = 2\n config.SAVE_CHECKPOINT_STEPS = 1\n config.KEEP_CHECKPOINT_MAX = 5\n config.SUMMARISE_STEPS = 1\n config.IS_PRETRAIN = False\n config.TASK = Tasks.SEMANTIC_SEGMENTATION\n\n # network model config\n config.NETWORK = SmartDict()\n config.NETWORK.OPTIMIZER_CLASS = tf.train.AdamOptimizer\n config.NETWORK.OPTIMIZER_KWARGS = {\"learning_rate\": 0.001}\n config.NETWORK.IMAGE_SIZE = config.IMAGE_SIZE\n config.NETWORK.BATCH_SIZE = config.BATCH_SIZE\n config.NETWORK.DATA_FORMAT = \"NHWC\"\n\n # daasegt config\n config.DATASET = SmartDict()\n config.DATASET.PRE_PROCESSOR = Resize(config.IMAGE_SIZE)\n config.DATASET.BATCH_SIZE = config.BATCH_SIZE\n config.DATASET.DATA_FORMAT = \"NHWC\"\n\n environment.init(\"test_lm_bisenet\")\n prepare_dirs(recreate=True)\n 
start_training(config, profile_step=1)", "def test_ecs_tensorflow_training_mnist_cpu(cpu_only, ecs_container_instance, tensorflow_training, training_cmd,\n ecs_cluster_name):\n instance_id, cluster_arn = ecs_container_instance\n\n ecs_utils.ecs_training_test_executor(ecs_cluster_name, cluster_arn, training_cmd, tensorflow_training, instance_id)", "def main():\r\n # assert tf.__version__[0] == \"2\"\r\n\r\n \"\"\" Load Config \"\"\"\r\n with open('./config/config_origin.json', 'r') as f:\r\n CONFIG = json.load(f)\r\n BATCH_SIZE = CONFIG[\"BATCH_SIZE\"]\r\n ROOT_PATH = CONFIG[\"ROOT_PATH\"]\r\n TRAIN_DATA_DIR = CONFIG[\"TRAIN_DATA_DIR\"]\r\n TEST_DATA_DIR = CONFIG[\"TEST_DATA_DIR\"]\r\n TRAIN_DATA_DIR = os.path.join(ROOT_PATH, TRAIN_DATA_DIR)\r\n TEST_DATA_DIR = os.path.join(ROOT_PATH, TEST_DATA_DIR)\r\n MODEL_CKPT = CONFIG[\"MODEL_CKPT\"]\r\n\r\n \"\"\" Prepare Model \"\"\"\r\n n = 6 # order of ResNetv2\r\n version = 2\r\n depth = model_depth(n, version)\r\n MODEL_TYPE = 'ResNet%dv%d' % (depth, version)\r\n SAVES_DIR = \"models-%s/\" % MODEL_TYPE\r\n SAVES_DIR = os.path.join(ROOT_PATH, SAVES_DIR)\r\n MODEL_CKPT = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n\r\n # Features directory\r\n FEATURE_DIR = os.path.join(ROOT_PATH, \"features\")\r\n FEATURE_DIR = os.path.join(FEATURE_DIR, \"models-%s/\" % MODEL_TYPE)\r\n if not os.path.exists(FEATURE_DIR):\r\n os.mkdir(FEATURE_DIR)\r\n\r\n if not os.path.exists(SAVES_DIR):\r\n os.mkdir(SAVES_DIR)\r\n model = resnet_v2(input_shape=INPUT_SHAPE, depth=depth, num_classes=2)\r\n model.compile(loss='categorical_crossentropy',\r\n optimizer=Adam(learning_rate=lr_schedule(TRAINING_EPOCHS)),\r\n metrics=METRICS)\r\n # model.summary()\r\n print(MODEL_TYPE)\r\n\r\n \"\"\" Load Weights \"\"\"\r\n model_ckpt_file = os.path.join(SAVES_DIR, MODEL_CKPT)\r\n if os.path.exists(model_ckpt_file):\r\n print(\"Model ckpt found! 
Loading...:%s\" % model_ckpt_file)\r\n model.load_weights(model_ckpt_file)\r\n\r\n \"\"\" Extract Testing Data \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"bad_1\"))\r\n train_bad_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_bad_samples = train_bad_df.shape[0]\r\n train_bad_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"bad_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Extract good samples \"\"\"\r\n _train_filenames = os.listdir(os.path.join(TRAIN_DATA_DIR, \"good_0\"))\r\n train_good_df = pd.DataFrame({\r\n 'filename': _train_filenames\r\n })\r\n n_good_samples = train_good_df.shape[0]\r\n train_good_df.to_csv(os.path.join(\r\n FEATURE_DIR, \"good_samples_list.csv\"), index=False)\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_bad_datagen = ImageDataGenerator(rescale=1./255)\r\n train_bad_generator = train_bad_datagen.flow_from_dataframe(\r\n train_bad_df,\r\n os.path.join(TRAIN_DATA_DIR, \"bad_1\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Create bad sample validation generator \"\"\"\r\n train_good_datagen = ImageDataGenerator(rescale=1./255)\r\n train_good_generator = train_good_datagen.flow_from_dataframe(\r\n train_good_df,\r\n os.path.join(TRAIN_DATA_DIR, \"good_0\"),\r\n x_col='filename',\r\n y_col=None,\r\n class_mode=None,\r\n target_size=IMAGE_SIZE,\r\n color_mode=\"grayscale\",\r\n batch_size=BATCH_SIZE,\r\n shuffle=False\r\n )\r\n\r\n \"\"\" Extractor \"\"\"\r\n extractor = Model(\r\n model.inputs, model.layers[-2].output) # flatten_2 (Flatten) (None, 12544)\r\n # features = extractor.predict(data)\r\n\r\n \"\"\" Extract train set 的特征 \"\"\"\r\n import time\r\n # bad samples\r\n start = time.perf_counter()\r\n print(\"Start extracting bad samples...\")\r\n features = extractor.predict_generator(\r\n train_bad_generator, steps=np.ceil(n_bad_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_bad.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)\r\n # TODO 用 pandas 存储\r\n # good samples\r\n start = time.perf_counter()\r\n print(\"Start extracting good samples...\")\r\n features = extractor.predict_generator(\r\n train_good_generator, steps=np.ceil(n_good_samples / BATCH_SIZE),\r\n workers=4, verbose=1)\r\n print(\"features.shape:\", features.shape) # (16/32/etc, 12544)\r\n np.save(os.path.join(FEATURE_DIR, \"features_train_good.npy\"), features)\r\n\r\n elapsed = (time.perf_counter() - start)\r\n print(\"Prediction time used:\", elapsed)", "def configure_gpu_tf():\n\n try:\n # locate available devices & set required environment variables\n available_device_ids = GPUtil.getFirstAvailable(order='first', maxLoad=0.7, maxMemory=0.7, attempts=1, interval=10)\n available_device_id = available_device_ids[0]\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES'] = str(available_device_id)\n print(f\"\\n GPU Found! 
running on GPU:{available_device_id}\\n\")\n\n # set GPU configuration (use all GPU memory if device 0, else use <50% of memory)\n tf.debugging.set_log_device_placement(False)\n physical_gpu = tf.config.experimental.list_physical_devices('GPU')[0]\n\n if available_device_id == 0:\n tf.config.experimental.set_memory_growth(physical_gpu, True)\n else:\n tf.config.experimental.set_virtual_device_configuration(\n physical_gpu,\n [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4500)]\n )\n logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n assert len(logical_gpus) == 1, \"error creating virtual GPU to fractionally use memory\"\n\n # if we can't find a GPU, or they are all busy, default to using CPU\n except RuntimeError:\n print(\"\\n No GPUs available... running on CPU\\n\")\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'", "def train_distributed():\n # Distributed stuff learnt from this repo: https://github.com/GoogleCloudPlatform/cloudml-dist-\n # mnist-example/blob/master/trainer/task.py\n\n # For Distributed TensorFlow\n env = json.loads(os.environ.get('TF_CONFIG', '{}'))\n cluster_info = env.get('cluster')\n cluster_spec = tf.train.ClusterSpec(cluster_info)\n task_info = env.get('task')\n job_name, task_index = task_info['type'], task_info['index']\n\n device_fn = tf.train.replica_device_setter(\n cluster=cluster_spec,\n worker_device='/job:%s/task:%d' % (job_name, task_index))\n\n print(\"Start job:%s, index:%d\" % (job_name, task_index))\n\n server = tf.train.Server(cluster_spec,\n job_name=job_name, task_index=task_index)\n\n # Start a parameter server node\n if job_name == 'ps':\n server.join()\n\n # Start a master/worker node\n if job_name == 'master' or job_name == 'worker':\n is_chief = (job_name == 'master')\n\n with tf.Graph().as_default() as graph: # TODO necessary?\n with tf.device(device_fn):\n # Prepare the data\n train_data, test_data, embeddings_file = prepare_data()\n\n # Create the model\n print(\"(%s,%d) Creating %d layers of %d units.\" %\n (job_name, task_index, FLAGS.num_layers, FLAGS.size))\n model = create_model(False)\n\n # Create train_dir\n if is_chief:\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n\n # TensorBoard summaries\n (test_loss, test_perplexity, bucket_loss_placeholders,\n bucket_perplexity_placeholders, summary, summary_writer) = create_summary_objects(graph)\n\n # Create supervisor\n init_op = tf.global_variables_initializer()\n\n # Create Supervisor. 
Disabling checkpoints and summaries, because we do that manually\n sv = tf.train.Supervisor(is_chief=is_chief, logdir=FLAGS.train_dir, init_op=init_op,\n init_fn=lambda session: after_init(session, model, embeddings_file),\n saver=model.saver, global_step=model.global_step,\n save_model_secs=0, save_summaries_secs=0, summary_op=None,\n summary_writer=None)\n\n with sv.managed_session(server.target) as sess:\n train(sess, model, train_data, test_data, summary, summary_writer, test_loss,\n test_perplexity, bucket_loss_placeholders, bucket_perplexity_placeholders,\n is_chief, job_name, task_index, sv.should_stop)\n sv.stop()", "def main(unused_argv):\n # Load data\n (train_adj, full_adj, train_feats, test_feats, y_train, y_val, y_test,\n train_mask, val_mask, test_mask, _, val_data, test_data, num_data,\n visible_data) = load_data(FLAGS.data_prefix, FLAGS.dataset, FLAGS.precalc)\n\n # Partition graph and do preprocessing\n if FLAGS.bsize > 1:\n _, parts = partition_utils.partition_graph(train_adj, visible_data,\n FLAGS.num_clusters)\n parts = [np.array(pt) for pt in parts]\n else:\n (parts, features_batches, support_batches, y_train_batches,\n train_mask_batches) = utils.preprocess(train_adj, train_feats, y_train,\n train_mask, visible_data,\n FLAGS.num_clusters,\n FLAGS.diag_lambda)\n\n (_, val_features_batches, val_support_batches, y_val_batches,\n val_mask_batches) = utils.preprocess(full_adj, test_feats, y_val, val_mask,\n np.arange(num_data),\n FLAGS.num_clusters_val,\n FLAGS.diag_lambda)\n\n (_, test_features_batches, test_support_batches, y_test_batches,\n test_mask_batches) = utils.preprocess(full_adj, test_feats, y_test,\n test_mask, np.arange(num_data),\n FLAGS.num_clusters_test,\n FLAGS.diag_lambda)\n idx_parts = list(range(len(parts)))\n\n # Some preprocessing\n model_func = models.GCN\n\n # Define placeholders\n placeholders = {\n 'support':\n tf.sparse_placeholder(tf.float32),\n 'features':\n tf.placeholder(tf.float32),\n 'labels':\n tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),\n 'labels_mask':\n tf.placeholder(tf.int32),\n 'dropout':\n tf.placeholder_with_default(0., shape=()),\n 'num_features_nonzero':\n tf.placeholder(tf.int32) # helper variable for sparse dropout\n }\n\n # Create model\n model = model_func(\n placeholders,\n input_dim=test_feats.shape[1],\n logging=True,\n multilabel=FLAGS.multilabel,\n norm=FLAGS.layernorm,\n precalc=FLAGS.precalc,\n num_layers=FLAGS.num_layers)\n\n # Initialize session\n sess = tf.Session()\n tf.set_random_seed(seed)\n\n # Init variables\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n cost_val = []\n total_training_time = 0.0\n # Train model\n for epoch in range(FLAGS.epochs):\n t = time.time()\n np.random.shuffle(idx_parts)\n if FLAGS.bsize > 1:\n (features_batches, support_batches, y_train_batches,\n train_mask_batches) = utils.preprocess_multicluster(\n train_adj, parts, train_feats, y_train, train_mask,\n FLAGS.num_clusters, FLAGS.bsize, FLAGS.diag_lambda)\n for pid in range(len(features_batches)):\n # Use preprocessed batch data\n features_b = features_batches[pid]\n support_b = support_batches[pid]\n y_train_b = y_train_batches[pid]\n train_mask_b = train_mask_batches[pid]\n # Construct feed dictionary\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_train_b,\n train_mask_b, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n else:\n 
np.random.shuffle(idx_parts)\n for pid in idx_parts:\n # Use preprocessed batch data\n features_b = features_batches[pid]\n support_b = support_batches[pid]\n y_train_b = y_train_batches[pid]\n train_mask_b = train_mask_batches[pid]\n # Construct feed dictionary\n feed_dict = utils.construct_feed_dict(features_b, support_b, y_train_b,\n train_mask_b, placeholders)\n feed_dict.update({placeholders['dropout']: FLAGS.dropout})\n # Training step\n outs = sess.run([model.opt_op, model.loss, model.accuracy],\n feed_dict=feed_dict)\n\n total_training_time += time.time() - t\n print_str = 'Epoch: %04d ' % (epoch + 1) + 'training time: {:.5f} '.format(\n total_training_time) + 'train_acc= {:.5f} '.format(outs[2])\n\n # Validation\n if FLAGS.validation:\n cost, acc, micro, macro = evaluate(sess, model, val_features_batches,\n val_support_batches, y_val_batches,\n val_mask_batches, val_data,\n placeholders)\n cost_val.append(cost)\n print_str += 'val_acc= {:.5f} '.format(\n acc) + 'mi F1= {:.5f} ma F1= {:.5f} '.format(micro, macro)\n\n tf.logging.info(print_str)\n\n if epoch > FLAGS.early_stopping and cost_val[-1] > np.mean(\n cost_val[-(FLAGS.early_stopping + 1):-1]):\n tf.logging.info('Early stopping...')\n break\n\n tf.logging.info('Optimization Finished!')\n\n # Save model\n saver.save(sess, FLAGS.save_name)\n\n # Load model (using CPU for inference)\n with tf.device('/cpu:0'):\n sess_cpu = tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}))\n sess_cpu.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n saver.restore(sess_cpu, FLAGS.save_name)\n # Testing\n test_cost, test_acc, micro, macro = evaluate(\n sess_cpu, model, test_features_batches, test_support_batches,\n y_test_batches, test_mask_batches, test_data, placeholders)\n print_str = 'Test set results: ' + 'cost= {:.5f} '.format(\n test_cost) + 'accuracy= {:.5f} '.format(\n test_acc) + 'mi F1= {:.5f} ma F1= {:.5f}'.format(micro, macro)\n tf.logging.info(print_str)", "def finetune(ft_ds, model, task, epochs=10, eval_ds=None):\n\n print('==========FINETUNE==========')\n\n # Filter out undesired examples with excluded_label\n ds = ft_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.finetune_preprocess)\n ds = ds.shuffle(1000)\n ds = ds.batch(FLAGS.finetune_bs)\n\n # loss, metrics, optimizers\n train_loss= tf.keras.metrics.Mean(name='train_loss')\n train_sup_acc = tf.keras.metrics.Accuracy(name='train_supervised_accuracy')\n criterion_sup = tf.nn.softmax_cross_entropy_with_logits \n optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) \n for epoch in range(epochs): \n train_loss.reset_states()\n train_sup_acc.reset_states()\n for x in ds:\n with tf.GradientTape() as tape:\n image = x['image']\n labels = x[task['name']]\n out = model(image, mode='supervised', sup_layers=1, training=True)\n # print(tf.math.argmax(out, axis=-1))\n metrics.update_supervised_accuracy(train_sup_acc, labels, out)\n loss = criterion_sup(tf.one_hot(labels, depth=task['num_classes']), out)\n loss = tf.math.reduce_mean(loss)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(\n filter(lambda gv: gv[0] is not None, zip(gradients, model.trainable_variables))\n )\n train_loss.update_state(loss)\n print('supervised loss')\n print(train_loss.result())\n print('supervised accuracy')\n print(train_sup_acc.result())\n\n # Evaluate results on eval_ds if possible\n if eval_ds: \n evaluate(eval_ds, model, task)", "def run_fn(fn_args: TrainerFnArgs):\n tf_transform_output = 
tft.TFTransformOutput(fn_args.transform_output)\n\n train_dataset = _input_fn(\n fn_args.train_files,\n tf_transform_output,\n constants.TRAIN_BATCH_SIZE,\n is_train=True\n )\n\n eval_dataset = _input_fn(\n fn_args.eval_files,\n tf_transform_output,\n constants.EVAL_BATCH_SIZE,\n is_train=False\n )\n\n # # check for availabe tpu and gpu units\n # try:\n # tpu = tf.distribute.cluster_resolver.TPUClusterResolver()\n # tf.config.experimental_connect_to_cluster(tpu)\n # tf.tpu.experimental.initialize_tpu_system(tpu)\n # strategy = tf.distribute.experimental.TPUStrategy(tpu)\n # except ValueError:\n # strategy = tf.distribute.MirroredStrategy()\n\n # with strategy.scope():\n model = get_model(fn_args)\n\n try:\n log_dir = fn_args.model_run_dir\n except KeyError:\n log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), \"logs\")\n\n absl.logging.info('Tensorboard logging to {}'.format(log_dir))\n\n callbacks = [\n # tf.keras.callbacks.ModelCheckpoint(\"DeepLabV3plus.ckpt\", verbose=1, save_weights_only=True, save_best_only=True),\n tf.keras.callbacks.ReduceLROnPlateau(monitor=\"iou_score\", factor=0.2, patience=6, verbose=1, mode=\"max\"),\n tf.keras.callbacks.EarlyStopping(monitor=\"iou_score\", patience=16, mode=\"max\", verbose=1, restore_best_weights=True),\n tf.keras.callbacks.TensorBoard(log_dir=log_dir, update_freq=\"batch\")\n ]\n\n absl.logging.info('Start training the top classifier')\n \n model.fit(\n train_dataset,\n epochs=constants.EPOCHS,\n steps_per_epoch=fn_args.train_steps,\n validation_data=eval_dataset,\n validation_steps=fn_args.eval_steps,\n callbacks=callbacks\n )\n\n signatures = {\n 'serving_default':\n _get_serve_image_fn(model).get_concrete_function(\n tf.TensorSpec(\n shape=[None, constants.HEIGHT, constants.WIDTH, 3],\n dtype=tf.float32,\n name=_transformed_name(constants.IMAGE_KEY)\n )\n )\n }\n\n model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)", "def run(config_file):\n config = load_config(config_file)\n config_global = config['global']\n\n # setup a logger\n logger = logging.getLogger('experiment')\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler_stdout = logging.StreamHandler(sys.stdout)\n handler_stdout.setLevel(config['logger']['level'])\n handler_stdout.setFormatter(formatter)\n logger.addHandler(handler_stdout)\n\n if 'path' in config['logger']:\n handler_file = logging.FileHandler(config['logger']['path'])\n handler_file.setLevel(config['logger']['level'])\n handler_file.setFormatter(formatter)\n logger.addHandler(handler_file)\n\n logger.setLevel(config['logger']['level'])\n\n # Allow the gpu to be used in parallel\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n if 'max_threads' in config_global:\n sess_config.intra_op_parallelism_threads = config_global['max_threads']\n\n # we allow to set the random seed in the config file for reproducibility. However, when running on GPU, results\n # will still be nondeterministic (due to nondeterministic behavior of tensorflow)\n if 'random_seed' in config_global:\n seed = config_global['random_seed']\n logger.info('Using fixed random seed'.format(seed))\n np.random.seed(seed)\n tf.set_random_seed(seed)\n\n with tf.Session(config=sess_config) as sess:\n # We are now fetching all relevant modules. 
It is strictly required that these module contain a variable named\n # 'component' that points to a class which inherits from experiment.Data, experiment.Experiment,\n # experiment.Trainer or experiment.Evaluator\n data_module = config['data-module']\n model_module = config['model-module']\n training_module = config['training-module']\n evaluation_module = config.get('evaluation-module', None)\n\n # The modules are now dynamically loaded\n DataClass = importlib.import_module(data_module).component\n ModelClass = importlib.import_module(model_module).component\n TrainingClass = importlib.import_module(training_module).component\n EvaluationClass = importlib.import_module(evaluation_module).component if evaluation_module else None\n\n # We then wire together all the modules and start training\n data = DataClass(config['data'], config_global, logger)\n model = ModelClass(config['model'], config_global, logger)\n training = TrainingClass(config['training'], config_global, logger)\n\n # setup the data (validate, create generators, load data, or else)\n logger.info('Setting up the data')\n data.setup()\n # build the model (e.g. compile it)\n logger.info('Building the model')\n model.build(data, sess)\n # start the training process\n logger.info('Starting the training process')\n training.start(model, data, sess)\n\n # perform evaluation, if required\n if EvaluationClass:\n logger.info('Evaluating')\n evaluation = EvaluationClass(config['evaluation'], config_global, logger)\n evaluation.start(model, data, sess)\n else:\n logger.info('No evaluation')\n\n logger.info('DONE')", "def train():\n args = arguments_st_train()\n\n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n\n \n if args.use_random_seed:\n tf.set_random_seed(args.random_seed)\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_dir,\n args.data_list,\n input_size=input_size,\n random_scale=args.random_scale,\n random_mirror=args.random_mirror,\n random_crop=args.random_crop,\n ignore_label=args.ignore_label,\n img_mean=IMG_MEAN,\n coord=coord,\n task=args.task)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Create network.\n with slim.arg_scope(vgg_arg_scope(weight_decay=args.weight_decay, use_batch_norm=True, is_training=True)):\n if args.network == 'vgg_16_deeplab_st':\n net, end_points = vgg_16_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n elif args.network == 'vgg_16_shortcut_deeplab_st':\n net, end_points = vgg_16_shortcut_deeplab_st(image_batch, num_classes=args.num_classes, is_training=True, dropout_keep_prob=args.keep_prob)\n else:\n raise Exception('network name is not recognized!')\n \n \n # Predictions.\n raw_output = end_points['vgg_16/fc8_voc12']\n\n # gt labels\n raw_gt = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes,\n one_hot=False, task=args.task) # [batch_size, h, w]\n\n # losses\n if args.task == 'normal':\n loss = get_normal_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n elif args.task == 'seg':\n loss = get_seg_loss(raw_output, raw_gt, args.num_classes, args.ignore_label) * args.loss_scale\n\n l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]\n reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\n \n # Image summary for visualisation.\n raw_output_up = 
tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n raw_output_up = tf.argmax(raw_output_up, axis=3)\n pred = tf.expand_dims(raw_output_up, dim=3)\n \n images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes, args.task], tf.uint8)\n preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes, args.task], tf.uint8)\n \n total_summary = tf.summary.image('images', \n tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n max_outputs=args.save_num_images) # Concatenate row-wise.\n summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n graph=tf.get_default_graph())\n \n # Define loss and optimisation parameters.\n train_op, step_ph = create_train_ops_st(reduced_loss, args)\n \n # Set up tf session and initialize variables. \n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n\n # Load variables if the checkpoint is provided.\n if args.restore_from is not None:\n load_st(sess, args)\n \n # Saver for storing checkpoints of the model.\n save_op = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=args.max_to_keep)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n # Iterate over training steps.\n for step in range(args.num_steps):\n start_time = time.time()\n feed_dict = { step_ph : step }\n \n if step % args.save_pred_every == 0:\n loss_value, images, labels, preds, summary, _ = sess.run([reduced_loss, image_batch, label_batch, pred, total_summary, train_op], feed_dict=feed_dict)\n summary_writer.add_summary(summary, step)\n save(save_op, sess, args.snapshot_dir, step)\n else:\n loss_value, _ = sess.run([reduced_loss, train_op], feed_dict=feed_dict)\n duration = time.time() - start_time\n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n coord.request_stop()\n coord.join(threads)", "def configure_training_federated(\n task_spec: training_specs.TaskSpecFederated,\n *, # Caller passes below args by name.\n resnet_layers: int = 18,\n num_classes: int = 100,\n l2_weight_decay: float = 1e-4,\n) -> training_specs.RunnerSpecFederated:\n\n return _Cifar100ImageTask(\n task_spec,\n resnet_layers=resnet_layers,\n num_classes=num_classes,\n l2_weight_decay=l2_weight_decay).build_federated_runner_spec()", "def main(_):\n\n params = create_params()\n\n assert params[\"train_dataset_path\"]\n assert params[\"eval_dataset_path\"]\n\n input_fn = input_fn_from_files(\n params[\"train_dataset_path\"])\n eval_input_fn = input_fn_from_files(\n params[\"eval_dataset_path\"])\n\n feature_columns = create_feature_columns(params)\n\n model_fn = create_model_fn(feature_columns)\n estimator = create_tpu_estimator(model_fn, feature_columns, params)\n\n for cycle_index in range(params[\"train_epochs\"]):\n tf.logging.info(\"Starting a training cycle: {}/{}\".format(\n cycle_index + 1, params[\"train_epochs\"]))\n estimator.train(input_fn=input_fn, steps=params[\"steps_per_epoch\"])\n tf.logging.info(\"Beginning evaluation.\")\n eval_results = estimator.evaluate(eval_input_fn,\n steps=params[\"num_eval_steps\"])\n tf.logging.info(\"Evaluation complete.\")\n\n recall_1 = float(eval_results[\"recall@1\"])\n recall_5 = float(eval_results[\"recall@5\"])\n loss = float(eval_results[\"loss\"])\n 
tf.logging.info(\n \"Iteration {}: recall@1 = {:.4f}, recall@5 = {:.4f}, Loss = {:.4f}\"\n .format(cycle_index + 1, recall_1, recall_5, loss))", "def main(seed, filter_, num_classes, setup, model_name, images_dir, precision_mode, test):\n f1, f2 = filter_\n model_name = 'flex_random_seed_{}_resnet_manual_highres_center_only_f1_{}_f2_{}'.format(seed, f1, f2)\n frozen_graph_filepath = './Models/Frozen_graphs/{}_{}/'.format(f1,f2) + model_name + '_frozen_graph.pb'\n frozen_graph, x_tensor, y_tensor = trt_frozen_graph_and_tensors(\n model_name=model_name, \n frozen_graph_filepath=frozen_graph_filepath, \n precision_mode=precision_mode\n )\n\n elapsed_time_full_dataset = []\n sum_of_confusion_matrices = np.zeros((6, 6))\n \n with tf.compat.v1.Session(graph=frozen_graph) as sess:\n for image_file in [img for img in os.listdir(images_dir) if img.endswith('.JPG')]:\n\n img = Image.open(images_dir + image_file)\n sx,sy = img.size\n\n print(\"Image size is %i x %i\" % (sx,sy)) # sx = 4912, sy = 3264\n print(\"Loading image %s\" % image_file)\n\n img_np = np.array(img)/255.0\n del img\n\n print(\"Predicting for image %s (%i x %i pixel)\" % (image_file,sx,sy))\n\n start = time.time()\n predictions_flex = sess.run(y_tensor, feed_dict={x_tensor:np.expand_dims(img_np, 0)})\n elapsed = time.time() - start\n elapsed_time_full_dataset.append(elapsed)\n del img_np #deleting afterwards to not take the deleting time into account\n\n print(\"Prediction took %f seconds (inference on full image)\" % elapsed)\n print(\"Merging predictions\")\n # merge the predictions on the quarter images\n predictions_flex_combined = np.zeros(predictions_flex.shape)\n\n elapsed = time.time()-start\n if embedded_version:\n print(\"Prediction took %f seconds (inference on split up image)\" % elapsed)\n\n if embedded_version:\n predictions_flex = predictions_flex_combined\n\n if save_annotations:\n print(\"Computing annotations...\")\n annotations = []\n d = 4\n for x in range(100, sx-101, d):\n for y in range(100, sy-101, d):\n x0 = int(round(float(x-100)/4) + 15)\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n annotations.append((probs_flex, x, y))\n\n if test: # add a prefix for test to not replace real experiments\n model_name = 'TEST_' + model_name\n\n # saving annotations\n annotation_dir = images_dir.replace('Data', 'Results/seeds/annotations_trt') + image_file\n annotate_and_save(annotations, d, annotation_dir, model_name, precision_mode)\n classes_image = annotate_and_save_per_class(\n annotations, \n d, \n annotation_dir, \n model_name, \n precision_mode\n )\n\n labels = load_labels(annotation_dir)\n confusion_matrix = np.zeros((num_classes, num_classes))\n for (c_name, x, y) in labels:\n if 100 <= x < sx-101 and 100 <= y < sy-101:\n x0 = int(round(float(x-100)/4) + 15 )\n y0 = int(round(float(y-100)/4) + 15)\n probs_flex = np.squeeze(predictions_flex[0, y0, x0, :])\n\n predicted_class = np.argmax(probs_flex)\n c = train_model.get_classes().index(c_name)\n confusion_matrix[c, predicted_class] += 1\n print(confusion_matrix)\n sum_of_confusion_matrices += confusion_matrix\n\n print(sum_of_confusion_matrices)\n sum_of_cm_fp = './Results/seeds/preds_trt/{}/{}_{}/sum_of_cm_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n elapsed_time_fp = './Results/seeds/elapsed_trt/{}/{}_{}/time_taken_'\\\n .format(precision_mode.lower(), f1,f2) + model_name + '_fp32.npy'\n\n\n np.save(sum_of_cm_fp, sum_of_confusion_matrices)\n np.save(elapsed_time_fp, 
elapsed_time_full_dataset)\n tf.reset_default_graph()", "def run_experiments() :\n #%%\n target_size=(32,32)\n g_specs = {\n \"batch_size\" : [ 30 , 60, 100 ],\n \"learning_rate\" : [ 0.0002, 0.0003, 0.0005 ],\n \"drop_out_rate\" : [ 0.2, 0.25, 0.3 ],\n \"rescale_mode\" : [ \"max_q\" , \"max\", \"\" ]\n }\n\n model_traits = MODEL_TRAITS[\"model2\"].copy()\n tt_obj = model_traits[\"trainer_tester_class\"]( model_traits )\n del model_traits[\"trainer_tester_class\"]\n\n cnt = 0\n for batchs, lrate, do_rate, resc_mode in product( g_specs[\"batch_size\"],\n g_specs[\"learning_rate\"],\n g_specs[\"drop_out_rate\"],\n g_specs[\"rescale_mode\"] ) :\n\n tt_obj.model_traits.update( {\"batch_size\" : batchs,\n \"learning_rate\" : lrate,\n \"rescale_mode\" : resc_mode,\n \"drop_out_rate\" : do_rate } )\n\n train_4d, train_gt = tu.make_4d_arrays( images_dir=\"images/train\",\n target_size=target_size )\n\n test_4d, test_gt = tu.make_4d_arrays( images_dir=\"images/test\",\n target_size=target_size )\n\n data = {\"train_4d\" : train_4d,\n \"test_4d\" : test_4d,\n \"train_y\" : train_gt,\n \"test_y\" : test_gt}\n\n valid_accu_log, train_accu_log = tt_obj.train( model_traits, data,\n logl=100 )\n idx_v = int(np.argmax( valid_accu_log))\n idx_t = int(np.argmax( train_accu_log))\n\n model_traits.update({\"valid_accu_log\" : valid_accu_log,\n \"train_accu_log\" : train_accu_log,\n \"best_valid\" : max(valid_accu_log),\n \"best_valid_at\" : idx_v,\n \"train_at_best_valid\" : train_accu_log[idx_v],\n \"best_train\" : max(train_accu_log),\n \"best_train_at\": idx_t })\n\n #print(cnt, pformat(model_traits) )\n print( \"%d : best_train = %.4f, best_valid = %.4f\" % \\\n (cnt, max(train_accu_log), max(valid_accu_log) ))\n\n with open( \"exp_results_%d.json\" % cnt,\n \"wt\" , encoding=\"utf8\" ) as f_out :\n print( json.dumps( model_traits ), file=f_out)\n\n\n cnt += 1\n #%%", "def test_all_tf_execution_regimes(test_case):\n if BACKEND == 'backend_tensorflow':\n return test_util.test_all_tf_execution_regimes(test_case)\n else:\n return test_case", "def run_sequence(seq: Sequence, tracker: Tracker, debug=False, num_gpu=8):\n '''2021.1.2 Add multiple gpu support'''\n try:\n worker_name = multiprocessing.current_process().name\n worker_id = int(worker_name[worker_name.find('-') + 1:]) - 1\n gpu_id = worker_id % num_gpu\n torch.cuda.set_device(gpu_id)\n except:\n pass\n\n def _results_exist():\n if seq.object_ids is None:\n if seq.dataset in ['trackingnet', 'got10k']:\n base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)\n bbox_file = '{}.txt'.format(base_results_path)\n else:\n bbox_file = '{}/{}.txt'.format(tracker.results_dir, seq.name)\n return os.path.isfile(bbox_file)\n else:\n bbox_files = ['{}/{}_{}.txt'.format(tracker.results_dir, seq.name, obj_id) for obj_id in seq.object_ids]\n missing = [not os.path.isfile(f) for f in bbox_files]\n return sum(missing) == 0\n\n if _results_exist() and not debug:\n print('FPS: {}'.format(-1))\n return\n\n print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))\n\n if debug:\n output = tracker.run_sequence(seq, debug=debug)\n else:\n try:\n output = tracker.run_sequence(seq, debug=debug)\n except Exception as e:\n print(e)\n return\n\n sys.stdout.flush()\n\n if isinstance(output['time'][0], (dict, OrderedDict)):\n exec_time = sum([sum(times.values()) for times in output['time']])\n num_frames = len(output['time'])\n else:\n exec_time = sum(output['time'])\n num_frames = len(output['time'])\n\n 
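# Aside: a small standalone helper capturing the timing logic above. Per-frame
# entries may be plain floats or per-stage dicts, and FPS is frames over total
# time. An illustrative sketch, not part of the original toolkit.
def frames_per_second(times):
    if times and isinstance(times[0], dict):
        # Each entry holds per-stage timings; sum the stages of every frame.
        total = sum(sum(t.values()) for t in times)
    else:
        total = sum(times)
    return len(times) / total

# e.g. frames_per_second([0.02, 0.03]) == 40.0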
print('FPS: {}'.format(num_frames / exec_time))\n\n if not debug:\n _save_tracker_output(seq, tracker, output)", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def warmup_resnet_imagenet_128_gpu_8_real(self):\n test_id = 'warmup_resnet_imagenet.gpu_8.128.real'\n args = {}\n config = self.build_resnet_test_config(test_id, args, batch_size=128, gpus=8,\n repeat=1, total_batches=1300)\n self.run_test_suite(config)", "def benchmark_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, data_dir=self.fake_data_dir, data_name='imagenet')\n self._run_benchmark(params)", "def run(args):\n # CONFIG\n run_name = get_run_name(args)\n logger.info(f'*** Starting run {run_name} ***')\n data_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/finetune_data/{args.finetune_data}'\n output_dir = f'gs://{args.bucket_name}/{args.project_name}/finetune/runs/{run_name}'\n\n # Get configs\n pretrained_model_config_path = get_model_config_path(args)\n model_config = get_model_config(pretrained_model_config_path)\n\n # Meta data/label mapping\n input_meta_data = get_input_meta_data(data_dir)\n label_mapping = get_label_mapping(data_dir)\n logger.info(f'Loaded training data meta.json file: {input_meta_data}')\n\n # Calculate steps, warmup steps and eval steps\n train_data_size = input_meta_data['train_data_size']\n num_labels = input_meta_data['num_labels']\n max_seq_length = input_meta_data['max_seq_length']\n if args.limit_train_steps is None:\n steps_per_epoch = int(train_data_size / args.train_batch_size)\n else:\n steps_per_epoch = args.limit_train_steps\n warmup_steps = int(args.num_epochs * train_data_size * args.warmup_proportion/ args.train_batch_size)\n if args.limit_eval_steps is None:\n eval_steps = int(math.ceil(input_meta_data['eval_data_size'] / args.eval_batch_size))\n else:\n eval_steps = args.limit_eval_steps\n\n # some logging\n if args.init_checkpoint is None:\n logger.info(f'Finetuning on datset {args.finetune_data} using default pretrained model {args.model_class}')\n else:\n logger.info(f'Finetuning on datset {args.finetune_data} using pretrained model in {args.init_checkpoint} of type {args.model_class}')\n logger.info(f'Running {args.num_epochs} epochs with {steps_per_epoch:,} steps per epoch')\n logger.info(f'Using warmup proportion of {args.warmup_proportion}, resulting in {warmup_steps:,} warmup steps')\n logger.info(f'Using learning rate: {args.learning_rate}, training batch size: {args.train_batch_size}, num_epochs: {args.num_epochs}')\n\n # Get model\n classifier_model, core_model = get_model(args, model_config, steps_per_epoch, warmup_steps, num_labels, max_seq_length)\n optimizer = classifier_model.optimizer\n loss_fn = get_loss_fn(num_labels)\n try:\n if ',' in args.validation_freq:\n validation_freq = args.validation_freq.split(',')\n validation_freq = [int(v) for v in validation_freq]\n else:\n validation_freq = int(args.validation_freq)\n except:\n raise ValueError(f'Invalid argument for validation_freq!')\n logger.info(f'Using a validation frequency of {validation_freq}')\n\n # Restore checkpoint\n if args.init_checkpoint:\n checkpoint_path = f'gs://{args.bucket_name}/{args.project_name}/pretrain/runs/{args.init_checkpoint}'\n checkpoint = tf.train.Checkpoint(model=core_model)\n checkpoint.restore(checkpoint_path).assert_existing_objects_matched()\n logger.info(f'Successfully restored checkpoint from 
{checkpoint_path}')\n\n # Run keras compile\n logger.info(f'Compiling keras model...')\n classifier_model.compile(\n optimizer=optimizer,\n loss=loss_fn,\n metrics=get_metrics())\n logger.info(f'... done')\n\n # Create all custom callbacks\n summary_dir = os.path.join(output_dir, 'summaries')\n summary_callback = tf.keras.callbacks.TensorBoard(summary_dir, profile_batch=0)\n time_history_callback = keras_utils.TimeHistory(\n batch_size=args.train_batch_size,\n log_steps=args.time_history_log_steps,\n logdir=summary_dir)\n custom_callbacks = [summary_callback, time_history_callback]\n if args.save_model:\n logger.info('Using save_model option...')\n checkpoint_path = os.path.join(output_dir, 'checkpoint')\n checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)\n custom_callbacks.append(checkpoint_callback)\n if args.early_stopping_epochs > 0:\n logger.info(f'Using early stopping of after {args.early_stopping_epochs} epochs of val_loss not decreasing')\n early_stopping_callback = tf.keras.callbacks.EarlyStopping(patience=args.early_stopping_epochs, monitor='val_loss')\n custom_callbacks.append(early_stopping_callback)\n\n # Generate dataset_fn\n train_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'train.tfrecords'),\n max_seq_length,\n args.train_batch_size,\n is_training=True)\n eval_input_fn = get_dataset_fn(\n os.path.join(data_dir, 'tfrecords', 'dev.tfrecords'),\n max_seq_length,\n args.eval_batch_size,\n is_training=False)\n\n # Add mertrics callback to calculate performance metrics at the end of epoch\n performance_metrics_callback = Metrics(\n eval_input_fn,\n label_mapping,\n os.path.join(summary_dir, 'metrics'),\n eval_steps,\n args.eval_batch_size,\n validation_freq)\n custom_callbacks.append(performance_metrics_callback)\n\n # Run keras fit\n time_start = time.time()\n logger.info('Run training...')\n history = classifier_model.fit(\n x=train_input_fn(),\n validation_data=eval_input_fn(),\n steps_per_epoch=steps_per_epoch,\n epochs=args.num_epochs,\n validation_steps=eval_steps,\n validation_freq=validation_freq,\n callbacks=custom_callbacks,\n verbose=1)\n time_end = time.time()\n training_time_min = (time_end-time_start)/60\n logger.info(f'Finished training after {training_time_min:.1f} min')\n\n # Write training log\n all_scores = performance_metrics_callback.scores\n all_predictions = performance_metrics_callback.predictions\n if len(all_scores) > 0:\n final_scores = all_scores[-1]\n logger.info(f'Final eval scores: {final_scores}')\n else:\n final_scores = {}\n full_history = history.history\n if len(full_history) > 0:\n final_val_loss = full_history['val_loss'][-1]\n final_loss = full_history['loss'][-1]\n logger.info(f'Final training loss: {final_loss:.2f}, Final validation loss: {final_val_loss:.2f}')\n else:\n final_val_loss = None\n final_loss = None\n data = {\n 'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'run_name': run_name,\n 'final_loss': final_loss,\n 'final_val_loss': final_val_loss,\n 'max_seq_length': max_seq_length,\n 'num_train_steps': steps_per_epoch * args.num_epochs,\n 'eval_steps': eval_steps,\n 'steps_per_epoch': steps_per_epoch,\n 'training_time_min': training_time_min,\n 'data_dir': data_dir,\n 'output_dir': output_dir,\n 'all_scores': all_scores,\n 'all_predictions': all_predictions,\n 'num_labels': num_labels,\n 'label_mapping': label_mapping,\n **full_history,\n **final_scores,\n **vars(args),\n }\n # Write run_log\n f_path_training_log = 
os.path.join(output_dir, 'run_logs.json')\n logger.info(f'Writing training log to {f_path_training_log}...')\n save_to_json(data, f_path_training_log)\n # Write bert config\n model_config.id2label = label_mapping\n model_config.label2id = {v:k for k, v in label_mapping.items()}\n model_config.max_seq_length = max_seq_length\n model_config.num_labels = num_labels\n f_path_bert_config = os.path.join(output_dir, 'bert_config.json')\n logger.info(f'Writing BERT config to {f_path_bert_config}...')\n save_to_json(model_config.to_dict(), f_path_bert_config)", "def test_net_on_dataset(args, dataset_name, proposal_file, output_dir, multi_gpu=False, gpu_id=0, use_matlab = False, early_stop=False):\n\n \n # print(\"test_net_on_dataset\")\n dataset = JsonDataset(dataset_name)\n test_timer = Timer()\n \n test_timer.tic()\n \n all_boxes = test_net(args, dataset_name, proposal_file, output_dir, gpu_id=gpu_id, early_stop=early_stop)\n test_timer.toc()\n\n logger.info('Total inference time: {:.3f}s'.format(test_timer.average_time))\n\n roidb = dataset.get_roidb()\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES + 1\n final_boxes = empty_results(num_classes, num_images)\n test_corloc = 'train' in dataset_name\n \n\n all_cls_scores = {}\n\n for i, entry in enumerate(roidb):\n\n if early_stop and i > 10: break\n\n boxes = all_boxes[entry['image']]\n \n cls_key = entry['image'].replace('.jpg','').split('/')[-1]\n\n # print(cls_key)\n\n if boxes['scores'] is not None:\n if test_corloc:\n # print(\"corlooking\")\n _, _, cls_boxes_i = box_results_for_corloc(boxes['scores'], boxes['boxes'])\n else:\n _, _, cls_boxes_i = box_results_with_nms_and_limit(boxes['scores'], boxes['boxes'])\n\n extend_results(i, final_boxes, cls_boxes_i)\n else:\n final_boxes = None\n \n results = task_evaluation.evaluate_all(dataset, final_boxes, output_dir, test_corloc, use_matlab = use_matlab)\n return results", "def run_vgg_experiment(args, device):\n validation_ratio, record_train_acc, record_val_acc, record_test_acc = utils.configure_training_mode(args)\n\n train_loader, validation_loader, test_loader = datasets.build_cifar10_loaders(args.batch_size,\n validation_ratio=validation_ratio,\n train_validation_split_seed=0)\n local_loss_list = utils.get_loss(args)\n nonlinearity = utils.get_nonlinearity(args)\n\n optimizer_local, local_opt_arguments_dict, local_scheduler_arguments_dict, \\\n optimizer_final, final_opt_arguments_dict, final_scheduler_arguments_dict = \\\n utils.choose_optimizers_and_parameters(args)\n\n conv_sizes = [128, 256, 256, 512, 512, 512]\n\n if args.vgg_conv_size_multiplier != 1:\n for i in range(len(conv_sizes)):\n conv_sizes[i] = conv_sizes[i] * args.vgg_conv_size_multiplier\n do_pooling = [False, True, False, True, True, True]\n\n if args.divisive_norm_conv:\n divisive_norm_list_conv = [networks.DivisiveNorm(args.divnorm_power, args.grouping_dim, args.grouped_var_delta)\n for i in range(len(conv_sizes))]\n else:\n divisive_norm_list_conv = None\n\n kernel_sizes = [3 for i in range(len(conv_sizes))]\n fc_layers = [1024]\n\n if args.divisive_norm_fc:\n divisive_norm_list_fc = [networks.DivisiveNorm(args.divnorm_power, args.grouping_dim,\n args.grouped_var_delta)\n for i in range(len(fc_layers))]\n else:\n divisive_norm_list_fc = None\n\n alt_feedback_type = None\n if args.feedback_alignment:\n alt_feedback_type = 'feedback_alignment'\n elif args.sign_symmetry:\n alt_feedback_type = 'sign_symmetry'\n\n net = networks.Network(nonlinearity, local_loss_list, optimizer_local,\n 
torch.optim.lr_scheduler.MultiStepLR, conv_sizes, kernel_sizes,\n do_pooling, fc_layers, 'max', 'CIFAR10', bias=False,\n local_opt_arguments_dict=local_opt_arguments_dict,\n local_scheduler_arguments_dict=local_scheduler_arguments_dict,\n dropout_p=args.dropout_p, batch_norm=args.batch_norm,\n divisive_norm_list_conv=divisive_norm_list_conv, divisive_norm_list_fc=divisive_norm_list_fc,\n spatial_dropout=args.spatial_dropout, alt_feedback_type=alt_feedback_type)\n\n net = net.to(device)\n print(net)\n\n final_loss = nn.CrossEntropyLoss()\n\n if args.backprop:\n final_opt = optimizer_final(net.parameters(), **final_opt_arguments_dict)\n compute_local_loss = False\n update_local_loss = False\n else:\n final_opt = optimizer_final(net.softmax_layer.parameters(), **final_opt_arguments_dict)\n compute_local_loss = True\n update_local_loss = True\n\n final_scheduler = torch.optim.lr_scheduler.MultiStepLR(final_opt, **final_scheduler_arguments_dict)\n\n train_acc, val_acc, test_acc = utils.train_network(\n net, device, final_loss, final_opt, final_scheduler, args.n_epochs, train_loader, validation_loader,\n test_loader, compute_local_loss=compute_local_loss, update_local_loss=update_local_loss,\n record_train_acc=record_train_acc, record_val_acc=record_val_acc, record_test_acc=record_test_acc,\n print_results=True, backprop_batch_manhattan=args.backprop_batch_manhattan)\n\n return train_acc, val_acc, test_acc", "def main(_) -> None:\n params = train_utils.parse_configuration(FLAGS)\n mode = FLAGS.mode\n model_dir = FLAGS.model_dir\n if 'train' in FLAGS.mode:\n # Pure eval modes do not output yaml files. Otherwise continuous eval job\n # may race against the train job for writing the same file.\n train_utils.serialize_config(params, model_dir)\n\n if FLAGS.seed is not None:\n logging.info('Setting tf seed.')\n tf.random.set_seed(FLAGS.seed)\n\n task = RankingTask(\n params=params.task,\n optimizer_config=params.trainer.optimizer_config,\n logging_dir=model_dir,\n steps_per_execution=params.trainer.steps_per_loop,\n name='RankingTask')\n\n enable_tensorboard = params.trainer.callbacks.enable_tensorboard\n\n strategy = distribute_utils.get_distribution_strategy(\n distribution_strategy=params.runtime.distribution_strategy,\n all_reduce_alg=params.runtime.all_reduce_alg,\n num_gpus=params.runtime.num_gpus,\n tpu_address=params.runtime.tpu)\n\n with strategy.scope():\n model = task.build_model()\n\n def get_dataset_fn(params):\n return lambda input_context: task.build_inputs(params, input_context)\n\n train_dataset = None\n if 'train' in mode:\n train_dataset = strategy.distribute_datasets_from_function(\n get_dataset_fn(params.task.train_data),\n options=tf.distribute.InputOptions(experimental_fetch_to_device=False))\n\n validation_dataset = None\n if 'eval' in mode:\n validation_dataset = strategy.distribute_datasets_from_function(\n get_dataset_fn(params.task.validation_data),\n options=tf.distribute.InputOptions(experimental_fetch_to_device=False))\n\n if params.trainer.use_orbit:\n with strategy.scope():\n checkpoint_exporter = train_utils.maybe_create_best_ckpt_exporter(\n params, model_dir)\n trainer = RankingTrainer(\n config=params,\n task=task,\n model=model,\n optimizer=model.optimizer,\n train='train' in mode,\n evaluate='eval' in mode,\n train_dataset=train_dataset,\n validation_dataset=validation_dataset,\n checkpoint_exporter=checkpoint_exporter)\n\n train_lib.run_experiment(\n distribution_strategy=strategy,\n task=task,\n mode=mode,\n params=params,\n model_dir=model_dir,\n 
trainer=trainer)\n\n else: # Compile/fit\n checkpoint = tf.train.Checkpoint(model=model, optimizer=model.optimizer)\n\n latest_checkpoint = tf.train.latest_checkpoint(model_dir)\n if latest_checkpoint:\n checkpoint.restore(latest_checkpoint)\n logging.info('Loaded checkpoint %s', latest_checkpoint)\n\n checkpoint_manager = tf.train.CheckpointManager(\n checkpoint,\n directory=model_dir,\n max_to_keep=params.trainer.max_to_keep,\n step_counter=model.optimizer.iterations,\n checkpoint_interval=params.trainer.checkpoint_interval)\n checkpoint_callback = keras_utils.SimpleCheckpoint(checkpoint_manager)\n\n time_callback = keras_utils.TimeHistory(\n params.task.train_data.global_batch_size,\n params.trainer.time_history.log_steps,\n logdir=model_dir if enable_tensorboard else None)\n callbacks = [checkpoint_callback, time_callback]\n\n if enable_tensorboard:\n tensorboard_callback = tf.keras.callbacks.TensorBoard(\n log_dir=model_dir,\n update_freq=min(1000, params.trainer.validation_interval),\n profile_batch=FLAGS.profile_steps)\n callbacks.append(tensorboard_callback)\n\n num_epochs = (params.trainer.train_steps //\n params.trainer.validation_interval)\n current_step = model.optimizer.iterations.numpy()\n initial_epoch = current_step // params.trainer.validation_interval\n\n eval_steps = params.trainer.validation_steps if 'eval' in mode else None\n\n if mode in ['train', 'train_and_eval']:\n logging.info('Training started')\n history = model.fit(\n train_dataset,\n initial_epoch=initial_epoch,\n epochs=num_epochs,\n steps_per_epoch=params.trainer.validation_interval,\n validation_data=validation_dataset,\n validation_steps=eval_steps,\n callbacks=callbacks,\n )\n model.summary()\n logging.info('Train history: %s', history.history)\n elif mode == 'eval':\n logging.info('Evaluation started')\n validation_output = model.evaluate(validation_dataset, steps=eval_steps)\n logging.info('Evaluation output: %s', validation_output)\n else:\n raise NotImplementedError('The mode is not implemented: %s' % mode)", "def setup(params):\n os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'\n os.environ['TF_SYNC_ON_FINISH'] = str(int(params.sync_on_finish))\n argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n\n # Sets GPU thread settings\n params = params._replace(gpu_thread_mode=params.gpu_thread_mode.lower())\n os.environ['TF_GPU_THREAD_MODE'] = params.gpu_thread_mode\n\n # Default to two threads. 
One for the device compute and the other for\n # memory copies.\n per_gpu_thread_count = params.per_gpu_thread_count or 2\n total_gpu_thread_count = per_gpu_thread_count * params.num_gpus\n\n os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)\n\n if not params.num_inter_threads and params.gpu_thread_mode in [\n 'gpu_private', 'gpu_shared'\n ]:\n cpu_count = multiprocessing.cpu_count()\n main_thread_count = max(cpu_count - total_gpu_thread_count, 1)\n params = params._replace(num_inter_threads=main_thread_count)\n\n platforms_util.initialize(params, create_config_proto(params))\n\n return params", "def renset50_128_gpu_8_real(self):\n test_id = 'resnet50.gpu_8.128.real'\n args = {}\n config = self.build_resnet_test_config(test_id, args, batch_size=128, gpus=8)\n self.run_test_suite(config)", "def run_test(filepath):\n num_class = 120 # dogbreeds class\n model = Resnet50MO(num_class, checkpoint_path=None)\n\n # image settings\n crop_size = model.input_size\n scale_size = model.input_size\n input_size = model.input_size\n input_mean = model.input_mean\n input_std = model.input_std\n\n # hyperparams settings\n epochs = 1\n batch_size = 32 # mini-batch-size\n learning_rate = 0.01\n momentum = 0.5\n decay_factor = 10\n eval_freq = 5 # in epochs\n\n # data generator settings: dataset and dataloader\n train_dataset = DogImageset(filepath, input_size,\n input_mean=input_mean, input_std=input_std)\n val_dataset = DogImageset(filepath, input_size,\n input_mean=input_mean, input_std=input_std)\n \n train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n val_loader = DataLoader(dataset=val_dataset, batch_size=batch_size, shuffle=False)\n\n # Loss and backprop settings\n model.cuda()\n criterion = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(\n model.parameters(),\n lr=learning_rate,\n momentum=momentum\n )\n\n run_model_train_test(model, train_loader, criterion, optimizer)", "def test_image_task_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def test_distributed(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, self.test_sampler = self._get_smi_dl(phase=\"test\", shuffle=False)\n self.test_sampler.set_epoch(0)\n if self.rank == 0:\n test_loader = tqdm(self.test_loader, desc='testing...')\n else:\n test_loader = self.test_loader\n \n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.cuda(non_blocking=True)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].cuda(non_blocking=True)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n test_batch_size = torch.tensor([test_batch_size]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(test_batch_size, dist.ReduceOp.SUM)\n test_batch_size = test_batch_size.item()\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = 
batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n batch_correct_preds = torch.tensor([batch_correct_preds]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(batch_correct_preds, dist.ReduceOp.SUM)\n batch_correct_preds = batch_correct_preds.item()\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1 and self.rank == 0: # overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n \n batch_loss = torch.tensor([batch_loss]).cuda(self.gpu, 
non_blocking=True)\n dist.all_reduce(batch_loss, dist.ReduceOp.SUM)\n batch_loss = batch_loss.item()\n test_loss += batch_loss\n if self.rank == 0:\n test_loader.set_description(f\"testing...loss={test_loss / test_batch_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n \n dist.barrier()\n message = f\"{self.args.expt_name}\\n\"\n if self.rank == 0:\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass", "def backend_train_test_loop(e=None, timeout=30,\n is_compute_contributivity='True',\n is_parallelize=''):\n if is_parallelize == '':\n is_parallelize = None\n else:\n is_parallelize = strtobool(is_parallelize)\n\n from databoard.db_tools import backend_train_test_loop\n is_compute_contributivity = strtobool(is_compute_contributivity)\n backend_train_test_loop(\n e, timeout, is_compute_contributivity, is_parallelize)", "def renset50_64_gpu_8_real(self):\n test_id = 'resnet50.gpu_8.64.real'\n args = {}\n config = self.build_resnet_test_config(test_id, args, batch_size=64, gpus=8)\n self.run_test_suite(config)", "def main(_):\n\n spec = cluster_spec(args.num_workers, 1)\n cluster = tf.train.ClusterSpec(spec).as_cluster_def()\n\n def shutdown(signal, frame):\n logger.warn(\"Received signal {}: exiting\".format(signal))\n sys.exit(128+signal)\n signal.signal(signal.SIGHUP, shutdown)\n signal.signal(signal.SIGINT, shutdown)\n signal.signal(signal.SIGTERM, shutdown)\n\n if args.job_name == 'worker':\n config = tf.ConfigProto(intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=2)\n\n server = tf.train.Server(cluster, job_name='worker',\n task_index=args.task,\n config=config)\n run(args, server)\n else:\n config = tf.ConfigProto(device_filters=['/job:ps'])\n server = tf.train.Server(cluster, job_name='ps', task_index=args.task,\n config=config)\n while True:\n time.sleep(1000)", "def evaluate(eval_ds, model, task):\n\n print('==========EVAL==========')\n # Testing contrastive accuracy\n if task['name'] == 'contrastive_accuracy':\n ds = eval_ds.map(data_utils.pretrain_preprocess)\n ds = ds.batch(128)\n test_contrast_acc = tf.keras.metrics.Accuracy(name='test_constrastive_accuracy')\n for x in ds:\n image = x['image']\n image = tf.transpose(image, [1, 0, 2, 3, 4])\n image = tf.reshape(\n image, \n (image.shape[0]*image.shape[1], image.shape[2], image.shape[3], image.shape[4])\n )\n out = model(image, mode='unsupervised', training=False)\n metrics.update_contrastive_accuracy2(test_contrast_acc, out, TEMP)\n print('test contrastive accuracy')\n print(test_contrast_acc.result())\n return \n\n # Testing classification accuracy \n ds = eval_ds.filter(lambda x: x['label'] != task['excluded_label'])\n ds = ds.map(data_utils.eval_preprocess)\n ds = ds.batch(FLAGS.eval_bs)\n test_class_acc = tf.keras.metrics.Accuracy(name='test_class_accuracy')\n for x in ds:\n image = x['image']\n labels = x[task['name']]\n if task['name'] == 'extr':\n out = model(image, mode='eval', sup_layers=2, training=False)\n else:\n out = model(image, mode='eval', sup_layers=1, training=False)\n metrics.update_supervised_accuracy(test_class_acc, labels, out)\n \n if 
FLAGS.debug:\n print(tf.math.argmax(out, axis=-1))\n print('test classification accuracy')\n print(test_class_acc.result())

def main():\n\n args = define_and_process_args()\n print('\n', 'ARGUMENTS', '\n\n', args, '\n')\n\n log_dir = get_log_dir(args)\n print('\n', 'LOG DIRECTORY', '\n\n', log_dir, '\n')\n\n standardized_data_path = os.path.join(args.data_dir, args.data_filename)\n if not os.path.exists(standardized_data_path):\n message = '%s does not exist.' % standardized_data_path\n raise ValueError(message)\n\n dataset = data.Dataset(standardized_data_path)\n train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)\n train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]\n test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]\n\n train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)\n test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)\n\n # Resolve the model class by name; getattr is safer than eval() here.\n Model = getattr(models, args.model_type + 'Model')\n input_size = dataset.input_size\n target_size = dataset.num_classes\n\n # This is just to satisfy a low-CPU requirement on our cluster\n # when using GPUs.\n if 'CUDA_VISIBLE_DEVICES' in os.environ:\n config = tf.ConfigProto(intra_op_parallelism_threads=2,\n inter_op_parallelism_threads=2)\n else:\n config = None\n\n with tf.Session(config=config) as sess:\n model = Model(input_size, target_size, args.num_layers,\n args.hidden_layer_size, args.init_scale,\n args.dropout_keep_prob)\n optimizer = optimizers.Optimizer(\n model.loss, args.num_train_sweeps, args.initial_learning_rate,\n args.num_initial_sweeps, args.num_sweeps_per_decay,\n args.decay_factor, args.max_global_grad_norm)\n train(sess, model, optimizer, log_dir, args.batch_size,\n args.num_sweeps_per_summary, args.num_sweeps_per_save,\n train_input_seqs, train_reset_seqs, train_label_seqs,\n test_input_seqs, test_reset_seqs, test_label_seqs)

def test_multitask_early_fusion(self):\n args = BASE_ARGS.copy()\n args.update(MULTITASK_ARGS)\n args.update(EARLY_FUSION_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 5.0, 'failed to train image_seq2seq on image+text task'\n )

def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))

def run_task(data_dir, task_id):\n print("Train and test for task %d ..." % task_id)\n\n # Parse data; the pattern needs both the directory and the task id,\n # so the format string must carry two placeholders.\n train_files = glob.glob('%s/qa%d_*_train.txt' % (data_dir, task_id))\n test_files = glob.glob('%s/qa%d_*_test.txt' % (data_dir, task_id))\n\n dictionary = {"nil": 0}\n train_story, train_questions, train_qstory = parse_babi_task(train_files, dictionary, False)\n test_story, test_questions, test_qstory = parse_babi_task(test_files, dictionary, False)\n\n general_config = BabiConfig(train_story, train_questions, dictionary)\n\n # Build an empty model and train it before testing, so that memory, model\n # and loss are defined when test() is called below.\n memory, model, loss = build_model(general_config)\n\n if general_config.linear_start:\n train_linear_start(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n else:\n train(train_story, train_questions, train_qstory, memory, model, loss, general_config)\n\n test(test_story, test_questions, test_qstory, memory, model, loss, general_config)

def test(cfg):\n # Set up environment.\n distributed.init_distributed_training(cfg)\n\n # Set 
random seed from configs.\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n\n # Print config.\n if distributed.is_master_proc():\n print(\"Test with config:\")\n print(cfg)\n\n # Build the model and print model statistics.\n # Use cuda if available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # Construct the model\n model = PanopticNarrativeGroundingBaseline(cfg, device=device)\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device,\n find_unused_parameters=True\n )\n if cfg.LOG_MODEL_INFO and distributed.is_master_proc():\n print(\"Model:\\n{}\".format(model))\n print(\"Params: {:,}\".format(np.sum([p.numel() for p in model.parameters()]).item()))\n print(\"Mem: {:,} MB\".format(torch.cuda.max_memory_allocated() / 1024 ** 3))\n print(\"nvidia-smi\")\n os.system(\"nvidia-smi\")\n\n # Load a checkpoint to test if applicable.\n checkpoint_path = osp.join(cfg.OUTPUT_DIR, 'model_final.pth')\n if cfg.TEST.CHECKPOINT_FILE_PATH != \"\":\n checkpoint_path = cfg.TEST.CHECKPOINT_FILE_PATH\n if osp.exists(checkpoint_path):\n if distributed.is_master_proc():\n print('Loading model from: {0}'.format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n elif cfg.TRAIN.CHECKPOINT_FILE_PATH != \"\":\n # If no checkpoint found in TEST.CHECKPOINT_FILE_PATH or in the current\n # checkpoint folder, try to load checkpoint from\n # TRAIN.CHECKPOINT_FILE_PATH and test it.\n checkpoint_path = cfg.TRAIN.CHECKPOINT_FILE_PATH\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n else:\n if distributed.is_master_proc():\n print(\"Testing with random initialization. 
Only for debugging.\")\n\n # Create testing loaders.\n test_dataset = PanopticNarrativeGroundingDataset(cfg, cfg.DATA.VAL_SPLIT, train=False)\n test_loader = DataLoader(\n test_dataset,\n batch_size=int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS)),\n shuffle=False,\n sampler=(DistributedSampler(test_dataset) if cfg.NUM_GPUS > 1 else None),\n num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n pin_memory=cfg.DATA_LOADER.PIN_MEMORY\n )\n \n if distributed.is_master_proc():\n print(\"Testing model for {} iterations\".format(len(test_loader)))\n\n # Perform test on the entire dataset.\n perform_test(test_loader, model, cfg)", "def test(model_params, dataset_test, testing_params, log_directory, device, cuda_available=True,\n metric_fns=None):\n # DATA LOADER\n test_loader = DataLoader(dataset_test, batch_size=testing_params[\"batch_size\"],\n shuffle=False, pin_memory=True,\n collate_fn=imed_loader_utils.imed_collate,\n num_workers=0)\n\n # LOAD TRAIN MODEL\n fname_model = os.path.join(log_directory, \"best_model.pt\")\n print('\\nLoading model: {}'.format(fname_model))\n model = torch.load(fname_model, map_location=device)\n if cuda_available:\n model.cuda()\n model.eval()\n\n # CREATE OUTPUT FOLDER\n path_3Dpred = os.path.join(log_directory, 'pred_masks')\n if not os.path.isdir(path_3Dpred):\n os.makedirs(path_3Dpred)\n\n # METRIC MANAGER\n metric_mgr = imed_metrics.MetricManager(metric_fns)\n\n # UNCERTAINTY SETTINGS\n if (testing_params['uncertainty']['epistemic'] or testing_params['uncertainty']['aleatoric']) and \\\n testing_params['uncertainty']['n_it'] > 0:\n n_monteCarlo = testing_params['uncertainty']['n_it']\n testing_params['uncertainty']['applied'] = True\n print('\\nComputing model uncertainty over {} iterations.'.format(n_monteCarlo))\n else:\n testing_params['uncertainty']['applied'] = False\n n_monteCarlo = 1\n\n for i_monteCarlo in range(n_monteCarlo):\n preds_npy, gt_npy = run_inference(test_loader, model, model_params, testing_params, path_3Dpred,\n cuda_available, i_monteCarlo)\n metric_mgr(preds_npy, gt_npy)\n\n # COMPUTE UNCERTAINTY MAPS\n if n_monteCarlo > 1:\n imed_utils.run_uncertainty(ifolder=path_3Dpred)\n\n metrics_dict = metric_mgr.get_results()\n metric_mgr.reset()\n print(metrics_dict)\n return metrics_dict", "def renset50_32_gpu_1_real(self):\n test_id = 'resnet50.gpu_1.32.real'\n args = {}\n config = self.build_resnet_test_config(test_id, args, batch_size=32, gpus=1)\n self.run_test_suite(config)", "def __init__(self, prototxt=os.path.join(settings.DEPENDENCIES_PATH, 'face-py-faster-rcnn', 'models', 'face', 'VGG16', 'faster_rcnn_end2end', 'test.prototxt'),\n caffemodel=settings.GPU_FACE_DETECTION_CAFFE_MODEL,\n face_rect_expand_factor=FACE_RECT_EXPAND_FACTOR,\n enable_cuda=settings.CUDA_ENABLED):\n self.is_cuda_enable = enable_cuda\n self.prototxt = prototxt\n self.caffemodel = caffemodel\n self.face_rect_expand_factor = face_rect_expand_factor\n self.net = caffe.Net(self.prototxt, self.caffemodel, caffe.TEST)", "def executeCNN(args, files, var_targets, nn_arch, batchsize, epoch, mode, n_gpu=(1, 'avolkov'), shuffle=(False, None), tb_logger=False):\n\n print '\\nEpoch Interval:\\t', epoch[0], ' - ', epoch[1], '\\n'\n\n if epoch[0] == 0:\n if nn_arch == 'DCNN':\n if args.wires in ['U', 'V']: model = create_shared_dcnn_network_2()\n elif args.wires in ['UV', 'U+V']: model = create_shared_dcnn_network_4()\n else: raise ValueError('passed wire specifier need to be U/V/UV')\n elif nn_arch == 'ResNet':\n raise ValueError('Currently, this is not implemented')\n elif nn_arch == 
'Inception':\n if args.wires in ['U', 'V']: model = create_shared_inception_network_2()\n elif args.wires in ['UV', 'U+V']: model = create_shared_inception_network_4()\n else: raise ValueError('passed wire specifier need to be U/V/UV')\n elif nn_arch == 'Conv_LSTM':\n raise ValueError('Currently, this is not implemented')\n else:\n raise ValueError('Currently, only DCNN and Inception are available as nn_arch')\n else:\n model = load_trained_model(args)\n\n if mode == 'train':\n model.summary()\n try: # plot model, install missing packages with conda install if it throws a module error\n raise OSError\n ks.utils.plot_model(model, to_file=args.folderOUT + '/plot_model.png',\n show_shapes=True, show_layer_names=False)\n except OSError:\n save_plot_model_script(folderOUT=args.folderOUT)\n print 'could not produce plot_model.png ---- run generate_model_plot on CPU'\n\n # exit()\n\n adam = ks.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0) # Default: epsi: None, Deep NN: epsi=0.1/1.0\n optimizer = adam # Choose optimizer, only used if epoch == 0\n\n # model, batchsize = parallelize_model_to_n_gpus(model, n_gpu, batchsize) # TODO compile after restart????\n # if n_gpu[0] > 1: model.compile(loss=loss_opt[0], optimizer=optimizer, metrics=[loss_opt[1]]) # TODO check\n\n if epoch[0] == 0:\n print 'Compiling Keras model\\n'\n model.compile(\n loss='categorical_crossentropy',\n optimizer=optimizer,\n metrics=['accuracy'])\n # TODO Add Precision/Recall to metric, see:\n # TODO https://stackoverflow.com/questions/43076609/how-to-calculate-precision-and-recall-in-keras\n\n print 'optimizer epsilon:', model.optimizer.epsilon\n\n print \"\\nTraining begins in Epoch:\\t\", epoch\n\n model.save(args.folderOUT + \"models/model-000.hdf5\")\n model.save_weights(args.folderOUT + \"models/weights-000.hdf5\")\n model = fit_model(args, model, files, batchsize, var_targets, epoch, shuffle, n_events=None, tb_logger=tb_logger)\n model.save_weights(args.folderOUT + \"models/weights_final.hdf5\")\n model.save(args.folderOUT + \"models/model_final.hdf5\")\n elif mode in ['valid']: #validation\n # model.summary()\n print 'Validate events'\n print model\n print model.get_layer(index=3)\n exit()\n\n args.sources = \"\".join(sorted(args.sources))\n args.position = \"\".join(sorted(args.position))\n args.folderOUT += \"0validation/\" + args.sources + \"-\" + mode + \"-\" + args.position + \"-\" + str(args.num_weights) + \"-\" + args.wires + \"/\"\n os.system(\"mkdir -p -m 770 %s \" % (args.folderOUT))\n\n EVENT_INFO = get_events(args=args, files=files, model=model,\n fOUT=(args.folderOUT + \"events_\" + str(args.num_weights) + \"_\" + args.sources + \"-\" + mode + \"-\" + args.position + \"-\" + args.wires + \".hdf5\"))\n\n # EVENT_INFO['DNNPredClass'] = EVENT_INFO['DNNPred'].argmax(axis=-1)\n # EVENT_INFO['DNNTrueClass'] = EVENT_INFO['DNNTrue'].argmax(axis=-1)\n # EVENT_INFO['DNNPredTrueClass'] = EVENT_INFO['DNNPred'][:, 1]\n\n validation_mc_plots(args=args, folderOUT=args.folderOUT, data=EVENT_INFO)\n else:\n raise ValueError('chosen mode (%s) not available. 
Choose between train/mc/data'%(mode))", "def run_experiment(hparams):\n\n data_file_name = build_data_file_name(hparams.pair, hparams.time_interval, hparams.data_period)\n\n df = data_pre_processing(data_file_name, hparams.path_to_archives, hparams.path_to_data_dir)\n\n rows = df.shape[0]\n\n train, test = prepare_data(df[rows - 100:rows], hparams.feature_window, hparams.label_window)\n\n print(\"train:{}\".format(train))\n print(\"test:{}\".format(test))\n # my_feature_columns = [tf.feature_column.numeric_column('f')]\n # estimator = tf.estimator.DNNClassifier(\n # feature_columns=[],\n # hidden_units=[1024, 512, 256])\n\n # estimator = tf.estimator.DNNRegressor()", "def configure_training_federated(\n task_spec: training_specs.TaskSpecFederated,\n *, # Caller passes below args by name.\n model: str = 'resnet18',\n only_digits: bool = False,\n merge_case: bool = False,\n) -> training_specs.RunnerSpecFederated:\n return _EmnistCharacterTask(\n task_spec,\n model=model,\n only_digits=only_digits,\n merge_case=merge_case,\n ).build_federated_runner_spec()", "def run_inference(test_loader, model, model_params, testing_params, ofolder, cuda_available,\n i_monte_carlo=None):\n # INIT STORAGE VARIABLES\n preds_npy_list, gt_npy_list = [], []\n pred_tmp_lst, z_tmp_lst, fname_tmp = [], [], ''\n volume = None\n weight_matrix = None\n\n for i, batch in enumerate(tqdm(test_loader, desc=\"Inference - Iteration \" + str(i_monte_carlo))):\n with torch.no_grad():\n # GET SAMPLES\n # input_samples: list of batch_size tensors, whose size is n_channels X height X width X depth\n # gt_samples: idem with n_labels\n # batch['*_metadata']: list of batch_size lists, whose size is n_channels or n_labels\n if model_params[\"name\"] == \"HeMISUnet\":\n input_samples = imed_utils.cuda(imed_utils.unstack_tensors(batch[\"input\"]), cuda_available)\n else:\n input_samples = imed_utils.cuda(batch[\"input\"], cuda_available)\n gt_samples = imed_utils.cuda(batch[\"gt\"], cuda_available, non_blocking=True)\n\n # EPISTEMIC UNCERTAINTY\n if testing_params['uncertainty']['applied'] and testing_params['uncertainty']['epistemic']:\n for m in model.modules():\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n\n # RUN MODEL\n if model_params[\"name\"] in [\"HeMISUnet\", \"FiLMedUnet\"]:\n metadata = get_metadata(batch[\"input_metadata\"], model_params)\n preds = model(input_samples, metadata)\n else:\n preds = model(input_samples)\n\n if model_params[\"name\"] == \"HeMISUnet\":\n # Reconstruct image with only one modality\n input_samples = batch['input'][0]\n\n if model_params[\"name\"] == \"UNet3D\" and model_params[\"attention\"]:\n imed_utils.save_feature_map(batch, \"attentionblock2\", os.path.dirname(ofolder), model, input_samples,\n slice_axis=test_loader.dataset.slice_axis)\n\n # PREDS TO CPU\n preds_cpu = preds.cpu()\n\n # RECONSTRUCT 3D IMAGE\n last_batch_bool = (i == len(test_loader) - 1)\n\n slice_axis = imed_utils.AXIS_DCT[testing_params['slice_axis']]\n\n # LOOP ACROSS SAMPLES\n for smp_idx in range(len(preds_cpu)):\n if \"bounding_box\" in batch['input_metadata'][smp_idx][0]:\n imed_obj_detect.adjust_undo_transforms(testing_params[\"undo_transforms\"].transforms, batch, smp_idx)\n\n if not model_params[\"name\"].endswith('3D'):\n last_sample_bool = (last_batch_bool and smp_idx == len(preds_cpu) - 1)\n # undo transformations\n preds_idx_undo, metadata_idx = testing_params[\"undo_transforms\"](preds_cpu[smp_idx],\n batch['gt_metadata'][smp_idx],\n data_type='gt')\n # preds_idx_undo is a list n_label 
arrays\n preds_idx_arr = np.array(preds_idx_undo)\n\n # TODO: gt_filenames should not be a list\n fname_ref = metadata_idx[0]['gt_filenames'][0]\n\n # NEW COMPLETE VOLUME\n if pred_tmp_lst and (fname_ref != fname_tmp or last_sample_bool):\n # save the completely processed file as a nifti file\n fname_pred = os.path.join(ofolder, fname_tmp.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If Uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n output_nii = imed_utils.pred_to_nib(data_lst=pred_tmp_lst,\n z_lst=z_tmp_lst,\n fname_ref=fname_tmp,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='2d',\n bin_thr=0.9 if testing_params[\"binarize_prediction\"] else -1)\n # TODO: Adapt to multilabel\n preds_npy_list.append(output_nii.get_fdata()[:, :, :, 0])\n gt_npy_list.append(nib.load(fname_tmp).get_fdata())\n\n output_nii_shape = output_nii.get_fdata().shape\n if len(output_nii_shape) == 4 and output_nii_shape[-1] > 1:\n imed_utils.save_color_labels(np.stack(pred_tmp_lst, -1),\n testing_params[\"binarize_prediction\"],\n fname_tmp,\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n imed_utils.AXIS_DCT[testing_params['slice_axis']])\n\n # re-init pred_stack_lst\n pred_tmp_lst, z_tmp_lst = [], []\n\n # add new sample to pred_tmp_lst, of size n_label X h X w ...\n pred_tmp_lst.append(preds_idx_arr)\n\n # TODO: slice_index should be stored in gt_metadata as well\n z_tmp_lst.append(int(batch['input_metadata'][smp_idx][0]['slice_index']))\n fname_tmp = fname_ref\n\n else:\n pred_undo, metadata, last_sample_bool, volume, weight_matrix = \\\n imed_utils.volume_reconstruction(batch,\n preds_cpu,\n testing_params['undo_transforms'],\n smp_idx, volume, weight_matrix)\n fname_ref = metadata[0]['gt_filenames'][0]\n # Indicator of last batch\n if last_sample_bool:\n pred_undo = np.array(pred_undo)\n fname_pred = os.path.join(ofolder, fname_ref.split('/')[-1])\n fname_pred = fname_pred.split(testing_params['target_suffix'][0])[0] + '_pred.nii.gz'\n # If uncertainty running, then we save each simulation result\n if testing_params['uncertainty']['applied']:\n fname_pred = fname_pred.split('.nii.gz')[0] + '_' + str(i_monte_carlo).zfill(2) + '.nii.gz'\n\n # Choose only one modality\n output_nii = imed_utils.pred_to_nib(data_lst=[pred_undo],\n z_lst=[],\n fname_ref=fname_ref,\n fname_out=fname_pred,\n slice_axis=slice_axis,\n kernel_dim='3d',\n bin_thr=0.5 if testing_params[\"binarize_prediction\"] else -1)\n preds_npy_list.append(output_nii.get_fdata().transpose(3, 0, 1, 2))\n gt_lst = []\n for gt in metadata[0]['gt_filenames']:\n # For multi-label, if all labels are not in every image\n if gt is not None:\n gt_lst.append(nib.load(gt).get_fdata())\n else:\n gt_lst.append(np.zeros(gt_lst[0].shape))\n\n gt_npy_list.append(np.array(gt_lst))\n # Save merged labels with color\n\n if pred_undo.shape[0] > 1:\n imed_utils.save_color_labels(pred_undo,\n testing_params['binarize_prediction'],\n batch['input_metadata'][smp_idx][0]['input_filenames'],\n fname_pred.split(\".nii.gz\")[0] + '_color.nii.gz',\n slice_axis)\n\n return preds_npy_list, gt_npy_list", "def run():\n import numpy as np\n\n batch_size = 500\n discard_threshold = 50\n input_base = '/home/ubuntu/faces'\n model_dir = '/home/ubuntu/FaceNet/20170512-110547'\n\n with tf.Graph().as_default():\n with tf.Session() as sess:\n\n meta_file, 
ckpt_file = facenet.get_model_filenames(os.path.expanduser(model_dir))\n\n print('Metagraph file: %s' % meta_file)\n print('Checkpoint file: %s' % ckpt_file)\n load_model(model_dir, meta_file, ckpt_file)\n with open('/home/ubuntu/face_uploaded.txt', 'w') as upload_log:\n with concurrent.futures.ProcessPoolExecutor(max_workers=4) as executor:\n futures = []\n img_cnt = 0\n ignored = 0\n for fam_id in os.listdir(input_base):\n input_dir = os.path.join(input_base, fam_id)\n image_paths = get_onedir(input_dir)\n # image_list, label_list = facenet.get_image_paths_and_labels(train_set)\n\n # Get input and output tensors\n images_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n embeddings = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n\n image_size = images_placeholder.get_shape()[1]\n embedding_size = embeddings.get_shape()[1]\n\n nrof_images = len(image_paths)\n nrof_batches = int(math.ceil(1.0 * nrof_images / batch_size))\n emb_array = np.zeros((nrof_images, embedding_size))\n facial_encodings = compute_facial_encodings(sess, images_placeholder, embeddings, phase_train_placeholder,\n image_size,\n embedding_size, nrof_images, nrof_batches, emb_array,\n batch_size, image_paths)\n sorted_clusters = cluster_facial_encodings(facial_encodings)\n if not sorted_clusters:\n ignored += 1\n continue\n\n sorted_clusters = data_cleaning(sorted_clusters, facial_encodings)\n\n # For those families dont have enought photos for child, ignore them\n # Or those families use mitene in unordinary way.\n if len(sorted_clusters[0]) < discard_threshold:\n ignored += 1\n continue\n\n print('Start zip upload for: {0}, contains {1} images!'.format(fam_id, len(sorted_clusters[0])))\n futures.append(executor.submit(zip_and_upload, sorted_clusters, fam_id))\n img_cnt += len(sorted_clusters[0])\n for img in sorted_clusters[0]:\n upload_log.write('{0},{1}\\n'.format(fam_id, os.path.basename(img)))\n\n cnt = 0\n for future in concurrent.futures.as_completed(futures):\n try:\n print('job is finished!: ' + future.result())\n cnt += 1\n except Exception as e:\n print('zip and upload job failed!: ' + str(e))\n\n print('Finished upload {0} images from {1} families! Ignored {2} families!'.format(img_cnt, cnt, ignored))", "def test_net(args, dataset_name, proposal_file, output_dir, ind_range=None, gpu_id=0, early_stop=False):\n # print('test_net')\n roidb, dataset, start_ind, end_ind, total_num_images = get_roidb_and_dataset(dataset_name, proposal_file, ind_range)\n model = initialize_model_from_cfg(args, gpu_id=gpu_id)\n num_images = len(roidb)\n num_classes = cfg.MODEL.NUM_CLASSES\n all_boxes = {}\n\n timers = defaultdict(Timer)\n \n \n\n\n if 'train' in dataset_name:\n if ind_range is not None:\n det_name = 'discovery_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'discovery.pkl'\n else:\n if ind_range is not None:\n det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)\n else:\n det_name = 'detections.pkl'\n \n det_file = os.path.join(output_dir, det_name)\n if os.path.exists(det_file):\n print('the file', det_file, 'exists. 
I am loading detections from it...')\n return load_object(det_file)['all_boxes']\n\n for i, entry in enumerate(roidb):\n if early_stop and i > 10: break\n\n box_proposals = entry['boxes']\n if len(box_proposals) == 0:\n continue\n \n im = cv2.imread(entry['image'])\n # print(entry['image'])\n cls_boxes_i = im_detect_all(model, im, box_proposals, timers)\n\n all_boxes[entry['image']] = cls_boxes_i\n\n if i % 10 == 0: # Reduce log file size\n ave_total_time = np.sum([t.average_time for t in timers.values()])\n eta_seconds = ave_total_time * (num_images - i - 1)\n eta = str(datetime.timedelta(seconds=int(eta_seconds)))\n \n det_time = (timers['im_detect_bbox'].average_time)\n \n logger.info(('im_detect: range [{:d}, {:d}] of {:d}:{:d}/{:d} {:.3f}s (eta: {})').format(\n start_ind + 1, end_ind, total_num_images, start_ind + i + 1, start_ind + num_images, det_time, eta))\n\n cfg_yaml = yaml.dump(cfg)\n\n save_object(\n dict(\n all_boxes=all_boxes,\n cfg=cfg_yaml\n ), det_file\n )\n logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))\n return all_boxes", "def benchmark_xla_fakedistort_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n distortions=True,\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)", "def construct_test_model(self):\n # Set the placeholder for the input episode\n self.inputa = tf.placeholder(tf.float32)\n self.inputb = tf.placeholder(tf.float32)\n self.labela = tf.placeholder(tf.float32)\n self.labelb = tf.placeholder(tf.float32)\n\n with tf.variable_scope('meta-test-model', reuse=None) as training_scope: \n # construct the model weights \n self.ss_weights = ss_weights = self.construct_resnet_ss_weights()\n self.weights = weights = self.construct_resnet_weights()\n self.fc_weights = fc_weights = self.construct_fc_weights()\n\n # Load test base epoch number from FLAGS\n num_updates = FLAGS.test_base_epoch_num\n\n def task_metalearn(inp, reuse=True):\n \"\"\"The function to process one episode in a meta-batch.\n Args:\n inp: the input episode.\n reuse: whether reuse the variables for the normalization.\n Returns:\n A serious outputs like losses and accuracies.\n \"\"\"\n # Seperate inp to different variables\n inputa, inputb, labela, labelb = inp\n # Generate empty list to record accuracies\n accb_list = []\n\n # Embed the input images to embeddings with ss weights\n emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse)\n emb_outputb = self.forward_resnet(inputb, weights, ss_weights, reuse=True)\n\n # This part is similar to the meta-train function, you may refer to the comments above\n outputa = self.forward_fc(emb_outputa, fc_weights)\n lossa = self.loss_func(outputa, labela) \n grads = tf.gradients(lossa, list(fc_weights.values()))\n gradients = dict(zip(fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \\\n self.update_lr*gradients[key] for key in fc_weights.keys()]))\n outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n accb_list.append(accb)\n \n for j in range(num_updates - 1):\n lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)\n grads = tf.gradients(lossa, list(fast_fc_weights.values()))\n gradients = dict(zip(fast_fc_weights.keys(), grads))\n fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \\\n 
self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))\n                outputb = self.forward_fc(emb_outputb, fast_fc_weights)\n                accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))\n                accb_list.append(accb)\n\n            lossb = self.loss_func(outputb, labelb)\n\n            task_output = [lossb, accb, accb_list]\n\n            return task_output\n\n        if FLAGS.norm != 'None':\n            unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)\n\n        out_dtype = [tf.float32, tf.float32, [tf.float32]*num_updates]\n\n        result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), \\\n            dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)\n        lossesb, accsb, accsb_list = result\n\n    self.metaval_total_loss = total_loss = tf.reduce_sum(lossesb)\n    self.metaval_total_accuracy = total_accuracy = tf.reduce_sum(accsb)\n    self.metaval_total_accuracies = total_accuracies = [tf.reduce_sum(accsb_list[j]) for j in range(num_updates)]

def build_resnet_test_config(self, test_id, test_args, batch_size=32, gpus=1,\n                             total_batches=300, repeat=3):\n    config = {}\n    config['total_batches'] = total_batches\n    # Relative path in the repo to the test folder.\n    config['cmd_path'] = 'imagenet'\n\n    # PyTorch automatically uses all GPUs it can see.\n    gpu_list = ','.join(str(x) for x in range(gpus))\n    visible_devices = 'CUDA_VISIBLE_DEVICES={}'.format(gpu_list)\n    if gpus > 1:\n      multi_gpu = (\" --dist-url 'tcp://127.0.0.1:6001' --dist-backend 'nccl'\"\n                   \" --multiprocessing-distributed --world-size 1 --rank 0 \")\n      config['pycmd'] = 'python3 main.py {} {} {}'.format('{}',\n                                                          multi_gpu,\n                                                          self.imagenet_dir)\n    else:\n      config['pycmd'] = '{} python3 main.py {} {}'.format(visible_devices,\n                                                          '{}',\n                                                          self.imagenet_dir)\n    config['test_id'] = test_id\n    config['repeat'] = self.auto_test_config.get('repeat', repeat)\n    # Normalized name of model being tested\n    config['model'] = 'resnet50_v1.5'\n    config['gpus'] = gpus\n    config['batch_size'] = batch_size\n    args = {}\n    config['args'] = args\n    # print stats every epoch.\n    args['print-freq'] = self.auto_test_config.get('print_freq', 1)\n    args['epochs'] = 1\n    args['arch'] = 'resnet50'\n    args['batch-size'] = batch_size * gpus\n    args['workers'] = self.auto_test_config.get('input_threads', 5)\n\n    # Override any args with the test's args\n    args.update(test_args)\n\n    return config

def meta_train(tasks, model, args, device, method='random', meta_iters=10000, num_updates=5, meta_batch_size=5):\n    # Define logging\n    os.makedirs(args.save_path, exist_ok=True)\n    writer = SummaryWriter(\n        os.path.join(args.save_path, 'runs', '{}'.format(datetime.now()).replace(\":\", \"_\")))\n\n    header = '      Time      Task  Iteration      Loss   Accuracy'\n    log_template = '{:>10} {:>25} {:10.0f} {:10.6f} {:10.6f}'\n    test_template = 'Test mean: {}, Test std: {}'\n\n    print(header)\n    start = time.time()\n\n    # Define optimizers, lr schedulers and loss function\n    optimizer_bert = AdamW(params=model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n    optimizer = optim.Adam(params=chain(model.proto_net.encoder.mlp.parameters(),\n                                        model.output_layer.parameters()),\n                           lr=args.lr)\n    scheduler_bert = get_cosine_schedule_with_warmup(optimizer_bert, 200, meta_iters)\n    scheduler = get_cosine_schedule_with_warmup(optimizer, 0, meta_iters)\n    # ProtoNets always have CrossEntropy loss due to softmax output\n    cross_entropy = nn.CrossEntropyLoss()\n\n    print('Loading Tokenizer..')\n    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)\n    
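    # Aside (editor's sketch): the next few lines extend the vocabulary with
    # special tokens and resize the embedding matrix. A minimal standalone
    # version of the same workflow, hedged: the marker strings mirror this
    # function, and the `transformers` calls should be checked against the
    # installed version:
    #
    #   from transformers import BertModel, BertTokenizer
    #   tok = BertTokenizer.from_pretrained('bert-base-uncased')
    #   n_added = tok.add_special_tokens(
    #       {'additional_special_tokens': ['[MNT]', '[URL]']})
    #   bert = BertModel.from_pretrained('bert-base-uncased')
    #   bert.resize_token_embeddings(len(tok))  # pass the FULL new vocab size,
    #                                           # not just n_added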
special_tokens_dict = {'additional_special_tokens': [\"[MNT]\", \"[URL]\"]}\n\n    num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)\n    print('We have added', num_added_toks, 'tokens')\n    model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n    # Notice: resize_token_embeddings expects to receive the full size of the new vocabulary, i.e. the length of the tokenizer.\n\n    # set up task sampler and task model\n    sampler = TaskSampler(tasks, method=method, custom_task_ratio=args.custom_task_ratio, supp_query_split=True)\n    task_model = type(model)(args)\n    task_model.proto_net.encoder.bert.resize_token_embeddings(len(tokenizer))\n\n    iterations = 0\n    # Iterate over the data\n    train_iter = sampler.get_iter('train', tokenizer, batch_size=args.batch_size, shuffle=True)\n    model.train()\n\n    # set up validation task and episodes for evaluation\n    val_task = get_validation_task(args)\n    episodes = torch.load(args.episodes)\n\n    # dummy data to overwrite old values of the task model's output layer\n    dummy_w = torch.randn((args.mlp_dims[-1], 2))\n    dummy_b = torch.randn(2)\n\n    average_query_loss = 0\n    best_query_loss = 1e+9\n    best_test_mean = -1\n    best_test_last = -1\n    convergence_tolerance_cnt = 0\n    # outer loop (meta-iterations)\n    for i in range(meta_iters):\n        grads = []\n        task_losses_inner = {}\n        task_accuracies_inner = {}\n        task_losses_outer = {}\n        task_accuracies_outer = {}\n        # inner loop (sample different tasks)\n        for task_sample in range(meta_batch_size):\n            # clone the original model\n            task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n            task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n            task_model.to(device)\n            task_model.train()\n\n            # new optimizer for every new task model\n            task_optimizer_bert = optim.SGD(params=task_model.proto_net.encoder.bert.parameters(), lr=args.bert_lr)\n            task_optimizer = optim.SGD(params=chain(task_model.proto_net.encoder.mlp.parameters(),\n                                                    task_model.output_layer.parameters()),\n                                       lr=args.inner_lr)\n\n            # prepare support and query set\n            batch = next(train_iter)\n            support = batch[:3]\n            query = batch[3:]\n\n            # set up the output layer (via the meta-model's prototype network)\n            proto_embeddings = model.proto_net(support[0].to(device), attention_mask=support[2].to(device))\n            prototypes = model.proto_net.calculate_centroids((proto_embeddings, support[1]), sampler.get_num_classes())\n            W, b = task_model.calculate_output_params(prototypes.detach())\n            task_model.initialize_classifier(W, b)\n\n            # train some iterations on the support set\n            for update in range(num_updates):\n                task_optimizer_bert.zero_grad()\n                task_optimizer.zero_grad()\n                predictions = task_model(support[0].to(device), attention_mask=support[2].to(device))\n                task_loss = cross_entropy(predictions, support[1].long().squeeze().to(device))\n                task_loss.backward()\n                task_optimizer.step()\n                task_optimizer_bert.step()\n\n            # record task losses and accuracies for logging\n            task_losses_inner[sampler.get_name()] = task_loss.item()\n            task_accuracies_inner[sampler.get_name()] = sampler.calculate_accuracy(predictions, support[1].to(device))\n\n            # trick to add prototypes back into the computation graph\n            W = 2 * prototypes + (W - 2 * prototypes).detach()\n            b = -prototypes.norm(dim=1)**2 + (b + prototypes.norm(dim=1)**2).detach()\n            task_model.initialize_classifier(W, b, hard_replace=True)\n\n            # calculate gradients for the meta update on the query set\n            predictions = task_model(query[0].to(device), attention_mask=query[2].to(device))\n            query_loss = cross_entropy(predictions, 
query[1].long().squeeze().to(device))\n            query_loss.backward()\n\n            # record task losses and accuracies for logging\n            task_losses_outer[sampler.get_name()] = query_loss.item()\n            task_accuracies_outer[sampler.get_name()] = sampler.calculate_accuracy(predictions, query[1].to(device))\n            average_query_loss += query_loss.item()\n\n            # register W and b as parameters again to avoid errors in the weight update\n            W = nn.Parameter(W)\n            b = nn.Parameter(b)\n            task_model.initialize_classifier(W, b, hard_replace=True)\n\n            # save the gradients of the first task model\n            if task_sample == 0:\n                for param in task_model.parameters():\n                    if param.requires_grad and param.grad is not None:\n                        grads.append(param.grad.clone())\n            # add the gradients of all task samples\n            else:\n                p = 0\n                for param in task_model.parameters():\n                    if param.requires_grad and param.grad is not None:\n                        grads[p] += param.grad.clone()\n                        p += 1\n\n        # perform the meta update\n        # first load/add the calculated gradients into the meta-model\n        # (already contains gradients from the prototype calculation)\n        p = 0\n        for param in model.parameters():\n            if param.requires_grad and param.grad is not None:\n                param.grad += grads[p]\n                p += 1\n        # update model parameters according to the gradients from the inner loop (clear gradients afterwards)\n        optimizer.step()\n        optimizer_bert.step()\n        scheduler.step()\n        scheduler_bert.step()\n        optimizer.zero_grad()\n        optimizer_bert.zero_grad()\n\n        iterations += 1\n        if iterations % args.log_every == 0:\n            average_query_loss /= (args.log_every*meta_batch_size)\n            iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n            iter_acc = sum(task_accuracies_outer.values()) / len(task_accuracies_outer.values())\n            writer.add_scalar('Meta_Average/Loss/outer', iter_loss, iterations)\n            writer.add_scalar('Meta_Average/Accuracy/outer', iter_acc, iterations)\n            for t in tasks:\n                task_name = t.get_name()\n                if task_name in task_losses_inner.keys():\n                    writer.add_scalar('{}/Loss/inner'.format(task_name), task_losses_inner[task_name], iterations)\n                    writer.add_scalar('{}/Accuracy/inner'.format(task_name), task_accuracies_inner[task_name], iterations)\n                    writer.add_scalar('{}/Loss/outer'.format(task_name), task_losses_outer[task_name], iterations)\n                    writer.add_scalar('{}/Accuracy/outer'.format(task_name), task_accuracies_outer[task_name], iterations)\n            print(log_template.format(\n                str(timedelta(seconds=int(time.time() - start))),\n                sampler.get_name(),\n                iterations,\n                iter_loss,\n                iter_acc))\n\n            # save the best snapshot\n            if average_query_loss < best_query_loss:\n                best_query_loss = average_query_loss\n                average_query_loss = 0\n                snapshot_prefix = os.path.join(args.save_path, 'best_query')\n                snapshot_path = (\n                    snapshot_prefix +\n                    '_loss_{:.5f}_iter_{}_model.pt'\n                ).format(best_query_loss, iterations)\n                model.save_model(snapshot_path)\n                # Keep only the best snapshot\n                for f in glob.glob(snapshot_prefix + '*'):\n                    if f != snapshot_path:\n                        os.remove(f)\n\n        # evaluate in k-shot fashion\n        if iterations % args.eval_every == 0:\n            task_model.proto_net.load_state_dict(model.proto_net.state_dict())\n            task_model.initialize_classifier(nn.Parameter(dummy_w), nn.Parameter(dummy_b), hard_replace=True)\n            test_mean, test_std = k_shot_testing(task_model, episodes, val_task, device, num_updates=args.inner_updates,\n                                                 num_test_batches=args.num_test_batches)\n            writer.add_scalar('{}/Acc'.format(val_task.get_name()), test_mean, iterations)\n            writer.add_scalar('{}/STD'.format(val_task.get_name()), test_std, iterations)\n            
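    # Aside (editor's note): why the W = 2*prototypes, b = -||c||^2 trick used
    # above turns a linear layer into a nearest-prototype classifier. For a
    # query x and centroid c_k:  -||x - c_k||^2 = 2<c_k, x> - ||c_k||^2 - ||x||^2,
    # and the ||x||^2 term is constant across classes, so
    # argmax_k (W x + b)_k == argmin_k ||x - c_k||^2. A small runnable check
    # (illustrative, plain torch):
    #
    #   import torch
    #   protos = torch.randn(5, 16)            # 5 classes, 16-dim centroids
    #   x = torch.randn(16)
    #   W, b = 2 * protos, -protos.norm(dim=1) ** 2
    #   assert (W @ x + b).argmax() == (protos - x).norm(dim=1).argmin()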
print(test_template.format(test_mean, test_std), flush=True)\n            if test_mean > best_test_mean:\n                best_test_mean = test_mean\n                snapshot_prefix = os.path.join(args.save_path, 'best_test_{}'.format(val_task.get_name()))\n                snapshot_path = (\n                    snapshot_prefix +\n                    '_acc_{:.5f}_iter_{}_model.pt'\n                ).format(best_test_mean, iterations)\n                model.save_model(snapshot_path)\n                # Keep only the best snapshot\n                for f in glob.glob(snapshot_prefix + '*'):\n                    if f != snapshot_path:\n                        os.remove(f)\n            \n            if test_mean > best_test_last:\n                best_test_last = best_test_mean\n                convergence_tolerance_cnt = 0\n            else:\n                convergence_tolerance_cnt += 1\n\n            if convergence_tolerance_cnt == args.convergence_tolerance:\n                break\n\n\n        # Periodically save model checkpoints.\n        if iterations % args.save_every == 0:\n            iter_loss = sum(task_losses_outer.values()) / len(task_losses_outer.values())\n            snapshot_prefix = os.path.join(args.save_path, 'snapshot')\n            snapshot_path = (\n                snapshot_prefix +\n                '_iter_{}_loss_{}_model.pt'\n            ).format(iterations, iter_loss)\n            logging.debug('Saving model...')\n            model.save_model(snapshot_path)\n            # Keep only the last snapshot\n            for f in glob.glob(snapshot_prefix + '*'):\n                if f != snapshot_path:\n                    os.remove(f)\n\n    writer.close()

def renset50_128_gpu_2_real(self):\n    test_id = 'resnet50.gpu_2.128.real'\n    args = {}\n    config = self.build_resnet_test_config(test_id, args, batch_size=128, gpus=2)\n    self.run_test_suite(config)

def main():\n\n    # Experiment Start\n    start_time = datetime.now()\n    logger.info(\n        '################ Bergson Team Experiment Start #################')\n    logger.info(\n        f'Starting Bergson Astro Pi team experiment at {start_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n\n    '''\n    # Load simple Conv2D AI Model\n    logger.info(\"Loading AI Convolutional Model\")\n    conv2D_model = load_model(\"Conv2D_TF114\")\n    '''\n\n    # Load TFLite Model\n    logger.info(\"Loading TFLite Mobilenetv2 Model\")\n    mobilenetv2_interpreter = load_tflite_model(\"./Mobilenetv2_TF114.tflite\")\n\n    # Create Log File\n    logger.info(f'Creating Log file at {str(data_file)}')\n    with open(data_file, 'w') as f:\n        writer = csv.writer(f)\n        header = (\"Date/time\", \"Location\", \"Picture Name\", \"Predicted NO2\")\n        writer.writerow(header)\n\n    # Start Loop over 3 hours\n\n    now_time = datetime.now()\n    i = 0\n    # run the main loop for up to 175 minutes\n    while (now_time < start_time + timedelta(minutes=175)):\n\n        # Take Earth Picture\n        timestamp = datetime.now().strftime(\"%Y-%m-%d_%H:%M:%S\")\n        pic_name = f'bergson_img_{timestamp}.jpg'\n        capture(rpi_cam, str(dir_path/pic_name))\n        logger.info(f'Experiment Pipeline {i} on picture {pic_name}')\n\n        # NDVI Preprocessing\n        ndvi_image = get_ndvi(str(dir_path/pic_name))\n        ndvi_image = np.expand_dims(ndvi_image, axis=2)\n\n        # RGB preprocessing for the shape expected by Mobilenetv2 - comment out the line below when using the simple Conv2D model\n        ndvi_rgb_image = get_ndvi_rgb(ndvi_image)\n\n        '''\n        # Do Inference with simple Conv2D AI Model\n        prediction = make_inference(ndvi_image,conv2D_model)\n        '''\n        \n        # Do Inference with TFLite Model\n        ndvi_rgb_image = ndvi_rgb_image.astype('float32')\n        prediction = make_tflite_inference(\n            ndvi_rgb_image, mobilenetv2_interpreter)\n\n        # Get Decoded Inference results\n        decoded_prediction = decode_prediction(prediction)\n\n        # Write Prediction as CSV to disk\n        logger.info(\n            f'Logging NO2 prediction \\\"{decoded_prediction}\\\" for {pic_name}')\n        exif_data = get_img_exif(pic_name, iss, decoded_prediction)\n        row = (exif_data['Date/Time'], exif_data['Location'],\n               pic_name, 
exif_data['NO2'])\n        with open(data_file, mode='a') as f:\n            writer = csv.writer(f)\n            writer.writerow(row)\n\n        # update the current time\n        now_time = datetime.now()\n        i = i+1\n\n    # End Loop over 3 hours\n\n    # Experiment End\n    end_time = datetime.now()\n    logger.info(\n        f'Finishing Bergson Astro Pi team experiment at {end_time.strftime(\"%Y-%m-%d %H:%M:%S\")}')\n    experiment_time = end_time - start_time\n    logger.info(f'Bergson Astro Pi team experiment run time {experiment_time}')\n    logger.info('################ Bergson Team Experiment End #################')

def Run(benchmark_spec):\n  _UpdateBenchmarkSpecWithFlags(benchmark_spec)\n  vm = benchmark_spec.vms[0]\n  if benchmark_spec.tpus:\n    # For MLPerf 1.0, the benchmark code differs across hardware types.\n    if (benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-32' or\n        benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-128' or\n        benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-256' or\n        benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-512' or\n        benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-1024' or\n        benchmark_spec.tpu_groups['train'].GetAcceleratorType() == 'v3-2048'):\n      run_path = (\n          '$HOME/training_results_{version}/Google/benchmarks/{model}/tpu-{tpus}'\n          .format(\n              version=VERSION.value,\n              model=benchmark_spec.benchmark,\n              tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n      code_path = (\n          '$HOME/training_results_{version}/Google/benchmarks/{model}/implementations/tpu-{tpus}-{model}'\n          .format(\n              version=VERSION.value,\n              model=benchmark_spec.benchmark,\n              tpus=benchmark_spec.tpu_groups['train'].GetAcceleratorType()))\n\n      if MASK in benchmark_spec.benchmark:\n        model = 'mask_rcnn'\n      elif GNMT in benchmark_spec.benchmark:\n        model = 'nmt'\n      else:\n        model = benchmark_spec.benchmark\n\n      mlperf_benchmark_cmd = (\n          'cd {code_path} && '\n          'export PYTHONPATH=$(pwd):$(pwd)/{model} && '\n          'cd {model} && '\n          '{run_path}/run_and_time.sh'.format(\n              code_path=code_path,\n              model=model,\n              run_path=run_path))\n\n      if SSD in benchmark_spec.benchmark:\n        mlperf_benchmark_cmd = (\n            'export '\n            'MLP_GCS_RESNET_CHECKPOINT={checkpoint}'\n            ' && {cmd}'.format(\n                checkpoint=FLAGS.mlperf_gcs_resnet_checkpoint,\n                cmd=mlperf_benchmark_cmd))\n    else:\n      raise ValueError(\n          'MLPerf configurations do not support the hardware in PKB. 
PKB may '\n 'need to be updated if this is a new TPU type.')\n\n else:\n run_sub_paths = {RESNET: 'resnet/implementations/mxnet',\n TRANSFORMER: 'transformer/implementations/pytorch',\n MINIGO: 'minigo/implementations/tensorflow',\n MASK: 'maskrcnn/implementations/pytorch',\n GNMT: 'gnmt/implementations/pytorch',\n SSD: 'ssd/implementations/pytorch',\n BERT: 'bert/implementations/pytorch',}\n benchmark_path = f'$HOME/training_results_{VERSION.value}/NVIDIA/benchmarks'\n run_path = posixpath.join(benchmark_path,\n run_sub_paths[benchmark_spec.benchmark])\n env = {\n 'DGXSYSTEM': DGXSYSTEM,\n 'NEXP': 1,\n 'PULL': 0,\n 'LOGDIR': f'/tmp/{benchmark_spec.benchmark}',\n }\n envs = {\n RESNET: {},\n TRANSFORMER: {'DATADIR': '/data/wmt/utf8'},\n MINIGO: {'CONT': 'mlperf-nvidia:minigo'},\n MASK: {},\n GNMT: {'DATADIR': '/data/gnmt'},\n SSD: {'DATADIR': '/data'},\n BERT: {}\n }\n env.update(envs[benchmark_spec.benchmark])\n\n run_script = posixpath.join(run_path, 'run_with_docker.sh')\n vm_util.ReplaceText(vm, 'SYSLOGGING=1', 'SYSLOGGING=0', run_script)\n vm_util.ReplaceText(vm, 'docker exec -it', 'docker exec -t', run_script)\n if benchmark_spec.benchmark == RESNET:\n vm_util.ReplaceText(vm, r'mpirun.*run_and_time\\.sh',\n r'.\\/run_and_time.sh', run_script)\n\n env = ' '.join(f'{key}={value}' for key, value in env.items())\n if nvidia_driver.CheckNvidiaGpuExists(vm):\n env = f'{tensorflow.GetEnvironmentVars(vm)} {env}'\n\n mlperf_benchmark_cmd = (\n f'chmod 755 {run_script} && '\n f'cd {run_path} && '\n f'{env} {run_script}')\n\n samples = []\n metadata = _CreateMetadataDict(benchmark_spec)\n stdout, _ = vm.RobustRemoteCommand(mlperf_benchmark_cmd)\n if NONE in FLAGS.mlperf_profiler:\n samples.extend(\n MakeSamplesFromOutput(\n metadata,\n stdout,\n use_tpu=bool(benchmark_spec.tpus),\n model=benchmark_spec.benchmark))\n return samples", "def benchmark_fake_8gpu_gpureplicated(self):\n params = self._shared_params()._replace(\n num_gpus=8,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n variable_update='replicated',\n all_reduce_spec='nccl',\n gradient_repacking=2)\n self._run_benchmark(params)", "def benchmark_fp16_fake_1gpu_gpuparams(self):\n params = self._shared_params_fp16()._replace(\n num_gpus=1, data_dir=self.fake_data_dir, data_name='imagenet')\n self._run_benchmark(params)", "def main(params):\n params = run_train.prepare_experiment_folder(params, FOLDER_EXPERIMENT)\n\n # run_train.check_pathes_patterns(paths)\n tl_expt.set_experiment_logger(params['path_expt'])\n logging.info('COMPUTER: \\n%r', platform.uname())\n logging.info(tl_expt.string_dict(params, desc='PARAMETERS'))\n\n tl_expt.create_subfolders(params['path_expt'], LIST_SUBFOLDER)\n\n path_csv = os.path.join(params['path_expt'], NAME_CSV_TRIPLES)\n df_paths = get_csv_triplets(\n params['path_list'], path_csv, params['path_images'], params['path_segms'], force_reload=FORCE_RERUN\n )\n\n dict_classif = seg_clf.load_classifier(params['path_classif'])\n params_clf = dict_classif['params']\n params_clf.update(params)\n logging.info(tl_expt.string_dict(params, desc='UPDATED PARAMETERS'))\n\n # perform on new images\n df_stat = pd.DataFrame()\n _wrapper_detection = partial(\n load_compute_detect_centers,\n params=params_clf,\n path_classif=params['path_classif'],\n path_output=params['path_expt'],\n )\n iterate = tl_expt.WrapExecuteSequence(_wrapper_detection, df_paths.iterrows(), nb_workers=params['nb_workers'])\n for dict_center in iterate:\n df_stat = df_stat.append(dict_center, ignore_index=True)\n 
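    # Aside (editor's note): DataFrame.append, used just above, is deprecated
    # in recent pandas and removed in 2.0. A version-proof way to accumulate
    # the same per-image rows, shown as an illustrative sketch:
    #
    #   rows = []
    #   for dict_center in iterate:
    #       rows.append(dict_center)
    #   df_stat = pd.DataFrame(rows)   # one column per dict key, as before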
df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES_TEMP))\n\n df_stat.set_index(['image'], inplace=True)\n df_stat.to_csv(os.path.join(params['path_expt'], NAME_CSV_TRIPLES))\n logging.info('STATISTIC: \\n %r', df_stat.describe())", "def benchmark_fake_8gpu_gpureplicated(self):\n params = self._shared_params()._replace(\n num_gpus=8,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n variable_update='replicated',\n all_reduce_spec='nccl',\n compact_gradient_transfer=False,\n gradient_repacking=2)\n self._run_benchmark(params)", "def run(model_dir,\n schedule,\n problem_class=gin.REQUIRED,\n optimizer_class=gin.REQUIRED,\n dataset_name=gin.REQUIRED,\n batch_size=gin.REQUIRED,\n eval_batch_size=64,\n train_steps=gin.REQUIRED,\n eval_steps=gin.REQUIRED,\n base_optimizer_class=gin.REQUIRED,\n base_optimizer_conditioning_class=None,\n iterations_per_loop=gin.REQUIRED,\n eval_weights=None,\n training_params_class=gin.REQUIRED,\n training_params_conditioning_class=None,\n preprocess=\"\",\n preprocess_eval=\"\",\n save_checkpoints_steps=None,\n keep_checkpoint_max=0,\n eval_on_test=False):\n assert schedule in (\"train\", \"eval\")\n\n if save_checkpoints_steps:\n kwargs = {\"save_checkpoints_steps\": save_checkpoints_steps}\n else:\n kwargs = {\"save_checkpoints_secs\": 60*10} # Every 10 minutes.\n\n run_config = tf_estimator.tpu.RunConfig(\n keep_checkpoint_max=keep_checkpoint_max,\n master=FLAGS.master,\n evaluation_master=FLAGS.master,\n tpu_config=tf_estimator.tpu.TPUConfig(\n iterations_per_loop=iterations_per_loop),\n **kwargs)\n # We use one estimator (potentially on TPU) for training and evaluation.\n problem = problem_class()\n model_fn = construct_model_fn(\n problem, optimizer_class, base_optimizer_class,\n eval_weights=eval_weights,\n base_optimizer_conditioning_class=base_optimizer_conditioning_class,\n training_params_class=training_params_class,\n training_params_conditioning_class=training_params_conditioning_class)\n tpu_estimator = tf_estimator.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n model_dir=model_dir,\n train_batch_size=batch_size,\n eval_batch_size=eval_batch_size,\n config=run_config)\n\n\n def input_fn_train(params):\n preprocess_fn = preprocessing.get_preprocess_fn(preprocess)\n return data.get_dataset(dataset_name, data.DatasetSplit.TRAIN,\n FLAGS.validation_percent, params[\"batch_size\"],\n preprocess_fn)\n\n def input_fn_eval(params, split):\n preprocess_fn = preprocessing.get_preprocess_fn(preprocess_eval)\n return data.get_dataset(dataset_name, split, FLAGS.validation_percent,\n params[\"batch_size\"], preprocess_fn).repeat()\n\n path_to_finished_file = os.path.join(model_dir, \"FINISHED\")\n if schedule == \"train\":\n gin_hook = gin.tf.GinConfigSaverHook(model_dir, summarize_config=True)\n tpu_estimator.train(input_fn=input_fn_train,\n hooks=[gin_hook],\n max_steps=train_steps)\n with tf.gfile.GFile(path_to_finished_file, \"w\") as finished_file:\n finished_file.write(\"1\")\n else:\n for checkpoint in iterate_checkpoints_until_file_exists(\n model_dir, path_to_finished_file):\n if eval_on_test:\n train_split = data.DatasetSplit.TRAIN_FULL\n test_split = data.DatasetSplit.TEST\n test_summary_name = \"test\"\n else:\n train_split = data.DatasetSplit.TRAIN\n test_split = data.DatasetSplit.VALID\n test_summary_name = \"valid\"\n\n eval_train = tpu_estimator.evaluate(\n input_fn=functools.partial(input_fn_eval, split=train_split),\n checkpoint_path=checkpoint,\n steps=eval_steps,\n name=\"train\")\n eval_test = 
tpu_estimator.evaluate(\n input_fn=functools.partial(input_fn_eval, split=test_split),\n checkpoint_path=checkpoint,\n steps=eval_steps,\n name=\"test\")\n\n current_step = eval_train[\"global_step\"]\n\n\n hub_modules_dir = os.path.join(model_dir, \"hub_modules\")\n if not tf.gfile.Exists(hub_modules_dir):\n tf.gfile.MkDir(hub_modules_dir)\n else:\n if not tf.gfile.IsDirectory(hub_modules_dir):\n raise ValueError(\"{0} exists and is not a directory\".format(\n hub_modules_dir))\n\n hub_module_path = os.path.join(hub_modules_dir,\n \"step-{:0>9}\".format(current_step))\n if not tf.gfile.Exists(hub_module_path):\n problem.module_spec.export(hub_module_path,\n checkpoint_path=checkpoint)\n else:\n logging.info(\"Not saving the hub module, since the path\"\n \" %s already exists\", hub_module_path)", "def test():\n image_reader = reader.ImageReader(FLAGS.test_dir,FLAGS.image_size,FLAGS.channels)\n num_images = image_reader.num_images()\n model = tf_model.GanModel(FLAGS.batch_size,FLAGS.image_size,FLAGS.gen_arch,FLAGS.batch_norm,training=False)\n\n graph = tf.Graph()\n with graph.as_default():\n placeholder = get_placeholder(FLAGS.batch_size,FLAGS.channels)\n test_fetch = get_test_fetch(placeholder, model)\n\n with tf.Session() as session:\n if FLAGS.load is not None:\n saver=tf.train.Saver()\n saver.restore(session, FLAGS.load)\n else:\n print(\"Need to specify a valid model to load: --load=path\")\n return\n\n #need to loop based on the size of the test set\n for i in range(0,num_images,FLAGS.batch_size):\n\n synth_batch = image_reader.next(min(FLAGS.batch_size,num_images-1))\n feed_dict = {placeholder:synth_batch}\n generated_images = session.run(test_fetch,feed_dict=feed_dict)\n\n #write generated_images to file\n print_images(generated_images,FLAGS.output_dir,i,FLAGS.pair_images,synth_batch)", "def main():\n # lr_decay = 0.5\n # decay_every = 100\n args = get_arguments()\n \n h, w = map(int, args.input_size.split(','))\n input_size = (h, w)\n \n tf.set_random_seed(args.random_seed)\n \n coord = tf.train.Coordinator()\n \n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader(\n args.data_list,\n input_size,\n args.random_scale,\n args.random_mirror,\n args.ignore_label,\n IMG_MEAN,\n coord)\n image_batch, label_batch = reader.dequeue(args.batch_size)\n \n # Set up tf session and initialize variables. 
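    # Aside (editor's note): the loss construction below follows the standard
    # TF1 "ignore label" idiom: flatten logits and labels, keep only pixels
    # whose label is a valid class id, then average cross-entropy over the
    # kept pixels. Minimal sketch with illustrative names:
    #
    #   raw_gt = tf.reshape(labels, [-1])
    #   keep = tf.squeeze(tf.where(tf.less_equal(raw_gt, num_classes - 1)), 1)
    #   gt = tf.cast(tf.gather(raw_gt, keep), tf.int32)
    #   logits = tf.gather(tf.reshape(raw_output, [-1, num_classes]), keep)
    #   loss = tf.reduce_mean(
    #       tf.nn.sparse_softmax_cross_entropy_with_logits(labels=gt, logits=logits))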
\n    config = tf.ConfigProto()\n    # config.gpu_options.allow_growth = True\n    # config.allow_soft_placement = True\n    # config.intra_op_parallelism_threads = 1\n    sess = tf.Session(config = config)\n    net = unext(image_batch, is_train = True, reuse = False, n_out = NUM_CLASSES)\n    \n    # Predictions: ignoring all predictions with labels greater than or equal to n_classes\n    raw_output = net.outputs\n    raw_prediction = tf.reshape(raw_output, [-1, args.num_classes])\n    label_proc = prepare_label(label_batch, tf.stack(raw_output.get_shape()[1:3]), num_classes=args.num_classes, one_hot=False) # [batch_size, h, w]\n    raw_gt = tf.reshape(label_proc, [-1,])\n    indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, args.num_classes - 1)), 1)\n    gt = tf.cast(tf.gather(raw_gt, indices), dtype = tf.int32)\n    prediction = tf.gather(raw_prediction, indices)\n    \n    main_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = prediction, labels = gt)\n\n    t_vars = tf.trainable_variables()\n    l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in t_vars if 'kernel' in v.name]\n    #reduced_loss = 0.5 * tf.reduce_mean(main_loss) + generalised_dice_loss(prediction, gt) + tf.add_n(l2_losses)\n    reduced_loss = tf.reduce_mean(main_loss) + tf.add_n(l2_losses)\n\n    # Processed predictions: for visualisation.\n    raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n    raw_output_up = tf.argmax(raw_output_up, dimension = 3)\n    pred = tf.expand_dims(raw_output_up, dim = 3)\n    \n    # Image summary.\n    images_summary = tf.py_func(inv_preprocess, [image_batch, args.save_num_images, IMG_MEAN], tf.uint8)\n    labels_summary = tf.py_func(decode_labels, [label_batch, args.save_num_images, args.num_classes], tf.uint8)\n    preds_summary = tf.py_func(decode_labels, [pred, args.save_num_images, args.num_classes], tf.uint8)\n    \n    total_summary = tf.summary.image('images', \n                                     tf.concat(axis=2, values=[images_summary, labels_summary, preds_summary]), \n                                     max_outputs=args.save_num_images) # Concatenate row-wise.\n    loss_summary = tf.summary.scalar('TotalLoss', reduced_loss)\n    summary_writer = tf.summary.FileWriter(args.snapshot_dir,\n                                           graph=tf.get_default_graph())\n\n    # Learning rate policy: exponential decay driven by the step placeholder\n    base_lr = tf.constant(args.learning_rate)\n    step_ph = tf.placeholder(dtype=tf.float32, shape=())\n    learning_rate = tf.train.exponential_decay(base_lr, step_ph, args.num_steps, args.power)\n\n    lr_summary = tf.summary.scalar('LearningRate', learning_rate)\n    #train_op = tf.train.MomentumOptimizer(learning_rate, args.momentum).minimize(reduced_loss, var_list = t_vars)\n    train_op = tf.train.AdamOptimizer(learning_rate).minimize(reduced_loss, var_list = t_vars)\n    init = tf.global_variables_initializer()\n    sess.run(init)\n    \n    # Saver for storing checkpoints of the model.\n    saver = tf.train.Saver(var_list = tf.global_variables(), max_to_keep = 10)\n\n    ckpt = tf.train.get_checkpoint_state(SNAPSHOT_DIR)\n    if ckpt and ckpt.model_checkpoint_path:\n        #restore_vars = list([t for t in tf.global_variables() if not 'uconv1' in t.name])\n        loader = tf.train.Saver(var_list = tf.global_variables())\n        load_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])\n        load(loader, sess, ckpt.model_checkpoint_path)\n    else:\n        print('No checkpoint file found.')\n        load_step = 0\n\n    # Start queue threads.\n    threads = tf.train.start_queue_runners(coord = coord, sess = sess)\n\n    # Iterate over training steps.\n    save_summary_every = 10\n    for step in range(args.num_steps):\n        start_time = time.time()\n        \n        feed_dict = {step_ph: step}\n        if not step % 
args.save_pred_every == 0:\n loss_value, _, l_summary, lr_summ = sess.run([reduced_loss, train_op, loss_summary, lr_summary], feed_dict=feed_dict)\n duration = time.time() - start_time\n elif step % args.save_pred_every == 0:\n loss_value, _, summary, l_summary, lr_summ = sess.run([reduced_loss, train_op, total_summary, loss_summary, lr_summary], feed_dict=feed_dict)\n duration = time.time() - start_time\n save(saver, sess, args.snapshot_dir, step)\n summary_writer.add_summary(summary, step)\n\n if step % save_summary_every == 0:\n \n summary_writer.add_summary(l_summary, step)\n summary_writer.add_summary(lr_summ, step)\n \n print('step {:d} \\t loss = {:.3f}, ({:.3f} sec/step)'.format(step, loss_value, duration))\n \n coord.request_stop()\n coord.join(threads)", "def benchmark_xla_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)", "def benchmark_xla_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)", "def benchmark_xla_fake_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1,\n data_dir=self.fake_data_dir,\n data_name='imagenet',\n variable_update='parameter_server',\n xla=True)\n self._run_benchmark(params)", "def main():\n args = get_arguments()\n \n # Create queue coordinator.\n coord = tf.train.Coordinator()\n \n # Load reader.\n with tf.name_scope(\"create_inputs\"):\n reader = ImageReader_MultiClass_Loss(\n args.data_dir,\n args.data_list,\n None, # No defined input size.\n RANDOM_SEED,\n False, # No random scale.\n False, # No random mirror.\n coord)\n image, l2_catg, binary_catg, hinge_catg = reader.image, reader.l2_catg, reader.binary_catg, reader.hinge_catg\n image_batch = tf.expand_dims(image, dim=0)\n binary_catg_batch = tf.expand_dims(binary_catg, dim=0)\n\n # Create network.\n net = DeepLabResNetModel({'data': image_batch}, is_training=False)\n\n # Which variables to load.\n restore_var = tf.global_variables()\n \n # Predictions.\n raw_output = net.layers['fc1_voc12']\n\n # Do the global average pooling\n raw_output_bcgd_rmvd = raw_output[:,:,:,1:]\n g_avg_pool = tf.reduce_mean(tf.reduce_mean(raw_output_bcgd_rmvd, axis=1, keep_dims=True),\\\n axis=2, keep_dims=True) # Avg across the width and height dimension -> [Bx21]\n g_avg_pool_sqzd = tf.squeeze(g_avg_pool, axis=[1, 2])\n pred = tf.nn.softmax(g_avg_pool_sqzd)\n\n # Get the class activation map\n raw_output_up = tf.image.resize_bilinear(raw_output_bcgd_rmvd, tf.shape(image_batch)[1:3,])\n raw_output_up = raw_output_up - tf.reduce_min(tf.reduce_min(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True) + EPSILON\n raw_output_up = raw_output_up / tf.reduce_max(tf.reduce_max(raw_output_up, axis=1, keep_dims=True), axis=2, keep_dims=True)\n cam_m_1 = tf.argmax(raw_output_up, dimension=3) + 1\n raw_output_catgs_rmvd = raw_output_up * tf.expand_dims(tf.expand_dims(binary_catg_batch, 1), 2)\n cam_m_2 = tf.argmax(raw_output_catgs_rmvd, dimension=3) + 1\n cam = tf.cast(tf.equal(cam_m_1, cam_m_2), tf.int64) * cam_m_1\n\n cam_batch = tf.expand_dims(cam, dim=3)\n\n # Set up tf session and initialize variables. 
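    # Aside (editor's note): the CAM masking above keeps a pixel's argmax
    # class only where it agrees with the argmax restricted to the classes
    # actually present in the image (binary_catg); disagreeing pixels become
    # background (0). The same idea in a NumPy sketch (illustrative names;
    # scores: HxWxC, present: length-C binary vector):
    #
    #   import numpy as np
    #   m1 = scores.argmax(-1) + 1                       # unrestricted argmax
    #   m2 = (scores * present[None, None, :]).argmax(-1) + 1
    #   cam = np.where(m1 == m2, m1, 0)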
\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n sess = tf.Session(config=config)\n init = tf.global_variables_initializer()\n \n sess.run(init)\n sess.run(tf.local_variables_initializer())\n \n # Load weights.\n loader = tf.train.Saver(var_list=restore_var)\n if args.restore_from is not None:\n load(loader, sess, args.restore_from)\n \n # Start queue threads.\n threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n \n # Iterate over training steps.\n for step in range(args.num_steps):\n preds, images, cams, bin_catg = sess.run([pred, image_batch, cam_batch, binary_catg])\n \"\"\"\n print(bin_catg)\n print(np.unique(np.unique(cams)))\n \"\"\"\n img = inv_preprocess(images)\n attMap = decode_labels(cams)\n output_dir = './output_maps_binary_without_norm/'\n img_name = output_dir + str(step) + '.jpg'\n map_name = output_dir + str(step) + '.png'\n misc.imsave(img_name, img[0,:,:,:])\n misc.imsave(map_name, attMap[0,:,:,:])\n coord.request_stop()\n coord.join(threads)", "def create_cub200_task_distribution(path_to_pkl,\n num_training_samples_per_class=10,\n num_test_samples_per_class=15,\n num_training_classes=20,\n meta_batch_size=5):\n\n global cub200_trainX\n global cub200_trainY\n\n global cub200_valX\n global cub200_valY\n\n global cub200_testX\n global cub200_testY\n\n\n with open(path_to_pkl, 'rb') as f:\n d = pickle.load(f)\n cub200_X, cub200_Y = d['dataset']\n\n cub200_X = cub200_X.astype(np.float32) / 255.0\n cub200_X = (cub200_X - np.asarray((0.4914, 0.4822, 0.4465))) / np.asarray((0.2023, 0.1994, 0.2010))\n\n #\n # TODO\n # random horiz flip + normalize by: \n # transforms.Normalize((0.4914, 0.4822, 0.4465),\n # (0.2023, 0.1994, 0.2010)) (mean, std)\n\n\n\n #np.random.seed(0)\n # TODO: shuffle allocation of class indices to train/val/test\n num_train = 100\n num_val = 50\n num_test = 50\n\n classes = list(set(cub200_Y))\n train_classes = classes[:num_train]\n val_classes = classes[num_train:(num_train+num_val)]\n test_classes = classes[(num_train+num_val):]\n\n train_indices = []\n val_indices = []\n test_indices = []\n\n for i in range(len(cub200_Y)):\n if cub200_Y[i] in train_classes:\n train_indices.append(i)\n elif cub200_Y[i] in val_classes:\n val_indices.append(i)\n elif cub200_Y[i] in test_classes:\n test_indices.append(i)\n\n cub200_trainX = cub200_X[train_indices]\n cub200_trainY = cub200_Y[train_indices]\n\n cub200_valX = cub200_X[val_indices]\n cub200_valY = cub200_Y[val_indices]\n\n cub200_testX = cub200_X[test_indices]\n cub200_testY = cub200_Y[test_indices]\n\n\n train_tasks_list = [ClassificationTask(cub200_trainX,\n cub200_trainY,\n num_training_samples_per_class,\n num_test_samples_per_class,\n num_training_classes,\n split_train_test=0.5)]\n\n # TODO: NOTE: HACK -- validation and test tasks use a fixed number of test-set samples, instead of the supplied\n # ones. 
This is because in MAML/FOMAML the test set is used to compute the meta-gradient, and a small number of\n    # samples is used (in the philosophy of few-shot learning, where only few samples are available).\n    # However, in this case we wish to use a few more test samples to better estimate the accuracy of the model on the validation\n    # and test tasks!\n    num_test_samples_per_class = 20\n    validation_tasks_list = [ClassificationTask(cub200_valX,\n                                                cub200_valY,\n                                                num_training_samples_per_class,\n                                                num_test_samples_per_class,\n                                                num_training_classes,\n                                                split_train_test=0.5)]\n\n    test_tasks_list = [ClassificationTask(cub200_testX,\n                                          cub200_testY,\n                                          num_training_samples_per_class,\n                                          num_test_samples_per_class,\n                                          num_training_classes,\n                                          split_train_test=0.5)]\n\n    metatrain_task_distribution = TaskDistribution(tasks=train_tasks_list,\n                                                   task_probabilities=[1.0],\n                                                   batch_size=meta_batch_size,\n                                                   sample_with_replacement=True,\n                                                   use_classes_only_once=True)\n\n    metaval_task_distribution = TaskDistribution(tasks=validation_tasks_list,\n                                                 task_probabilities=[1.0],\n                                                 batch_size=meta_batch_size,\n                                                 sample_with_replacement=True,\n                                                 use_classes_only_once=True)\n\n    metatest_task_distribution = TaskDistribution(tasks=test_tasks_list,\n                                                  task_probabilities=[1.0],\n                                                  batch_size=meta_batch_size,\n                                                  sample_with_replacement=True,\n                                                  use_classes_only_once=True)\n\n    return metatrain_task_distribution, metaval_task_distribution, metatest_task_distribution

def main():\n    args = get_arguments()\n    \n    # Create queue coordinator.\n    coord = tf.train.Coordinator()\n    \n    # Load reader.\n    with tf.name_scope(\"create_inputs\"):\n        reader = ImageReader(\n            args.data_dir,\n            args.data_list,\n            None, # No defined input size.\n            False, # No random scale.\n            False, # No random mirror.\n            args.ignore_label,\n            IMG_MEAN,\n            coord)\n        image, label = reader.image, reader.label\n        image_batch, label_batch = tf.expand_dims(image, dim=0), tf.expand_dims(label, dim=0) # Add one batch dimension.\n\n    # Create network.\n    net = DeepLabResNetModel({'data': image_batch}, is_training=False, num_classes=args.num_classes)\n\n    # Which variables to load.\n    restore_var = tf.global_variables()\n    \n    # Predictions.\n    raw_output = net.layers['fc_out']\n    raw_output = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n    raw_output = tf.argmax(raw_output, dimension=3)\n    pred = tf.expand_dims(raw_output, dim=3) # Create 4-d tensor.\n\n    # mIoU\n\n    pred_flatten = tf.reshape(pred, [-1,])\n    gt = tf.reshape(label_batch, [-1,])\n    weights = tf.cast(tf.less_equal(gt, args.num_classes - 1), tf.int32) # Ignoring all labels greater than or equal to n_classes.\n    mIoU, update_op = tf.contrib.metrics.streaming_mean_iou(predictions=pred_flatten, labels=gt, num_classes=args.num_classes, weights=weights)\n    \n    # Set up tf session and initialize variables. 
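    # Aside (editor's note): tf.contrib.metrics.streaming_mean_iou, used
    # above, accumulates a confusion matrix in TF *local* variables, and
    # `weights` zeroes out the ignored pixels. The usage pattern, sketched
    # with illustrative names:
    #
    #   miou, update_op = tf.contrib.metrics.streaming_mean_iou(
    #       predictions=preds, labels=gt, num_classes=n, weights=valid_mask)
    #   sess.run(tf.local_variables_initializer())  # metric state is local
    #   for _ in range(steps):
    #       sess.run(update_op)                      # accumulate per batch
    #   print(sess.run(miou))                        # read once at the end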
\n    config = tf.ConfigProto()\n    config.gpu_options.allow_growth = True \n    sess = tf.Session(config=config)\n    init = tf.global_variables_initializer()\n    \n    sess.run(init)\n    sess.run(tf.local_variables_initializer())\n    \n    # Load weights.\n    loader = tf.train.Saver(var_list=restore_var)\n\n    ckpt = tf.train.get_checkpoint_state(SNAPSHOT_DIR)\n\n    if ckpt and ckpt.model_checkpoint_path:\n        loader = tf.train.Saver(var_list=restore_var)\n        load_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])\n        load(loader, sess, ckpt.model_checkpoint_path)\n    else:\n        print('No checkpoint file found.')\n        load_step = 0\n\n    # Start queue threads.\n    threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n    if not os.path.exists(SAVE_DIR):\n        os.makedirs(SAVE_DIR)\n\n    for step in range(args.num_steps):\n        preds, _ = sess.run([pred, update_op])\n\n        if IS_SAVE:\n            msk = decode_labels(preds, num_classes=args.num_classes)\n            im = Image.fromarray(msk[0])\n            filename = 'mask' + str(step) + '.png'\n            im.save(SAVE_DIR + filename)\n\n        if step % 10 == 0:\n            print('step {0} mIoU: {1}'.format(step, mIoU.eval(session=sess)))\n\n    coord.request_stop()\n    coord.join(threads)

def one_shot_test(self, model, support_set_size, number_of_tasks_per_alphabet,\n                  is_validation):\n\n    # Set some variables that depend on dataset\n    if is_validation:\n        alphabets = self._validation_alphabets\n        print('\\nMaking One Shot Task on validation alphabets:')\n    else:\n        alphabets = self._evaluation_alphabets\n        print('\\nMaking One Shot Task on evaluation alphabets:')\n\n    mean_global_accuracy = 0\n\n    for alphabet in alphabets:\n        mean_alphabet_accuracy = 0\n        for _ in range(number_of_tasks_per_alphabet):\n            images, _ = self.get_one_shot_batch(\n                support_set_size, is_validation=is_validation)\n            probabilities = model.predict_on_batch(images)\n\n            # Added this condition because we noticed that sometimes the outputs\n            # of the classifier were almost the same for all images, meaning that\n            # the argmax would always be 0 by definition.\n            if np.argmax(probabilities) == 0 and probabilities.std()>0.01:\n                accuracy = 1.0\n            else:\n                accuracy = 0.0\n\n            mean_alphabet_accuracy += accuracy\n            mean_global_accuracy += accuracy\n\n        mean_alphabet_accuracy /= number_of_tasks_per_alphabet\n\n        print(alphabet + ' alphabet' + ', accuracy: ' +\n              str(mean_alphabet_accuracy))\n        if is_validation:\n            self._current_validation_alphabet_index += 1\n        else:\n            self._current_evaluation_alphabet_index += 1\n\n    mean_global_accuracy /= (len(alphabets) *\n                             number_of_tasks_per_alphabet)\n\n    print('\\nMean global accuracy: ' + str(mean_global_accuracy))\n\n    # reset counter\n    if is_validation:\n        self._current_validation_alphabet_index = 0\n    else:\n        self._current_evaluation_alphabet_index = 0\n\n    return mean_global_accuracy

def __call__(self, params):\n    logging.info('Running __call__ function...')\n    batch_size = self._train_batch_size\n    # For MCTS, the number of features for each trajectory is unknown beforehand\n    num_features = None\n\n    if self._global_step_value % self._iterations_per_loop == 0:\n      logging.info('Update iterator (gs=%d)...', self._global_step_value)\n      # Feature/Labels Placeholders\n      self.features_ph = {\n          'mcts_features':\n              tf.placeholder(\n                  tf.float32,\n                  shape=[num_features, self._env_state_space],\n                  name='mcts_state_ph'),\n          'policy_features':\n              tf.placeholder(\n                  tf.float32,\n                  shape=[num_features, self._env_state_space],\n                  name='policy_state_ph'),\n      }\n      self.labels_ph = {\n          'action_tensor':\n              tf.placeholder(\n                  tf.float32,\n                  shape=[num_features, 
self._env_action_space],\n                  name='action_ph'),\n          'value_tensor':\n              tf.placeholder(\n                  tf.float32, shape=[num_features], name='value_ph'),\n          'return_tensor':\n              tf.placeholder(\n                  tf.float32, shape=[num_features], name='return_ph'),\n          'old_neg_logprob_tensor':\n              tf.placeholder(\n                  tf.float32, shape=[num_features], name='old_neg'),\n          'mean_tensor':\n              tf.placeholder(\n                  tf.float32,\n                  shape=[num_features, self._env_action_space],\n                  name='mean_ph'),\n          'logstd_tensor':\n              tf.placeholder(\n                  tf.float32,\n                  shape=[num_features, self._env_action_space],\n                  name='logstd_ph'),\n          'mcts_enable_tensor':\n              tf.placeholder(\n                  tf.bool, shape=[num_features], name='mcts_enable_ph'),\n          'policy_action_tensor':\n              tf.placeholder(\n                  tf.float32,\n                  shape=[num_features, self._env_action_space],\n                  name='policy_action_ph'),\n          'policy_value_tensor':\n              tf.placeholder(\n                  tf.float32, shape=[num_features], name='policy_value_ph'),\n          'policy_return_tensor':\n              tf.placeholder(\n                  tf.float32, shape=[num_features], name='policy_return_ph'),\n          'policy_old_neg_logprob_tensor':\n              tf.placeholder(\n                  tf.float32, shape=[num_features], name='policy_old_neg'),\n      }\n      # Create the dataset\n      dataset = tf.data.Dataset.from_tensor_slices(\n          (self.features_ph, self.labels_ph))\n      dataset = dataset.shuffle(buffer_size=self._max_horizon)\n      dataset = dataset.batch(batch_size, drop_remainder=True)\n\n      # repeat until the loop is done\n      dataset = dataset.repeat()\n      if self._use_tpu:\n        dataset = dataset.map(functools.partial(self._set_shapes, batch_size))\n        dataset = dataset.prefetch(2)\n      self._iterator = dataset.make_initializable_iterator()\n      return self._iterator.get_next()\n    else:\n      return self._iterator.get_next()

def renset50_128_gpu_1_real(self):\n    test_id = 'resnet50.gpu_1.128.real'\n    args = {}\n    config = self.build_resnet_test_config(test_id, args, batch_size=128, gpus=1)\n    self.run_test_suite(config)

def run_time_operation(self, learning_option, cluster):\r\n        # get input\r\n        # TODO: change \"inputs\" : ['input'] to \"inputs\": ['input', 'cell']\r\n        cells_ = self.get_input('input')\r\n        input_ = learning_option.get('static_rnn_input')\r\n        #input_ = self.get_input('input')\r\n        #indim = self.get_dimension('input')\r\n\r\n        # get attr\r\n        # optional field\r\n        init = self.get_attr('initial_state', default=None)\r\n        length = self.get_attr('length', default=None)\r\n        scope = self.get_attr('scope', default='default')\r\n        # TODO: tmp\r\n        if scope is None:\r\n            scope = self.name\r\n\r\n        num_steps = learning_option.get('num_steps')\r\n        is_train = learning_option.get('is_train')\r\n\r\n        # get worker info: worker num, device type, device num\r\n        device = self.get_attr('device')\r\n        num = re.sub('[^0-9]', '', cluster.get('types')[device])\r\n        type = cluster.get('types')[device].replace(str(num), '')\r\n\r\n        # construct API\r\n        def apiConstructor():\r\n            batch_size = tf.cond(is_train, lambda: tf.constant(learning_option.get('batch_size'), dtype=tf.int32),\r\n                                 lambda: tf.constant(learning_option.get('test_batch_size'), dtype=tf.int32))\r\n            if init == 'ZERO': # WARNING: only supports a zero initial state in this version\r\n                initial_state = cells_.zero_state(batch_size, tf.float32)\r\n            else:\r\n                initial_state = None\r\n            learning_option['initial_state'] = initial_state\r\n\r\n            input_unstack = tf.unstack(input_, num=num_steps, axis=1)\r\n            output, state = tf.contrib.rnn.static_rnn(cells_, input_unstack, initial_state=initial_state,\r\n                                                      dtype=tf.float32, sequence_length=length)\r\n\r\n            # set output\r\n            self.set_output('output', output)\r\n            self.set_output('state', 
state)\r\n\r\n        with tf.variable_scope(self.name):\r\n            # single node, model parallelism: explicit worker mapping\r\n            # data parallelism: equally duplicate model\r\n            if learning_option.get(\"parallel\", None) != \"DP\":\r\n                with tf.device('/job:worker/task:{0}/{1}:{2}'.format(device, type, num)):\r\n                    apiConstructor()\r\n            else:\r\n                apiConstructor()

def benchmark_fp16_fake_1gpu_gpuparams(self):\n    params = self._shared_params_fp16()._replace(\n        num_gpus=1,\n        data_dir=self.fake_data_dir,\n        data_name='imagenet',\n        variable_update='parameter_server')\n    self._run_benchmark(params)

def main():\n    args = get_arguments()\n    \n    # Create queue coordinator.\n    coord = tf.train.Coordinator()\n\n    # Load reader.\n    with tf.name_scope(\"create_inputs\"):\n        reader = ImageReader(\n            args.data_dir,\n            args.data_list,\n            None, # No defined input size.\n            False, # No random scale.\n            False, # No random mirror.\n            args.ignore_label,\n            IMG_MEAN,\n            coord)\n        image, label = reader.image, reader.label\n        image_batch, label_batch = tf.expand_dims(image, dim=0), tf.expand_dims(label, dim=0) # Add one batch dimension.\n\n    # Create network.\n    net = DeepLabResNetModel({'data': image_batch}, is_training=False, num_classes=args.num_classes)\n\n    # Which variables to load.\n    restore_var = tf.global_variables()\n\n    # Predictions.\n    raw_output = net.layers['fc1_voc12']\n    raw_output = tf.image.resize_bilinear(raw_output, tf.shape(image_batch)[1:3,])\n    #raw_output = tf.argmax(raw_output, dimension=3)\n    #pred = tf.expand_dims(raw_output, dim=3) # Create 4-d tensor.\n    pred = raw_output\n\n    # Set up TF session and initialize variables.\n    config = tf.ConfigProto()\n    config.gpu_options.allow_growth = True\n    sess = tf.Session(config=config)\n    init = tf.global_variables_initializer()\n\n    sess.run(init)\n    sess.run(tf.local_variables_initializer())\n\n    # Load weights.\n    loader = tf.train.Saver(var_list=restore_var)\n    if args.restore_from is not None:\n        load(loader, sess, args.restore_from)\n\n    # Start queue threads.\n    threads = tf.train.start_queue_runners(coord=coord, sess=sess)\n\n    with open(args.data_list) as f:\n        content = f.readlines()\n\n    content = [x.strip() for x in content]\n\n    for index, value in enumerate(content):\n        print(\"outputting \" + str(index))\n        img = tf.image.decode_png(tf.read_file(value.split()[0]), channels=3)\n        raw_img = misc.imread(value.split()[0])\n        print(type(raw_img))\n        # Convert RGB to BGR.\n        img_r, img_g, img_b = tf.split(axis=2, num_or_size_splits=3, value=img)\n        img = tf.cast(tf.concat(axis=2, values=[img_b, img_g, img_r]), dtype=tf.float32)\n        # Extract mean.\n        img -= IMG_MEAN \n        # Predictions.\n        raw_output = net.layers['fc1_voc12']\n\n        raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img)[0:2,])\n        #pred = raw_output_up\n        probabilities = tf.nn.softmax(raw_output_up)\n        pred = tf.argmax(raw_output_up, dimension=3)\n        pred = tf.expand_dims(pred, dim=3)\n        # Perform inference.\n        preds, probs = sess.run([pred, probabilities])\n        print(preds.shape)\n        print(probs.shape)\n        print(\"probs\")\n        print(probs)\n        softmax = probs[0, :, :, :]\n        print(\"softmax\")\n        print(softmax)\n        print(softmax.shape)\n        print(type(softmax))\n        processed_probabilities = softmax.transpose((2, 0, 1))\n        print(processed_probabilities.shape)\n        print(type(processed_probabilities))\n        crf_processed = performCRF(processed_probabilities, raw_img)\n\n        im_preds = Image.fromarray(np.uint8(preds[0, :, :, 0]))\n\n        print(\"preds shape\", preds.shape)\n        msk = decode_labels(preds, num_classes=args.num_classes)\n        im = 
Image.fromarray(msk[0])\n\n        print(\"crf_processed shape\", crf_processed.shape)\n        crf_processed = crf_processed.reshape(1, crf_processed.shape[0], crf_processed.shape[1], 1)\n        msk_crf = decode_labels(crf_processed, num_classes=args.num_classes)\n        im_crf = Image.fromarray(msk_crf[0])\n\n        if not os.path.exists(args.save_dir):\n            os.makedirs(args.save_dir)\n        #im_preds.save(args.save_dir +str(index).zfill(8) +'_predlabels_'+args.train_set+'.png')\n        im.save(args.save_dir +str(index).zfill(8) +'_pred_'+args.train_set+'.png')\n        im_crf.save(args.save_dir +str(index).zfill(8) +'_predcrf_'+args.train_set+'.png')

def _tpu_build(self):\n    def _define_model(features, labels, mode, params):\n      data_source = (features, labels)\n      self.outputs = {}\n      self.losses = {}\n      self.otters = {}\n      outputs, losses, others = self.define_model(data_source, mode)\n\n      if mode == tf.estimator.ModeKeys.EVAL:\n        return tpu.TPUEstimatorSpec(\n            mode=mode, loss=losses, eval_metrics=others)\n      if mode == tf.estimator.ModeKeys.PREDICT:\n        return tpu.TPUEstimatorSpec(\n            mode=mode, predictions=outputs\n        )\n      if mode == tf.estimator.ModeKeys.TRAIN:\n        self.losses['train'] = losses\n        self._build_optimizer(tpu_support=True)\n        if len(self.optimize_ops) != 1:\n          logging.error('Implementation error: more than one optimizer defined')\n          logging.warning(' [*] Selecting only the first optimizer')\n        return tpu.TPUEstimatorSpec(\n            mode=mode, loss=losses[0], train_op=self.optimize_ops[0]\n        )\n\n    tpu_name = ['node-1']  # TODO Bring outside\n    tpu_iterations = 500  # TODO Bring outside\n    tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n        tpu_name)\n\n    run_config = tf.contrib.tpu.RunConfig(\n        model_dir=self.output_path,\n        cluster=tpu_cluster_resolver,\n        session_config=tf.ConfigProto(\n            allow_soft_placement=True, log_device_placement=True),\n        tpu_config=tpu.TPUConfig(tpu_iterations),\n    )\n\n    self.estimator = tpu.TPUEstimator(\n        model_fn=_define_model,\n        use_tpu=True,\n        train_batch_size=32*4,  #self.dataset['train'].batch_size,\n        eval_batch_size=32*4,  #self.dataset['validation'].batch_size,\n        config=run_config,\n        params={\"data_dir\": self.data_dir}\n    )

def test_is_trainable(estimator_fn, machine_settings):\n    # Setup\n    batch_size = 128  # Must be divisible by number of replicas (8 for TPU v2)\n    crop_size = 24\n    eval_count = 1024\n    eval_steps = int(eval_count / batch_size)\n    assert eval_steps * batch_size == eval_count\n    estimator = estimator_fn(\n        micronet.cifar.linear_model.create_model, batch_size, batch_size)\n\n    # Replace with lambda?\n    def input_fn(params):\n        # Only the TPUEstimator needs to pass batch_size to the input_fn.\n        if 'batch_size' in params:\n            assert params['batch_size'] == batch_size\n        del params\n        mini_ds = cifar_ds.train_dataset(\n            cloud_storage=machine_settings.is_cloud)\n        mini_ds = mini_ds.map(\n            cifar_ds.preprocess_fn(augment=False, crop_to=crop_size))\n        # Take a small amount and repeat so that the test can show training\n        # in a smaller amount of steps (so the test runs quickly).\n        mini_ds.take(500).repeat()\n        return mini_ds.batch(batch_size, drop_remainder=True)\n\n    # Test\n    # 1. 
Check that the untrained model predicts randomly.\n    #\n    # I want the test to pass 99% of the time.\n    # For a 1000 trial experiment with success probability of 1% (100 classes),\n    # CDF_inverse(0.01) ~= 3\n    # CDF_inverse(0.99) ~= 19\n    # (from binomial dist calculator:\n    # https://www.di-mgt.com.au/binomial-calculator.html)\n    # TODO: is it valid to assume a random output from the untrained model?\n    results = estimator.evaluate(input_fn, steps=eval_steps)\n    assert 3/eval_count < results[micronet.estimator.TOP_1_ACCURACY_KEY] \\\n        <= 19/eval_count\n\n    # 2. Check that the model can be trained.\n    # Using the eval_steps as the max training steps. Could use something else.\n    estimator.train(input_fn, max_steps=eval_steps)\n\n    # 3. Check that the training has increased the model's accuracy.\n    # Results is a dict containing the metrics defined by the model_fn.\n    # FIXME 4: I should encapsulate/separate the metric creation so that it\n    # is easy to assume that certain metrics are present.\n    results = estimator.evaluate(input_fn, steps=eval_steps)\n    # We should expect some improvement over the random case, 1/100. Running\n    # it a few times gave ~4.5%, so using a value a little lower to make sure\n    # the test reliably passes (while still being useful).\n    assert results[micronet.estimator.TOP_1_ACCURACY_KEY] >= 0.040

def __init__(self, img_rows=400, img_cols=400, vgg_weights=\"imagenet\", inference_only=False, net_name='default', gpus=1, vgg_device=None):\n    \n    # Settings\n    self.img_rows = img_rows\n    self.img_cols = img_cols\n    self.img_overlap = 30\n    self.inference_only = inference_only\n    self.net_name = net_name\n    self.gpus = gpus\n    self.vgg_device = vgg_device\n\n    # Scaling for VGG input\n    self.mean = [0.485, 0.456, 0.406]\n    self.std = [0.229, 0.224, 0.225]\n    \n    # get PowerSpect_CMB\n    reader = np.zeros((2507,))\n    fp = open('./data/COM_PowerSpect_CMB-base-plikHM-TTTEEE-lowl-lowE-lensing-minimum-theory_R3.01.txt')\n    \n    for i,line in enumerate(fp):\n        if i >= 1:\n            reader[i-1] = line.split()[1]\n    \n    fp.close() \n    readers = np.log(reader)\n    self.cl = K.constant(readers)\n    # Assertions\n    assert self.img_rows >= 256, 'Height must be >=256 pixels'\n    assert self.img_cols >= 256, 'Width must be >=256 pixels'\n\n    # Set current epoch\n    self.current_epoch = 0\n    \n    # VGG layers to extract features from (first maxpooling layers, see p. 
7 of paper)\n self.vgg_layers = [3, 6, 10]\n\n # Instantiate the vgg network\n if self.vgg_device:\n with tf.device(self.vgg_device):\n self.vgg = self.build_vgg(vgg_weights)\n else:\n self.vgg = self.build_vgg(vgg_weights)\n \n # Create UNet-like model\n if self.gpus <= 1:\n self.model, inputs_mask= self.build_pconv_unet()\n self.compile_pconv_unet(self.model, inputs_mask) \n else:\n with tf.device(\"/cpu:0\"):\n self.model, inputs_mask = self.build_pconv_unet()\n self.model = multi_gpu_model(self.model, gpus=self.gpus)\n self.compile_pconv_unet(self.model, inputs_mask)", "def launch_ddp_task(\n gpu_id: Sequence[int], n_gpus: int, run_task_func: Callable,\n *run_task_arg\n):\n ddp_port = int(os.environ['DDP_PORT'])\n init_process_group(\n backend='nccl', init_method=f'tcp://localhost:{ddp_port}',\n world_size=n_gpus, rank=gpu_id)\n # tasks must be built after initializing the process group\n run_task_func([gpu_id], *run_task_arg)", "def renset50_64_gpu_1_real(self):\n test_id = 'resnet50.gpu_1.64.real'\n args = {}\n config = self.build_resnet_test_config(test_id, args, batch_size=64, gpus=1)\n self.run_test_suite(config)", "def test_image_task(self):\n args = BASE_ARGS.copy()\n args.update(IMAGE_ARGS)\n\n valid, test = testing_utils.train_model(args)\n self.assertLessEqual(\n valid['ppl'], 8.6, 'failed to train image_seq2seq on image task'\n )", "def renset50_64_gpu_2_real(self):\n test_id = 'resnet50.gpu_2.64.real'\n args = {}\n config = self.build_resnet_test_config(test_id, args, batch_size=64, gpus=2)\n self.run_test_suite(config)", "def main():\n grid_tester_cpu = GridTesterCPU()\n\n # parse args, load configuration and create all required objects.\n grid_tester_cpu.setup_grid_experiment()\n\n # GO!\n grid_tester_cpu.run_grid_experiment()", "def launch(rank, size, fn, backend='tcp',\n train_data=None, train_target=None,\n test_data=None, test_target=None,\n do_log=False,\n comms=None):\n # dist.init_process_group(backend, rank=rank, world_size=size)\n # Setting CUDA FOR TRAINING\n # use_cuda = torch.cuda.is_available()\n # device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n device = torch.device(\"cpu\")\n\n total_communication_time = 0\n local_training_time = 0\n local_testing_time = 0\n if (rank == 0):\n local_training_time = time.time()\n\n model, total_communication_time = fn(world_rank=rank, world_size=size, train_data=train_data,\n train_target=train_target, do_log=False, comms=comms)\n if (rank == 0):\n local_training_time = time.time() - local_training_time\n if (rank == 0):\n local_testing_time = time.time()\n\n predict(rank=rank, model=model, device=device, test_data=test_data, test_target=test_target, do_log=do_log,\n comms=comms)\n\n if (rank == 0):\n local_testing_time = time.time() - local_testing_time\n print(\"Total Training Time : {}\".format(local_training_time))\n print(\"Total Testing Time : {}\".format(local_testing_time))\n save_log(\"../stats.csv\",\n stat=\"{},{},{},{}\".format(size, local_training_time, total_communication_time, local_testing_time))", "def _benchmark_cnn(self):\n self.single_session = False\n (image_producer_ops, enqueue_ops, fetches) = self._build_model()\n fetches_list = nest.flatten(list(fetches.values()))\n main_fetch_group = tf.group(*fetches_list)\n execution_barrier = None\n \n\n global_step = tf.train.get_global_step()\n with tf.device(self.global_step_device):\n with tf.control_dependencies([main_fetch_group]):\n fetches['inc_global_step'] = global_step.assign_add(1)\n\n\n local_var_init_op = 
tf.local_variables_initializer()\n variable_mgr_init_ops = [local_var_init_op]\n with tf.control_dependencies([local_var_init_op]):\n variable_mgr_init_ops.extend(self.variable_mgr.get_post_init_ops())\n local_var_init_op_group = tf.group(*variable_mgr_init_ops)\n\n summary_op = tf.summary.merge_all()\n is_chief = (not self.job_name or self.task_index == 0)\n summary_writer = None\n \n # We run the summaries in the same thread as the training operations by\n # passing in None for summary_op to avoid a summary_thread being started.\n # Running summaries and training operations in parallel could run out of\n # GPU memory.\n saver = tf.train.Saver(\n self.variable_mgr.savable_variables(), save_relative_paths=True)\n ready_for_local_init_op = None\n \n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=self.params.train_dir,\n ready_for_local_init_op=ready_for_local_init_op,\n local_init_op=local_var_init_op_group,\n saver=saver,\n global_step=global_step,\n summary_op=None,\n save_model_secs=self.params.save_model_secs,\n summary_writer=summary_writer)\n\n step_train_times = []\n start_standard_services = (\n self.params.summary_verbosity >= 1 or\n self.dataset.queue_runner_required())\n target = self.cluster_manager.get_target() if self.cluster_manager else ''\n with sv.managed_session(\n master=target,\n config=create_config_proto(self.params),\n start_standard_services=start_standard_services) as sess:\n image_producer = cnn_util.ImageProducer(sess, image_producer_ops,\n self.batch_group_size)\n image_producer.start()\n for i in xrange(len(enqueue_ops)):\n sess.run(enqueue_ops[:(i + 1)])\n image_producer.notify_image_consumption()\n self.init_global_step, = sess.run([global_step])\n if not self.single_session:\n global_step_watcher = GlobalStepWatcher(\n sess, global_step,\n self.num_workers * self.num_warmup_batches +\n self.init_global_step,\n self.num_workers * (self.num_warmup_batches + self.num_batches) - 1)\n global_step_watcher.start()\n \n\n log_fn('Running warm up')\n local_step = -1 * self.num_warmup_batches\n done_fn = global_step_watcher.done\n loop_start_time = time.time()\n while not done_fn():\n if local_step == 0:\n log_fn('Done warm up')\n \n header_str = 'Step\\tImg/sec\\tloss'\n \n log_fn(header_str)\n \n # reset times to ignore warm up batch\n step_train_times = []\n loop_start_time = time.time()\n \n fetch_summary = None\n summary_str = benchmark_one_step(\n sess, fetches, local_step,\n self.batch_size * (self.num_workers if self.single_session else 1),\n step_train_times, self.trace_filename, image_producer, self.params,\n fetch_summary)\n \n local_step += 1\n loop_end_time = time.time()\n # Waits for the global step to be done, regardless of done_fn.\n \n num_steps = global_step_watcher.num_steps()\n elapsed_time = global_step_watcher.elapsed_time()\n\n average_wall_time = elapsed_time / num_steps if num_steps > 0 else 0\n images_per_sec = ((self.num_workers * self.batch_size) / average_wall_time\n if average_wall_time > 0 else 0)\n\n log_fn('-' * 64)\n log_fn('total images/sec: %.2f' % images_per_sec)\n log_fn('-' * 64)\n image_producer.done()\n #if is_chief:\n # store_benchmarks({'total_images_per_sec': images_per_sec}, self.params)\n # Save the model checkpoint.\n \n sv.stop()\n return {\n 'num_workers': self.num_workers,\n 'num_steps': num_steps,\n 'average_wall_time': average_wall_time,\n 'images_per_sec': images_per_sec\n }", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train ,validation,test = 
datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def run(run_py, func):\n args = eval(\"test_args.{}\".format(func))\n print(args)\n\n res = {}\n\n default_args = {}\n for arg, value in args.items():\n default_args[arg] = value[0]\n\n current_args = dict2argstr(default_args)\n cmd = \"export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7; \\\n python -m paddle.distributed.launch --selected_gpus=0,1,2,3 {} {}\".format(\n run_py, current_args)\n status = os.system(cmd)\n if status != 0:\n res[cmd] = \"FAIL\"\n else:\n res[cmd] = \"SUCCESS\"\n cmd = \"rm -rf checkpoints\"\n os.system(cmd)\n\n for arg, value in args.items():\n if len(value) <= 1:\n continue\n current_args_dict = copy.deepcopy(default_args)\n for item in value[1:]:\n current_args_dict[arg] = item\n current_args = dict2argstr(current_args_dict)\n cmd = \"export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7; \\\n python -m paddle.distributed.launch --selected_gpus=0,1,2,3 {} {}\".format(\n run_py, current_args)\n status = os.system(cmd)\n if status != 0:\n res[cmd] = \"FAIL\"\n else:\n res[cmd] = \"SUCCESS\"\n cmd = \"rm -rf checkpoints\"\n os.system(cmd)\n\n total_num = len(res)\n fail_num = 0\n for cmd, status in res.items():\n if status == \"FAIL\":\n fail_num += 1\n print(\"-\" * 30)\n print(\"Failure Rate: {} / {}\".format(str(fail_num), str(total_num)))\n print(\"-\" * 30)\n print(\"Detail:\")\n for cmd, status in res.items():\n print(\"{} : {}\".format(status, cmd))", "def train(**kwargs):\n print(\"train(**kwargs) - kwargs: %s\" % (kwargs)) if debug_model else ''\n run_results = { \"status\": \"ok\",\n \"sys_info\": [],\n \"training\": [],\n }\n\n\n # Check if necessary local directories exist:\n if not os.path.exists(cfg.Retina_LocalDataRecords):\n print(\"[INFO] %s is not found locally, creating...\" % \n cfg.Retina_LocalDataRecords)\n os.makedirs(cfg.Retina_LocalDataRecords)\n if not os.path.exists(cfg.Retina_LocalModelsServe):\n print(\"[INFO] %s is not found locally, creating...\" % \n cfg.Retina_LocalModelsServe)\n os.makedirs(cfg.Retina_LocalModelsServe) \n\n # use the schema\n schema = cfg.TrainArgsSchema()\n # deserialize key-word arguments\n train_args = schema.load(kwargs)\n\n # Take parameters defined via deepaas by a user\n train_epochs = train_args['train_epochs']\n batch_size = train_args['batch_size']\n num_gpus = train_args['num_gpus']\n epochs_between_evals = train_args['epochs_between_evals']\n upload_back = train_args['upload_back']\n if debug_model:\n print(\"train_args:\", train_args)\n print(type(train_args['train_epochs']), 
type(train_args['batch_size']))\n print(\"Number of GPUs:\", train_args['num_gpus'], num_gpus)\n\n # from deep-nextcloud into the container\n e1=time.time()\n # check if retinopathy_tr.tfrecord.XX or retinopathy_va.tfrecord.XX files exist locally,\n # if not -> download them from the RemoteStorage\n train_files = 0\n val_files = 0\n for f in os.listdir(cfg.Retina_LocalDataRecords):\n f_path = os.path.join(cfg.Retina_LocalDataRecords, f)\n if (os.path.isfile(f_path) and cfg.Retina_TrainingData in f):\n train_files += 1\n if (os.path.isfile(f_path) and cfg.Retina_ValidationData in f):\n val_files += 1\n\n if train_files < 100 or val_files < 20:\n # Retina_RemoteDataRecords and Retina_LocalDataRecords are defined in config.py #vk\n print(\"[INFO] Either training or validation files NOT found locally, download them from %s\" % \n (cfg.Retina_RemoteDataRecords))\n output, error = rclone_copy(cfg.Retina_RemoteDataRecords, cfg.Retina_LocalDataRecords)\n if error:\n message = \"[ERROR] training data not copied. rclone returned: \" + error\n raise Exception(message)\n\n download_time=time.time()-e1\n time.sleep(60)\n\n e2=time.time()\n ### mimic retinopathy_main.py main()\n # we first delete all the FLAGS\n FLAGS = flags.FLAGS\n #FLAGS.unparse_flags()\n for name in list(FLAGS):\n delattr(FLAGS, name)\n\n tf.logging.set_verbosity(tf.logging.INFO)\n #tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)\n # define default FLAGS for retinopathy_main and _run_loop\n retimain.define_retinopathy_flags(batch_size=str(batch_size),\n train_epochs=str(train_epochs),\n num_gpus=str(num_gpus),\n epochs_between_evals=str(epochs_between_evals))\n\n # build list of FLAG names and parse them via FLAGS(list)(IMPORTANT!) #vk\n flag_names = []\n for name in FLAGS:\n flag_names.append(name)\n\n # According to the docs, actual parsing happens by either calling\n # FLAGS(list_of_arguments) or by app.run()\n FLAGS(flag_names)\n # call actual training with the set flags\n with logger.benchmark_context(flags.FLAGS):\n graph_zip_path = retimain.run_retinopathy(flags.FLAGS)\n\n\n try:\n graph_zip_path = graph_zip_path.decode()\n except (UnicodeDecodeError, AttributeError):\n pass\n graph_zip_path = graph_zip_path.rstrip()\n\n print(\"[INFO] Call of the training script returned: \", graph_zip_path)\n training_time=time.time()-e2\n time.sleep(60)\n\n e3=time.time()\n # Retina_LocalModelsServe and Retina_RemoteModelsUpload are defined in config.py #vk\n if(upload_back and os.path.exists(graph_zip_path)):\n graph_zip_dir, graph_zip_name = os.path.split(graph_zip_path)\n print(\"[INFO] Uploading {} to {} ...\".format(graph_zip_name, \n cfg.Retina_RemoteModelsUpload))\n output, error = rclone_copy(graph_zip_path,\n os.path.join(cfg.Retina_RemoteModelsUpload, \n graph_zip_name))\n if error:\n print(\"[ERROR] rclone returned: {}\".format(error))\n else:\n # if there is no error, remove zip file and the graph directory\n savedmodel_dir, _ = os.path.splitext(graph_zip_name) # split name, ext\n savedmodel_path = os.path.join(graph_zip_dir, savedmodel_dir)\n ## Try to remove tree, if it exists\n print(\"[INFO] Uploaded, deleting local {} and {}...\".format(graph_zip_path,\n savedmodel_path))\n os.remove(graph_zip_path) # remove zipped file\n if os.path.exists(savedmodel_path):\n shutil.rmtree(savedmodel_path) # remove corresponding directory\n else:\n print(\"[INFO] Saved model path, {}, doesn't exitst!\".format(\n savedmodel_path)) \n else:\n print(\"[INFO] Created zip file of the graph, %s, was NOT uploaded!\" % 
graph_zip_path)\n\n upload_time=time.time()-e3\n\n train_files_size = 0\n val_files_size = 0\n for f in os.listdir(cfg.Retina_LocalDataRecords):\n f_path = os.path.join(cfg.Retina_LocalDataRecords, f)\n if (os.path.isfile(f_path) and cfg.Retina_TrainingData in f):\n train_files_size += os.stat(f_path).st_size\n if (os.path.isfile(f_path) and cfg.Retina_ValidationData in f):\n val_files_size += os.stat(f_path).st_size\n\n message = {\n \"Message\": \"Training finished!\",\n \"Download time\": download_time, \n \"Training time\": training_time,\n \"Upload time\": upload_time,\n \"Training set size\": convert_bytes(train_files_size), \n \"Validation set size\": convert_bytes(val_files_size)\n }\n return message", "def run_cross_validation_process_test(info_string, models):\n\n batch_size = 64\n num_fold = 0\n yfull_test = []\n x_test_id = []\n nfolds = len(models)\n\n datagen_test = ImageDataGenerator(\n featurewise_center=True,\n featurewise_std_normalization=True,\n preprocessing_function=pre_processing_image\n )\n\n # print(image_array.shape)\n x_test, x_test_id = load_images_test()\n print(len(x_test))\n image_test_array = np.asarray(x_test, dtype=np.float32)\n start_time = time.time()\n print(\"Datagen.fit started\")\n datagen_test.fit(image_test_array, augment=False)\n print('Fit Completed: {} seconds'.format(round(time.time() - start_time, 2)))\n\n for i in range(nfolds):\n model = models[i]\n num_fold += 1\n print('Start KFold number {} from {}'.format(num_fold, nfolds))\n\n #test_prediction = model.predict_generator(generator=datagen_test.fit(image_test_array, seed=79),\n # steps=len(image_test_array)/32, max_q_size=20, workers=8, verbose=1)\n\n test_prediction = model.predict(image_test_array, batch_size=batch_size, verbose=1)\n\n yfull_test.append(test_prediction)\n\n test_res = merge_several_folds_mean(yfull_test, nfolds)\n info_string = 'loss_' + info_string \\\n + '_folds_' + str(nfolds)\n create_submission(test_res, x_test_id, info_string)\n d=pd.DataFrame(test_res,columns=FISH_CLASSES)", "def train_model(batch_size, epochs, model_type, model_save_path,\n optimizer,\n loss_name=\"bce_dice\",\n resize_img=400,\n tpu=False,\n **kwargs):\n if model_type == 'unet':\n preprocess_input = None\n elif model_type == 'ternaus':\n preprocess_input = vgg16.preprocess_input\n elif model_type == 'resnet':\n preprocess_input = resnet50.preprocess_input\n else:\n preprocess_input = None\n\n img = datatools.load_image(\"./data/training/train/images/satImage_001.png\")\n\n if tpu:\n tf.keras.backend.clear_session()\n\n if model_type == 'unet':\n model = convolutional_model_building(img.shape, **kwargs)\n elif model_type == 'ternaus':\n model = ternaus_model_building(img.shape)\n elif model_type == 'resnet':\n model = resnet_model_building(img.shape)\n else:\n model = ternaus_model_building(img.shape)\n\n if tpu:\n model = tf.contrib.tpu.keras_to_tpu_model(\n model,\n strategy=tf.contrib.tpu.TPUDistributionStrategy(\n tf.contrib.cluster_resolver.TPUClusterResolver(\n tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])\n )\n )\n\n if loss_name == \"bce_dice\":\n loss = bce_dice_loss\n elif loss_name == \"bce\":\n loss = losses.binary_crossentropy\n elif loss_name == \"dice\":\n loss = dice_loss\n\n model.compile(optimizer=optimizer,\n loss=loss,\n metrics=[dice_loss, f1_score, 'accuracy'])\n model.summary()\n print(\"Trainable: \", model.trainable)\n\n checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath=model_save_path,\n verbose=2,\n monitor=\"val_f1_score\",\n save_best_only=True)\n\n datagen = 
datatools.custom_image_generator(\"./data/training/train/images/\",\n \"./data/training/train/groundtruth/\", batch_size,\n preprocess=preprocess_input)\n\n data_gen_val = datatools.custom_image_generator(\"./data/training/validation/images/\",\n \"./data/training/validation/groundtruth/\",\n random=False, batch_size=batch_size)\n\n files = os.listdir(\"./data/training/train/images/\")\n files_test = os.listdir(\"./data/test_set_images/\")\n history = model.fit_generator(datagen,\n steps_per_epoch=int(np.ceil(len(files) / float(batch_size))),\n epochs=epochs,\n validation_data=data_gen_val,\n validation_steps=int(\n np.ceil(len(files_test) / float(batch_size))),\n callbacks=[checkpointer])\n\n return history, model", "def get_model_fn(num_gpus, variable_strategy, num_workers):\n\n def _resnet_model_fn(features, labels, mode, params):\n \"\"\"Resnet model body.\n\n Support single host, one or more GPU training. Parameter distribution can\n be either one of the following scheme.\n 1. CPU is the parameter server and manages gradient updates.\n 2. Parameters are distributed evenly across all GPUs, and the first GPU\n manages gradient updates.\n\n Args:\n features: a list of tensors, one for each tower\n labels: a list of tensors, one for each tower\n mode: ModeKeys.TRAIN or EVAL\n params: Hyperparameters suitable for tuning\n Returns:\n A EstimatorSpec object.\n \"\"\"\n is_training = mode == tf.estimator.ModeKeys.TRAIN\n weight_decay = params.weight_decay\n momentum = params.momentum\n\n tower_features = features\n tower_labels = labels\n tower_losses = []\n tower_gradvars = []\n tower_preds = []\n\n # channels first (NCHW) is normally optimal on GPU and channels last (NHWC)\n # on CPU. The exception is Intel MKL on CPU which is optimal with\n # channels_last.\n data_format = params.data_format\n if not data_format:\n if num_gpus == 0:\n data_format = \"channels_last\"\n else:\n data_format = \"channels_first\"\n\n if num_gpus == 0:\n num_devices = 1\n device_type = \"cpu\"\n else:\n num_devices = num_gpus\n device_type = \"gpu\"\n\n for i in range(num_devices):\n worker_device = \"/{}:{}\".format(device_type, i)\n if variable_strategy == \"CPU\":\n device_setter = cifar10_utils.local_device_setter(\n worker_device=worker_device\n )\n elif variable_strategy == \"GPU\":\n device_setter = cifar10_utils.local_device_setter(\n ps_device_type=\"gpu\",\n worker_device=worker_device,\n ps_strategy=tf.contrib.training.GreedyLoadBalancingStrategy(\n num_gpus, tf.contrib.training.byte_size_load_fn\n ),\n )\n with tf.variable_scope(\"resnet\", reuse=bool(i != 0)):\n with tf.name_scope(\"tower_%d\" % i) as name_scope:\n with tf.device(device_setter):\n loss, gradvars, preds = _tower_fn(\n is_training,\n weight_decay,\n tower_features[i],\n tower_labels[i],\n data_format,\n params.num_layers,\n params.batch_norm_decay,\n params.batch_norm_epsilon,\n )\n tower_losses.append(loss)\n tower_gradvars.append(gradvars)\n tower_preds.append(preds)\n if i == 0:\n # Only trigger batch_norm moving mean and variance update from\n # the 1st tower. 
Ideally, we should grab the updates from all\n # towers but these stats accumulate extremely fast so we can\n # ignore the other stats from the other towers without\n # significant detriment.\n update_ops = tf.get_collection(\n tf.GraphKeys.UPDATE_OPS, name_scope\n )\n\n # Now compute global loss and gradients.\n gradvars = []\n with tf.name_scope(\"gradient_averaging\"):\n all_grads = {}\n for grad, var in itertools.chain(*tower_gradvars):\n if grad is not None:\n all_grads.setdefault(var, []).append(grad)\n for var, grads in six.iteritems(all_grads):\n # Average gradients on the same device as the variables\n # to which they apply.\n with tf.device(var.device):\n if len(grads) == 1:\n avg_grad = grads[0]\n else:\n avg_grad = tf.multiply(\n tf.add_n(grads), 1.0 / len(grads)\n )\n gradvars.append((avg_grad, var))\n\n # Device that runs the ops to apply global gradient updates.\n consolidation_device = (\n \"/gpu:0\" if variable_strategy == \"GPU\" else \"/cpu:0\"\n )\n with tf.device(consolidation_device):\n # Suggested learning rate scheduling from\n # https://github.com/ppwwyyxx/tensorpack/blob/master/examples/ResNet/cifar10-resnet.py#L155\n num_batches_per_epoch = cifar10.Cifar10DataSet.num_examples_per_epoch(\n \"train\"\n ) // (\n params.train_batch_size * num_workers\n )\n boundaries = [\n num_batches_per_epoch * x\n for x in np.array([80, 120, 160], dtype=np.int64)\n ]\n staged_lr = [\n params.learning_rate * x for x in [1, 0.1, 0.01, 0.001]\n ]\n\n learning_rate = tf.train.piecewise_constant(\n tf.train.get_global_step(), boundaries, staged_lr\n )\n\n loss = tf.reduce_mean(tower_losses, name=\"loss\")\n\n # examples_sec_hook = cifar10_utils.ExamplesPerSecondHook(\n # params.train_batch_size, every_n_steps=10\n # )\n\n # tensors_to_log = {\"learning_rate\": learning_rate, \"loss\": loss}\n\n # logging_hook = tf.train.LoggingTensorHook(\n # tensors=tensors_to_log, every_n_iter=100\n # )\n\n # train_hooks = [logging_hook, examples_sec_hook]\n train_hooks = []\n\n # Hyper-parameter \"momentum\" is only used for the Momentum Optimizer\n # Other optimizers use their default parameters.\n if params.optimizer == \"momentum\":\n optimizer = tf.train.MomentumOptimizer(\n learning_rate=learning_rate, momentum=momentum\n )\n elif params.optimizer == \"adam\":\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n elif params.optimizer == \"adagrad\":\n optimizer = tf.train.AdagradOptimizer(\n learning_rate=learning_rate\n )\n elif params.optimizer == \"adadelta\":\n optimizer = tf.train.AdadeltaOptimizer(\n learning_rate=learning_rate\n )\n elif params.optimizer == \"sgd\":\n optimizer = tf.train.GradientDescentOptimizer(\n learning_rate=learning_rate\n )\n elif params.optimizer == \"rmsprop\":\n optimizer = tf.train.RMSPropOptimizer(\n learning_rate=learning_rate\n )\n else:\n raise ValueError(\"unrecognized optimizer name\")\n # TODO: RAdam is implemented in tensorflow-addons v0.6, which requires tf 2.0\n # Upgrade code by removing tf.contrib modules.\n # optimizer = tfa.optimizers.RectifiedAdam(lr=learning_rate)\n\n if params.sync:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer, replicas_to_aggregate=num_workers\n )\n sync_replicas_hook = optimizer.make_session_run_hook(\n params.is_chief\n )\n train_hooks.append(sync_replicas_hook)\n\n # Create single grouped train op\n train_op = [\n optimizer.apply_gradients(\n gradvars, global_step=tf.train.get_global_step()\n )\n ]\n train_op.extend(update_ops)\n train_op = tf.group(*train_op)\n\n predictions = {\n \"classes\": 
tf.concat(\n [p[\"classes\"] for p in tower_preds], axis=0\n ),\n \"probabilities\": tf.concat(\n [p[\"probabilities\"] for p in tower_preds], axis=0\n ),\n }\n stacked_labels = tf.concat(labels, axis=0)\n metrics = {\n \"accuracy\": tf.metrics.accuracy(\n stacked_labels, predictions[\"classes\"]\n )\n }\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n training_hooks=train_hooks,\n eval_metric_ops=metrics,\n )\n\n return _resnet_model_fn", "def train_gatn(atn_model_fn, clf_model_fn, student_model_fn, dataset_name, target_class_id,\n alpha=1.5, beta=0.01, epochs=1, batchsize=128, lr=1e-3,\n atn_name=None, clf_name=None, student_name=None, device=None, evaluate=True):\n\n if device is None:\n if tf.test.is_gpu_available():\n device = '/gpu:0'\n else:\n device = '/cpu:0'\n\n # Load the dataset\n (_, _), (X_test, y_test) = generic_utils.load_dataset(dataset_name)\n\n # Split test set to get adversarial train and test split.\n (X_train, y_train), (X_test, y_test) = generic_utils.split_dataset(X_test, y_test)\n\n num_classes = y_train.shape[-1]\n image_shape = X_train.shape[1:]\n\n # cleaning data\n idx = (np.argmax(y_train, axis=-1) != target_class_id)\n X_train = X_train[idx]\n y_train = y_train[idx]\n\n batchsize = min(batchsize, X_train.shape[0])\n\n num_train_batches = X_train.shape[0] // batchsize + int(X_train.shape[0] % batchsize != 0)\n num_test_batches = X_test.shape[0] // batchsize + int(X_test.shape[0] % batchsize != 0)\n\n # build the datasets\n train_dataset, test_dataset = generic_utils.prepare_dataset(X_train, y_train,\n X_test, y_test,\n batch_size=batchsize,\n device=device)\n\n # construct the model on the correct device\n with tf.device(device):\n if clf_name is not None:\n clf_model = clf_model_fn(num_classes, name=clf_name) # type: generic_utils.BaseClassicalModel\n else:\n clf_model = clf_model_fn(num_classes) # type: generic_utils.BaseClassicalModel\n\n if student_name is not None:\n student_model = student_model_fn(num_classes, name=student_name) # type: tf.keras.Model\n else:\n student_model = student_model_fn(num_classes) # type: tf.keras.Model\n\n if atn_name is not None:\n atn_model = atn_model_fn(image_shape, name=atn_name) # type: tf.keras.Model\n else:\n atn_model = atn_model_fn(image_shape) # type: tf.keras.Model\n\n lr_schedule = tf.train.exponential_decay(lr, tf.train.get_or_create_global_step(),\n decay_steps=num_train_batches, decay_rate=0.99,\n staircase=True)\n\n optimizer = tf.train.AdamOptimizer(lr_schedule)\n\n atn_checkpoint = tf.train.Checkpoint(model=atn_model, optimizer=optimizer,\n global_step=tf.train.get_or_create_global_step())\n\n student_checkpoint = tf.train.Checkpoint(model=student_model)\n\n clf_model_name = clf_model.name if clf_name is None else clf_name\n basepath = 'weights/%s/%s/' % (dataset_name, clf_model_name)\n\n if not os.path.exists(basepath):\n os.makedirs(basepath, exist_ok=True)\n\n checkpoint_path = basepath + clf_model_name + '.pkl'\n\n # Restore the weights of the classifier\n if os.path.exists(checkpoint_path):\n clf_model = clf_model.restore(checkpoint_path)\n print(\"Classifier model restored !\")\n\n # Restore student model\n student_model_name = student_model.name if student_name is None else student_name\n basepath = 'gatn_weights/%s/%s/' % (dataset_name, student_model_name)\n\n if not os.path.exists(basepath):\n os.makedirs(basepath, exist_ok=True)\n\n student_checkpoint_path = basepath + student_model_name\n\n 
student_checkpoint.restore(student_checkpoint_path)\n\n atn_model_name = atn_model.name if atn_name is None else atn_name\n gatn_basepath = 'gatn_weights/%s/%s/' % (dataset_name, atn_model_name + \"_%d\" % (target_class_id))\n\n if not os.path.exists(gatn_basepath):\n os.makedirs(gatn_basepath, exist_ok=True)\n\n atn_checkpoint_path = gatn_basepath + atn_model_name + \"_%d\" % (target_class_id)\n\n best_loss = np.inf\n\n print()\n\n # train loop\n for epoch_id in range(epochs):\n train_loss = tfe.metrics.Mean()\n train_acc = tfe.metrics.Mean()\n train_target_rate = tfe.metrics.Mean()\n\n with tqdm(train_dataset,\n desc=\"Epoch %d / %d: \" % (epoch_id + 1, epochs),\n total=num_train_batches, unit=' samples') as iterator:\n\n for train_iter, (x, y) in enumerate(iterator):\n # Train the ATN\n\n if train_iter >= num_train_batches:\n break\n\n with tf.GradientTape() as tape:\n _, x_grad = compute_target_gradient(x, student_model, target_class_id)\n x_adversarial = atn_model(x, x_grad, training=True)\n\n y_pred = student_model(x, training=False)\n y_pred_adversarial = student_model(x_adversarial, training=False)\n\n loss_x = tf.losses.mean_squared_error(x, x_adversarial, reduction=tf.losses.Reduction.NONE)\n loss_y = targetted_mse(y_pred_adversarial, y_pred, target_class_id, alpha)\n\n loss_x = tf.reduce_sum(tf.reshape(loss_x, [loss_x.shape[0], -1]), axis=-1)\n loss_y = tf.reduce_mean(loss_y, axis=-1)\n\n loss_y = tf.cast(loss_y, tf.float32)\n\n loss = beta * loss_x + loss_y\n\n # update model weights\n gradients = tape.gradient(loss, atn_model.variables)\n grad_vars = zip(gradients, atn_model.variables)\n\n optimizer.apply_gradients(grad_vars, tf.train.get_or_create_global_step())\n\n loss_val = tf.reduce_mean(loss)\n train_loss(loss_val)\n\n # Evaluate student for attacks\n acc_val, target_count = generic_utils.target_accuracy(y_pred, y_pred_adversarial, target_class_id)\n\n train_acc(acc_val)\n train_target_rate(target_count)\n\n print(\"\\nTraining accuracy : %0.6f | Training num_adv : %0.6f\" % (\n train_acc.result(), train_target_rate.result(),\n ))\n\n train_loss_val = train_loss.result()\n if best_loss > train_loss_val:\n print(\"Saving weights as training loss improved from %0.5f to %0.5f!\" % (best_loss, train_loss_val))\n print()\n\n best_loss = train_loss_val\n\n atn_checkpoint.write(atn_checkpoint_path)\n\n if evaluate:\n test_acc = tfe.metrics.Mean()\n test_target_rate = tfe.metrics.Mean()\n\n with tqdm(test_dataset, desc='Evaluating',\n total=num_test_batches, unit=' samples') as iterator:\n\n for test_iter, (x, y) in enumerate(iterator):\n\n if test_iter >= num_test_batches:\n break\n\n _, x_test_grad = compute_target_gradient(x, student_model, target_class_id)\n x_test_adversarial = atn_model(x, x_test_grad, training=False)\n\n y_pred_adversarial = clf_model(x_test_adversarial, training=False)\n\n # compute and update the test target_accuracy\n acc_val, target_rate = generic_utils.target_accuracy(y, y_pred_adversarial, target_class_id)\n\n test_acc(acc_val)\n test_target_rate(target_rate)\n\n print(\"\\nTest Acc = %0.6f | Target num_adv = %0.6f\" % (test_acc.result(), test_target_rate.result()))\n\n print(\"\\n\\n\")\n print(\"Finished training !\")", "def renset50_128_gpu_4_real(self):\n test_id = 'resnet50.gpu_4.128.real'\n args = {}\n config = self.build_resnet_test_config(test_id, args, batch_size=128, gpus=4)\n self.run_test_suite(config)", "def run_task(snapshot_config, *_):\n with LocalTFRunner(snapshot_config) as runner:\n env = 
TfEnv(gym.make('InvertedDoublePendulum-v2'))\n\n action_noise = GaussianStrategy(env.spec, max_sigma=0.1, min_sigma=0.1)\n\n policy = ContinuousMLPPolicy(env_spec=env.spec,\n hidden_sizes=[400, 300],\n hidden_nonlinearity=tf.nn.relu,\n output_nonlinearity=tf.nn.tanh)\n\n qf = ContinuousMLPQFunction(name='ContinuousMLPQFunction',\n env_spec=env.spec,\n hidden_sizes=[400, 300],\n action_merge_layer=0,\n hidden_nonlinearity=tf.nn.relu)\n\n qf2 = ContinuousMLPQFunction(name='ContinuousMLPQFunction2',\n env_spec=env.spec,\n hidden_sizes=[400, 300],\n action_merge_layer=0,\n hidden_nonlinearity=tf.nn.relu)\n\n replay_buffer = SimpleReplayBuffer(env_spec=env.spec,\n size_in_transitions=int(1e6),\n time_horizon=250)\n\n td3 = TD3(env_spec=env.spec,\n policy=policy,\n policy_lr=1e-4,\n qf_lr=1e-3,\n qf=qf,\n qf2=qf2,\n replay_buffer=replay_buffer,\n target_update_tau=1e-2,\n n_epoch_cycles=20,\n n_train_steps=1,\n smooth_return=False,\n discount=0.99,\n buffer_batch_size=100,\n min_buffer_size=1e4,\n exploration_strategy=action_noise,\n policy_optimizer=tf.train.AdamOptimizer,\n qf_optimizer=tf.train.AdamOptimizer)\n\n runner.setup(td3, env)\n runner.train(n_epochs=500, n_epoch_cycles=20, batch_size=250)", "def run_global_model(X_train, y_train, cohorts_train,\n X_val, y_val, cohorts_val,\n X_test, y_test, cohorts_test,\n all_tasks, fname_keys, fname_results,\n FLAGS):\n\n model_fname_parts = ['global', 'lstm_shared', str(FLAGS.num_lstm_layers), 'layers', str(FLAGS.lstm_layer_size), 'units',\n str(FLAGS.num_dense_shared_layers), 'dense_shared', str(FLAGS.dense_shared_layer_size), \n 'dense_units', 'readmission']\n\n if FLAGS.test_time:\n model_path = FLAGS.experiment_name + \\\n '/models/' + \"_\".join(model_fname_parts)\n model = load_model(model_path)\n cohort_aucs = []\n y_pred = model.predict(X_test)\n\n # all bootstrapped AUCs\n for task in all_tasks:\n if FLAGS.test_bootstrap:\n all_aucs = bootstrap_predict(X_test, y_test, cohorts_test, task, model, return_everything=True,\n test=True, num_bootstrap_samples=FLAGS.num_test_bootstrap_samples)\n cohort_aucs.append(np.array(all_aucs))\n else:\n y_pred_in_cohort = y_pred[cohorts_test == task]\n y_true_in_cohort = y_test[cohorts_test == task]\n auc = roc_auc_score(y_true_in_cohort, y_pred_in_cohort)\n cohort_aucs.append(auc)\n\n if FLAGS.test_bootstrap:\n # Macro AUC\n cohort_aucs = np.array(cohort_aucs)\n cohort_aucs = np.concatenate(\n (cohort_aucs, np.expand_dims(np.mean(cohort_aucs, axis=0), 0)))\n\n # Micro AUC\n all_micro_aucs = bootstrap_predict(X_test, y_test, cohorts_test, 'all', model,\n return_everything=True, \n test=True, num_bootstrap_samples=FLAGS.num_test_bootstrap_samples)\n cohort_aucs = np.concatenate(\n (cohort_aucs, np.array([all_micro_aucs])))\n\n else:\n # Macro AUC\n macro_auc = np.mean(cohort_aucs)\n cohort_aucs.append(macro_auc)\n\n # Micro AUC\n micro_auc = roc_auc_score(y_test, y_pred)\n cohort_aucs.append(micro_auc)\n\n suffix = 'single' if not FLAGS.test_bootstrap else 'all'\n test_auc_fname = 'test_auc_on_global_' + suffix\n np.save(FLAGS.experiment_name + '/results/' +\n test_auc_fname, cohort_aucs)\n return\n\n model = create_single_task_model(FLAGS.num_lstm_layers, FLAGS.lstm_layer_size,\n FLAGS.num_dense_shared_layers, FLAGS.dense_shared_layer_size, X_train.shape[1:], 1)\n early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=4)\n model_dir = FLAGS.experiment_name + \\\n '/checkpoints/' + \"_\".join(model_fname_parts)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n 
model_fname = model_dir + '/{epoch:02d}-{val_loss:.2f}.hdf5'\n checkpointer = tf.keras.callbacks.ModelCheckpoint(model_fname, monitor='val_loss', verbose=1)\n\n model.fit(X_train, y_train,\n epochs=FLAGS.epochs, batch_size=100,\n sample_weight=samp_weights,\n callbacks=[checkpointer, early_stopping],\n validation_data=(X_val, y_val))\n\n model.save(FLAGS.experiment_name + '/models/' +\n \"_\".join(model_fname_parts))\n\n cohort_aucs = []\n y_pred = model.predict(X_val)\n for task in all_tasks:\n print('Global Model AUC on ', task, ':')\n if FLAGS.no_val_bootstrap:\n try:\n auc = roc_auc_score(\n y_val[cohorts_val == task], y_pred[cohorts_val == task])\n except:\n auc = np.nan\n cohort_aucs.append(auc)\n else:\n min_auc, max_auc, avg_auc = bootstrap_predict(\n X_val, y_val, cohorts_val, task, model, num_bootstrap_samples=FLAGS.num_val_bootstrap_samples)\n cohort_aucs.append(np.array([min_auc, max_auc, avg_auc]))\n print (\"(min/max/average): \")\n\n print(cohort_aucs[-1])\n\n cohort_aucs = np.array(cohort_aucs)\n\n # Add Macro AUC\n cohort_aucs = np.concatenate(\n (cohort_aucs, np.expand_dims(np.nanmean(cohort_aucs, axis=0), 0)))\n\n # Add Micro AUC\n if FLAGS.no_val_bootstrap:\n micro_auc = roc_auc_score(y_val, y_pred)\n cohort_aucs = np.concatenate((cohort_aucs, np.array([micro_auc])))\n else:\n min_auc, max_auc, avg_auc = bootstrap_predict(\n X_val, y_val, cohorts_val, 'all', model, num_bootstrap_samples=FLAGS.num_val_bootstrap_samples)\n cohort_aucs = np.concatenate(\n (cohort_aucs, np.array([[min_auc, max_auc, avg_auc]])))\n\n # Save Results\n current_run_params = [FLAGS.num_lstm_layers, FLAGS.lstm_layer_size,\n FLAGS.num_dense_shared_layers, FLAGS.dense_shared_layer_size]\n try:\n print('appending results.')\n global_model_results = np.load(fname_results)\n global_model_key = np.load(fname_keys)\n global_model_results = np.concatenate(\n (global_model_results, np.expand_dims(cohort_aucs, 0)))\n global_model_key = np.concatenate(\n (global_model_key, np.array([current_run_params])))\n\n except Exception as e:\n global_model_results = np.expand_dims(cohort_aucs, 0)\n global_model_key = np.array([current_run_params])\n\n np.save(fname_results, global_model_results)\n np.save(fname_keys, global_model_key)\n print('Saved global results.')", "def main():\n dataset = MNIST(BATCH_SIZE)\n \n inputs = Value(type=tf.float32, shape=(None, 784), cls=None)\n targets = Value(type=tf.int64, shape=(None), cls=10)\n fc_hidden = FCHidden(weights=[300, 150])\n\n config = Config(inputs, targets, fc_hidden, LEARNING_RATE)\n\n network_builder = FFNetworkBuilder(config)\n hidden_builder = FFHiddenBuilder()\n _ = network_builder.build_network(hidden_builder)\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n trainer = Trainer(network_builder, train_config)\n trainer.train(dataset)" ]
[ "0.63982433", "0.63317746", "0.6286564", "0.62145746", "0.6099212", "0.6017336", "0.59935385", "0.5987405", "0.59732693", "0.5968696", "0.59411806", "0.58942217", "0.58580977", "0.58526134", "0.5831714", "0.58247334", "0.5802177", "0.57978106", "0.5790498", "0.5788643", "0.57848793", "0.5765881", "0.5725443", "0.5709479", "0.5701298", "0.5694823", "0.56922585", "0.5683883", "0.56756234", "0.5645465", "0.5624749", "0.56015843", "0.5598688", "0.5594936", "0.5585717", "0.5583159", "0.5582825", "0.5572068", "0.5570267", "0.55691487", "0.55571645", "0.55260205", "0.55251974", "0.5510304", "0.55064225", "0.549926", "0.54810953", "0.5472606", "0.54695594", "0.5469356", "0.5466782", "0.5466459", "0.5461894", "0.5461214", "0.5454253", "0.54540795", "0.5451537", "0.5440887", "0.54377675", "0.5436222", "0.5429684", "0.54201716", "0.5419608", "0.5417298", "0.5415388", "0.5406894", "0.54027754", "0.540093", "0.540093", "0.540093", "0.5392736", "0.53880036", "0.53877205", "0.53863347", "0.538492", "0.53815067", "0.5380968", "0.537761", "0.53767574", "0.5367028", "0.5361165", "0.5359632", "0.5351755", "0.53469574", "0.53418344", "0.5341005", "0.53394336", "0.53393334", "0.5339158", "0.53303176", "0.5323909", "0.53105646", "0.5309753", "0.5306727", "0.53023404", "0.5293547", "0.5284902", "0.5281186", "0.52791035", "0.5271406" ]
0.6722839
0
Takes a results list and puts it in a pandas dataframe together with other relevant variables (runs, generations, and language class)
import numpy as np
import pandas as pd


def language_stats_to_dataframe(results, n_runs, n_gens, possible_form_lengths):
    """Converts a nested results list of per-class proportions into a long-format
    pandas dataframe with one row per (run, generation, language class) combination."""
    # With a single possible form length there are 4 language classes; otherwise 7.
    if len(possible_form_lengths) == 1:
        n_language_classes = 4
    else:
        n_language_classes = 7  # TODO: or should this be 6 (i.e. collapsing the two different reduplication strategies?)
    column_proportion = np.array(results)
    # If the raw results distinguish more classes than we report, sum the two
    # compositional sub-classes (indices 2 and 3) into a single 'compositional' class.
    if n_language_classes == 4 and column_proportion.shape[2] > n_language_classes:
        column_proportion_compositional_summed = np.zeros((n_runs, n_gens, n_language_classes))
        for r in range(len(column_proportion_compositional_summed)):
            for g in range(len(column_proportion_compositional_summed[0])):
                column_proportion_compositional_summed[r][g] = np.array([column_proportion[r][g][0], column_proportion[r][g][1], column_proportion[r][g][2]+column_proportion[r][g][3], column_proportion[r][g][4]])
        column_proportion = column_proportion_compositional_summed.flatten()
    else:
        column_proportion = column_proportion.flatten()
    # Repeat the run index and the generation index once per language class, so the
    # flattened proportion column lines up row-by-row with its run/generation labels.
    column_runs = []
    for i in range(n_runs):
        for j in range(n_gens):
            for k in range(n_language_classes):
                column_runs.append(i)
    column_runs = np.array(column_runs)
    column_generation = []
    for i in range(n_runs):
        for j in range(n_gens):
            for k in range(n_language_classes):
                column_generation.append(j)
    column_generation = np.array(column_generation)
    # Class labels, in the same fixed order as the proportions above.
    column_type = []
    for i in range(n_runs):
        for j in range(n_gens):
            if len(possible_form_lengths) == 1:
                column_type.append('degenerate')
                column_type.append('holistic')
                column_type.append('compositional')
                column_type.append('other')
            else:
                column_type.append('degenerate')
                column_type.append('holistic')
                column_type.append('holistic_diversify_signal')
                column_type.append('compositional')
                column_type.append('compositional_reduplicate_segments')
                column_type.append('compositional_reduplicate_whole_signal')
                column_type.append('other')
    data = {'run': column_runs,
            'generation': column_generation,
            'proportion': column_proportion,
            'class': column_type}
    lang_class_prop_over_gen_df = pd.DataFrame(data)
    return lang_class_prop_over_gen_df
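A minimal usage sketch for the function above; the dummy `results` construction here is an assumption (in the original pipeline `results` would come from the simulation runs), chosen only to match the expected (n_runs, n_gens, n_classes) shape:

# Hypothetical usage sketch: build dummy per-class proportions and convert them.
import numpy as np

n_runs, n_gens = 2, 3
possible_form_lengths = [2]  # a single form length, so 4 language classes
# results[r][g] is assumed to hold the 4 class proportions at generation g of run r;
# a Dirichlet draw guarantees each row of proportions sums to 1.
results = np.random.dirichlet(np.ones(4), size=(n_runs, n_gens)).tolist()

lang_df = language_stats_to_dataframe(results, n_runs, n_gens, possible_form_lengths)
print(lang_df.head())  # columns: run, generation, proportion, class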
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dataframe(result):\n # List of elements in the search result\n names = []\n snippet = []\n url = []\n \n # Append search results to list\n for j,item in enumerate(result):\n for i,element in enumerate(result[j]['items']):\n names.append(result[j]['items'][i]['title'])\n snippet.append(result[j]['items'][i]['snippet'])\n url.append(result[j]['items'][i]['link'])\n \n # Create a dataframe\n df = pd.DataFrame(list(zip(names, snippet,url)), \n columns =['name', 'snippet','url']) \n \n return df", "def make_results_df(results):\n max_val = max(x[1] for x in results)\n\n df = []\n for i in range(max_val + 1):\n df.append([])\n for j in range(max_val + 1):\n df[-1].append(results.get((i, j), np.nan))\n return pd.DataFrame(df)", "def prepare_wg_data(results):\n wg_df = pd.DataFrame(results)\n wg_df['search_engine'] = 'wg-gesucht.de'\n return wg_df", "def _make_results_dataframe(self):\n LOG.debug(\"Creating Results Dataframes.\")\n results_df = tfs.TfsDataFrame(index=self.twiss_df.index)\n results_df[\"S\"] = self.twiss_df[\"S\"]\n return results_df", "def create_df_recommendations(api_results):\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n for items in api_results['tracks']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df", "def collect_results( results_dir = \"experiments\" ) :\n #%%\n import pandas as pd\n exps_fn = os.listdir( results_dir )\n dics = []\n for fname in exps_fn :\n with open( results_dir + \"/\" + fname, \"rt\", encoding=\"utf8\" ) as f_out :\n dics.append( json.load( f_out ) )\n\n results_df = pd.DataFrame( dics )\n #%%\n return results_df", "def parseResults(result):\n # Split the results based on newline characters\n results_cut = result.text.split('\\n')[12:-49]\n # Initialize lists of the values to be parsed from results_cut \n visit_id = []\n name = []\n ra_hour = []\n ra_min = []\n ra_sec = []\n dec_deg = []\n dec_min = []\n dec_sec = []\n v_mag = []\n ra_motion = []\n dec_motion = []\n # Iterate through results_cut and append them to the respective lists\n for line in results_cut:\n visit_id.append(int(line[6:12]))\n name.append(line[12:36])\n ra_hour.append(int(line[38:40]))\n ra_min.append(int(line[41:43]))\n ra_sec.append(float(line[44:48]))\n dec_deg.append(int(line[49:52]))\n dec_min.append(int(line[53:55]))\n dec_sec.append(int(line[56:58]))\n try:\n v_mag.append(float(line[60:64]))\n except ValueError:\n # If there is no reported v_mag for the object, return -99\n v_mag.append(-99.0)\n ra_motion.append('%s%i' % (line[84], int(line[82:84])))\n dec_motion.append('%s%i' % (line[91], int(line[89:91])))\n # Initialize the pandas dataframe to be returned\n results_df = pd.DataFrame(np.array([visit_id, name, ra_hour, ra_min, ra_sec, \n dec_deg, dec_min, dec_sec, v_mag, \n ra_motion, dec_motion]).T, \n columns=['visit_id', 'name', 'ra_hour', 'ra_min', 'ra_sec', \n 'dec_deg', 'dec_min', 'dec_sec', 'v_mag', \n 'ra_motion', 'dec_motion'])\n # Add the lists to the dataframe\n results_df['visit_id'] = pd.to_numeric(results_df['visit_id'])\n 
results_df['ra_hour'] = pd.to_numeric(results_df['ra_hour'])\n results_df['ra_min'] = pd.to_numeric(results_df['ra_min'])\n results_df['ra_sec'] = pd.to_numeric(results_df['ra_sec'])\n results_df['dec_deg'] = pd.to_numeric(results_df['dec_deg'])\n results_df['dec_min'] = pd.to_numeric(results_df['dec_min'])\n results_df['dec_sec'] = pd.to_numeric(results_df['dec_sec'])\n results_df['v_mag'] = pd.to_numeric(results_df['v_mag'])\n results_df['ra_motion'] = pd.to_numeric(results_df['ra_motion'])\n results_df['dec_motion'] = pd.to_numeric(results_df['dec_motion'])\n \n return results_df", "def get_results(r):\n myDict = {}\n for name in r[\"results\"]:\n myDict[name[\"name\"]] = {\n \"rank\": name[\"rank\"],\n \"ticker\": name[\"ticker\"],\n \"upvotes\": name[\"upvotes\"],\n \"mentions\": name[\"mentions\"],\n \"mentions_24h_ago\": name[\"mentions_24h_ago\"],\n }\n df = pd.DataFrame.from_dict(myDict, orient=\"index\")\n df[\"rank\"] = df[\"rank\"].astype(int)\n df[\"upvotes\"] = df[\"upvotes\"].astype(int)\n df[\"mentions\"] = df[\"mentions\"].astype(int)\n df[\"mentions_24h_ago\"] = df[\"mentions_24h_ago\"].astype(int)\n\n df[\"delta_mentions_24h\"] = df[\"mentions\"] - df[\"mentions_24h_ago\"]\n df = df[~(df[\"upvotes\"] <= 1000)]\n df = df.sort_values(by=[\"delta_mentions_24h\"], ascending=False)\n return df", "def _build_results(self):\n results = {}\n cols = []\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n cols.append(get_rate_column(pol, adjustment=adj, generated=False))\n cols.append(get_column(pol, adjustment=adj))\n cols.append(\"net_consumed_mwh\")\n for ba in self.regions:\n results[ba] = pd.DataFrame(\n index=self.generation.index, columns=cols, dtype=np.float64\n )\n return results", "def analyzeResults(self):\n results = [self.analyzeClusterPerformance(c) for c in self.clusterLabels]\n rDF = pd.DataFrame(results)\n self.resultList.append(rDF)", "def interpret_results(rules):\n df_res = rules.sort_values(by=['lift'], ascending=False)\n # df_res.head()\n return df_res", "def check_results_as_data_frame(check_to_check_results: Dict[Check, CheckResult]) -> DataFrame:\n check_names = []\n status = []\n descriptions = []\n for check_result in check_to_check_results.values():\n check_names.append(check_result.check)\n status.append(check_result.status)\n descriptions.append(check_result.description)\n return DataFrame(zip(check_names, status, descriptions), columns=[\"check_name\", \"status\", \"description\"])", "def export_results(self):\n problemIDs = list(set([result.problemID for result in self.results]))\n configIDs = list(set([result.configID for result in self.results]))\n\n labels = []\n labels.extend(TestResults._fields)\n labels.extend(SizeMetrics._fields) \n # Remove unused columns\n labels.remove(\"size_metrics\")\n labels.remove(\"problemID\")\n labels.remove(\"configID\")\n\n # output = pd.Panel(items=labels, major_axis=problemIDs, minor_axis=configIDs)\n multiindex = pd.MultiIndex.from_product([problemIDs, configIDs], names=[\"problems\", \"configs\"])\n\n output = pd.DataFrame(index=multiindex, columns=labels)\n output.columns.names = [\"stats\"]\n\n for result in self.results:\n problemID = result.problemID\n configID = result.configID\n for label in [label for label in TestResults._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result, label)\n for label in [label for label in SizeMetrics._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result.size_metrics, label)\n\n # Compute Statistics\n 
output.fillna(value=np.nan, inplace=True)\n output.sort_index(inplace=True)\n try:\n TestFramework.compute_mosek_error(output, \"opt_val\", \"mosek_config\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_mosek_error: 'mosek_config' or 'opt_val' field not found.\")\n try:\n TestFramework.compute_performance(output, \"solve_time\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_performance: 'solve_time' field not found.\")\n return output", "def get_pandas(self):\n return pd.DataFrame(self.results)", "def results_to_df(ary, ks, ns):\n \n # create columns as dictionaries\n results = {}\n results['algorithm'] = ['knn' for i in range(ary.size / 4)] + ['cnn' for j in range(ary.size / 4)]\n results['sample_size'] = ns * (2 * len(ks))\n k = []\n for ii in range(len(ks)):\n k += [ks[ii] for jj in range(len(ns))]\n results['k'] = k + k\n results['run_time'] = ary[0].reshape(60)\n results['accuracy'] = ary[1].reshape(60)\n \n return pd.DataFrame(results)", "def concat_all_evaluation_results(list_of_folders):\n\n\n train_eval_df_list = []\n val_eval_df_list = []\n train_val_eval_df_list = []\n\n\n for item in list_of_folders:\n path_to_eval_folder = os.path.join(EMBEDDING_DEST, item)\n files = os.listdir(path_to_eval_folder)\n\n for f in files:\n\n # for each evaluation result csv file, see whether it is from training set, or validation set, or training+validation\n if f.endswith(\"image_level_evaluation_result_top_tri.csv\"):\n\n if \"random\" in f:\n if \"random_training_validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_val_eval_df_list.append(df)\n\n elif \"random_training\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_eval_df_list.append(df)\n\n\n elif \"random_validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n val_eval_df_list.append(df)\n\n\n else:\n if \"triplet\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_val_eval_df_list.append(df)\n\n elif \"training\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n train_eval_df_list.append(df)\n\n elif \"validation\" in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n val_eval_df_list.append(df)\n\n\n # add 'training_' or 'validation_' to the column names of evaluation results coming from training and validation sets.\n # This is to be able to distinguish them in the final general csv file.\n\n columns = list(train_val_eval_df_list[0])\n train_columns = [\"training_\"+item for item in columns[1:]]\n train_columns = [columns[0]] + train_columns\n train_columns_dict ={}\n \n val_columns = [\"validation_\"+item for item in columns[1:]]\n val_columns = [columns[0]] + val_columns\n val_columns_dict ={}\n\n #train_and_val_columns = [\"train_and_validation_\"+item for item in columns[1:]]\n #train_and_val_columns = [columns[0]] + train_and_val_columns\n #train_and_val_columns_dict ={}\n\n\n for i in range(len(columns)):\n train_columns_dict[columns[i]] = train_columns[i]\n val_columns_dict[columns[i]] = val_columns[i]\n #train_and_val_columns_dict[columns[i]] = train_and_val_columns[i]\n\n\n concatenated_training_df = pd.concat(train_eval_df_list, sort=False)\n concatenated_training_df = concatenated_training_df.rename(columns=train_columns_dict)\n\n concatenated_validation_df = pd.concat(val_eval_df_list, sort=False)\n concatenated_validation_df = concatenated_validation_df.rename(columns=val_columns_dict)\n \n concatenated_train_and_validation_df = 
pd.concat(train_val_eval_df_list, sort=False)\n #concatenated_train_and_validation_df = concatenated_train_and_validation_df.rename(columns=train_and_val_columns_dict)\n\n\n concatenated_training_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\", \"training_all_evaluation_result_top_tri.csv\"),index=None)\n concatenated_validation_df.to_csv(os.path.join(EMBEDDING_DEST, \"compare_with_no_sz\", \"validation_all_evaluation_result_top_tri.csv\"),index=None)\n concatenated_train_and_validation_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\",\"training_and_validation_all_evaluation_result_top_tri.csv\"), index=None)\n\n # ---------\n # If you have columns on arguments, keep them in training but drop them in validation and train_and_val to prevent duplicates\n list_of_cols_in_validation_df = list(concatenated_validation_df)\n list_of_cols_in_train_val_df = list(concatenated_train_and_validation_df)\n args_cols = get_json_argument_list()\n\n args_cols_val = [\"validation_\"+item for item in args_cols]\n \n if len(list_of_cols_in_train_val_df) == len(list_of_cols_in_validation_df) and len(list_of_cols_in_train_val_df) > 7:\n concatenated_validation_df = concatenated_validation_df.drop(args_cols_val, axis=1, errors='ignore')\n concatenated_train_and_validation_df = concatenated_train_and_validation_df.drop(args_cols, axis=1, errors='ignore')\n\n\n # ---------\n\n all_three_df_list = [concatenated_training_df, concatenated_validation_df, concatenated_train_and_validation_df]\n concatenated_all_df = pd.concat(all_three_df_list, axis=1)\n concatenated_all_df.to_csv(os.path.join(EMBEDDING_DEST,\"compare_with_no_sz\", \"all_evaluation_result_top_tri.csv\"), index=None)", "def result_to_dataframe(data):\n letters, statistics = zip(*data)\n dataframe = pd.DataFrame(data=list(statistics), index=letters, columns=['SUM', 'SUM_OF_SQUARES', 'MAX', 'MIN', 'COUNT']).sort_index()\n dataframe['MEAN'] = dataframe['SUM'] / dataframe['COUNT']\n dataframe['VARIANCE'] = dataframe['SUM_OF_SQUARES'] / dataframe['COUNT'] - dataframe['MEAN']**2\n dataframe['STANDARD_DEVIATION'] = dataframe['VARIANCE']**0.5\n logging.info(\"Total datapoints read: {}.\".format(dataframe['COUNT'].sum()))\n return dataframe", "def get_results(self, methods: list = None):\n df = pd.DataFrame(self._results)\n if (methods is not None) & ('method' in df.columns):\n df = df.loc[[x in methods for x in df.method.values]]\n return df", "def run_tests():\n with open(FILENAME) as file:\n # Loads testing parameters from the yaml file.\n tests = yaml.safe_load(file)\n\n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results['Last Average Score'] = \"\"\n results['No of Q-Learning episodes'] = \"\"\n\n # run experiments:\n for i, test in enumerate(test_dict):\n grid = Rooms(test[\"env_size\"], testing=True)\n learning = QLearning(grid, test[\"gamma\"], test[\"alpha\"], test[\"agent_start_pos\"])\n e_greedy = Policy(\"e-greedy\", test[\"epsilon\"], test[\"decay\"])\n greedy = Policy(policy_type=\"greedy\")\n experiment = Experiments(grid, learning, greedy, test[\"iters\"],\n test[\"agent_start_pos\"], test[\"test_no\"])\n\n for session in range(test[\"iters\"]):\n learning.run_multiple_episodes(test[\"batch_episodes\"], e_greedy)\n mean_reward = experiment.run_experiments(test[\"exp_per_batch\"])\n\n results.loc[i,'Last Average Score'] = mean_reward\n results.loc[i,'No of Q-Learning episodes'] = (session + 1) * test[\"batch_episodes\"]\n\n # save results to csv file\n 
filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n # plot & save graphs\n experiment.generate_results(test[\"test_no\"], test)\n\n return results", "def create_df_saved_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results[\"items\"]:\r\n try:\r\n track_name.append(items[\"track\"]['name'])\r\n track_id.append(items[\"track\"]['id'])\r\n artist.append(items[\"track\"][\"artists\"][0][\"name\"])\r\n duration.append(items[\"track\"][\"duration_ms\"])\r\n album.append(items[\"track\"][\"album\"][\"name\"])\r\n popularity.append(items[\"track\"][\"popularity\"])\r\n except TypeError: \r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n return df", "def save_to_dataframe(self):\n titles, years, months, days, authors = list(), list(), list(), list(), list()\n for doc in self.results[\"documents\"]:\n titles.append(doc['title'])\n years.append(doc['year'])\n months.append(doc['month'])\n days.append(doc['day'])\n authors.append(doc['authors'])\n return pd.DataFrame({\"title\": titles, \"years\": years, \"months\": months, \"days\": days, \"author\": authors})", "def read_results(\n self,\n model_run_names: list,\n model_names: list,\n output_names: list,\n timesteps: list = None,\n decisions: list = None,\n time_decision_tuples: list = None,\n ):\n\n self.validate_names(model_run_names, model_names, output_names)\n\n results_dict = self._store.get_results(\n model_run_names,\n model_names[0],\n output_names,\n timesteps,\n decisions,\n time_decision_tuples,\n )\n\n # Keep tabs on the units for each output\n for model_run_name in model_run_names:\n for output_name in output_names:\n res = results_dict[model_run_name][output_name]\n self._output_units[res.name] = res.unit\n\n # For each output, concatenate all requested model runs into a single data frame\n formatted_frames = []\n for output_name in output_names:\n # Get each DataArray as a pandas data frame and concatenate, resetting the index to\n # give back a flat data array\n list_of_df = [results_dict[x][output_name].as_df() for x in model_run_names]\n names_of_df = [x for x in results_dict.keys()]\n\n formatted_frames.append(\n pd.concat(\n list_of_df, keys=names_of_df, names=[\"model_run\"]\n ).reset_index()\n )\n\n # Append the other output columns to the first data frame\n formatted_frame = formatted_frames.pop(0)\n output_names.pop(0)\n\n for other_frame, output_name in zip(formatted_frames, output_names):\n assert (formatted_frame[\"model_run\"] == other_frame[\"model_run\"]).all()\n assert (\n formatted_frame[\"timestep_decision\"] == other_frame[\"timestep_decision\"]\n ).all()\n formatted_frame[output_name] = other_frame[output_name]\n\n # Unpack the timestep_decision tuples into individual columns and drop the combined\n formatted_frame[[\"timestep\", \"decision\"]] = pd.DataFrame(\n formatted_frame[\"timestep_decision\"].tolist(), index=formatted_frame.index\n )\n\n formatted_frame = formatted_frame.drop(columns=[\"timestep_decision\"])\n\n # Now reorder the columns. 
Want model_run then timestep then decision\n cols = formatted_frame.columns.tolist()\n\n assert cols[0] == \"model_run\"\n cols.insert(1, cols.pop(cols.index(\"timestep\")))\n cols.insert(2, cols.pop(cols.index(\"decision\")))\n assert cols[0:3] == [\"model_run\", \"timestep\", \"decision\"]\n\n return formatted_frame[cols]", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def parse_query_result(self):\n results = self.jsonData['results']\n\n df = pd.DataFrame(results)\n df.drop(['rootSource', 'uri'], axis=1, inplace=True)\n\n return df", "def save_results(self):\n results = pd.concat([\n pd.DataFrame(self.IDs.cpu().numpy(), columns= ['ID']), \n pd.DataFrame(self.predicted_labels.cpu().numpy(), columns= ['predicted_label']),\n pd.DataFrame(self.correct_predictions.cpu().numpy(), columns= ['correct_prediction']),\n pd.DataFrame(self.epistemic_uncertainty.cpu().numpy(), columns= ['epistemic_uncertainty']), \n pd.DataFrame(self.aleatoric_uncertainty.cpu().numpy(), columns= ['aleatoric_uncertainty']), \n pd.DataFrame(self.total_uncertainty.cpu().numpy(), columns= ['total_uncertainty']), \n ], axis=1)\n\n create_results_directory()\n results.to_csv('results/{}_{}_results.csv'.format(self.__class__.__name__, datetime.datetime.now().replace(microsecond=0).isoformat()), index=False)", "def _load_results(self):\n\n _LOG.debug(\"stats colnames: %s\", \", \".join(self._stats_colnames))\n _LOG.debug(\"additional colnames: %s\", \", \".join(self._more_colnames))\n\n for res in self.rsts:\n _LOG.debug(\"hover colnames: %s\", \", \".join(self._hov_colnames[res.reportid]))\n\n colnames = []\n for colname in self._hov_colnames[res.reportid] + self._more_colnames:\n if colname in res.colnames_set:\n colnames.append(colname)\n\n csel = Trivial.list_dedup(self._stats_colnames + colnames)\n res.clear_filts()\n res.set_csel(csel)\n res.load_df()\n\n # We'll be dropping columns and adding temporary columns, so we'll affect the original\n # dataframe. 
This is more efficient than creating copies.\n self._mangle_loaded_res(res)", "def _process_results(self):\n self.portfolio.create_backtest_result_dataframe()\n stats = self._show_stats()\n return stats", "def create_pandas_dataframes():\n train, test = Email.load_emails_from_data()\n\n train_y = [int(t.is_spam) for t in train]\n test_y = [int(t.is_spam) for t in test]\n\n vocab = get_vocabulary_vector(train)\n print(\"[ INF ] Vocab Size:\", len(vocab))\n\n train = [t.vectorize_tokens(vocab) for t in train]\n test = [t.vectorize_tokens(vocab) for t in test]\n\n train = pd.DataFrame.from_records(train, columns=vocab)\n test = pd.DataFrame.from_records(test, columns=vocab)\n\n train['is_spam'] = train_y\n test['is_spam'] = test_y\n\n return train, test", "def create_df_top_songs(api_results):\r\n #create lists for df-columns\r\n track_name = []\r\n track_id = []\r\n artist = []\r\n album = []\r\n duration = []\r\n popularity = []\r\n #loop through api_results\r\n for items in api_results['items']:\r\n try:\r\n track_name.append(items['name'])\r\n track_id.append(items['id'])\r\n artist.append(items[\"artists\"][0][\"name\"])\r\n duration.append(items[\"duration_ms\"])\r\n album.append(items[\"album\"][\"name\"])\r\n popularity.append(items[\"popularity\"])\r\n except TypeError:\r\n pass\r\n # Create the final df \r\n df = pd.DataFrame({ \"track_name\": track_name, \r\n \"album\": album, \r\n \"track_id\": track_id,\r\n \"artist\": artist, \r\n \"duration\": duration, \r\n \"popularity\": popularity})\r\n\r\n return df", "def write_results(self, results):\n predictions = open('hmm_results.csv', 'w')\n predictions.write(\"Type,Prediction\")\n for type in results:\n if type == 'O':\n continue\n predictions.write(\"\\n\" + str(type) + \",\")\n for interval in results[type]:\n predictions.write(str(interval) + \" \")\n predictions.close()", "def extract_results(grid_search):\n results = grid_search.cv_results_.copy()\n params = pd.DataFrame(results.pop('params'))\n values = pd.DataFrame(results)\n values = values.loc[:, ~values.columns.str.contains('param_')]\n df = pd.concat([params, values], axis=1)\n df = df.set_index(list(params.columns))\n df = df.sort_values('rank_test_neg_mean_squared_error')\n return df", "def frame(self):\n microseconds = np.array(self.results['times']) * 1e6\n return pd.DataFrame(self.results, index=microseconds)", "def create_state_df(runs, iters=100000):\n df_st = pd.DataFrame()\n for l in runs.keys():\n for i in range(runs[l].shape[0]):\n df = pd.DataFrame()\n df[\"Step\"] = np.arange(iters)\n df[\"Maximum\"] = np.maximum.accumulate(runs[l][i].flatten())\n df[\"run-type\"] = \"Short Burst\" if float(l) > 1 else \"Biased Run\" if float(l) < 1 else \"Unbiased Run\"\n df[\"param\"] = \"b = {}\".format(l) if float(l) > 1 else \"q = {}\".format(l)\n df_st = df_st.append(df, ignore_index=True)\n return df_st", "def load_results(task):\n\n ALGRESULTS = \"./results/task%d_formulas.csv.gz\" % (task)\n MLRESULTS = \"./results/task%d_ml.csv.gz\" % (task)\n\n dftest = pd.read_csv(\"./dftest_task%d.csv\" % (task))\n\n dfalg = pd.read_csv(ALGRESULTS)\n dfml = pd.read_csv(MLRESULTS)\n dfnn = get_nns(task)\n dfml = dfml.rename(columns={\"Unnamed: 0\":\"algs\"})\n\n merged = pd.merge(dfalg, dfml, on=[\"mesaid\",\"linetime\",\"actValue\",\"gt\",\"gt_sleep_block\"]) #\n merged = pd.merge(merged, dfnn, on=[\"mesaid\",\"linetime\",\"actValue\",\"gt\",\"gt_sleep_block\"]) #\n merged = pd.merge(merged, dftest, on=[\"mesaid\",\"linetime\",\"gt\",\"gt_sleep_block\"]) #\n\n merged[\"time\"] = 
pd.to_datetime(merged[\"linetime\"])\n merged[\"always1\"] = 1\n merged[\"always0\"] = 0\n\n merged[\"sleep\"] = (~merged[\"wake\"].astype(np.bool)).astype(float)\n return merged", "def getResults():", "def __init__(self, results: List[Measurement]):\n self.results = results", "def run_tests():\n with open(FILENAME) as file:\n\n # Loads the test hyper-parameters as dictionaries.\n tests = yaml.safe_load(file)\n \n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results[\"Episode\"] = \"\"\n results['Max average score'] = \"\"\n\n for i, test in enumerate(tests['Tests']):\n\n env = gym.make(test['env'])\n env.reset()\n\n actor_critic = ActorCritic(env, test['episodes'], test['max_score'], \n test['hidden_size'], test['gamma'], test['save'])\n\n ## run training \n best_score, episode, rew_hist = actor_critic.train()\n\n results.loc[i,'Episode'] = episode\n results.loc[i,'Max average score'] = best_score\n\n plot_graphs(test, rew_hist)\n\n # save results to csv file\n filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n return results", "def group_results(results):\n equiv_classes = {}\n for tbl_prog, vis_spec in results:\n full_trace = vis_spec.eval()\n fronzen_trace = json.dumps(visual_trace.trace_to_table(full_trace), sort_keys=True)\n if fronzen_trace not in equiv_classes:\n equiv_classes[fronzen_trace] = []\n equiv_classes[fronzen_trace].append((tbl_prog, vis_spec))\n return equiv_classes", "def print_results(results):\n data = []\n for idx in results.index:\n item = results.loc[idx]\n row = (str(item.gid), \n re.sub(r'\\n', ' ', item.creators),\n re.sub(r'[\\n\\r]+', ' ', item.title), \n gut_utf8.format(item.gid))\n data.append(row)\n _ = [print('|'.join(row)) for row in data]\n print('#', len(data), \"items returned\")\n return data", "def record_results(results, scenario_name, output_file=\"ValidationResults.csv\"):\n import pandas as pd\n if os.path.isfile(output_file):\n table = pd.read_csv(output_file, index_col=0)\n else:\n table = pd.DataFrame({})\n metrics = {r[2]:r[0] for r in results}\n index = scenario_name\n metrics = pd.DataFrame(metrics, index=[index])\n table = metrics.combine_first(table)\n table = table[metrics.keys()]\n table.to_csv(output_file)", "def make_summary_tables( res ):\n\n # transform second table to csv and read this as a dataFrame\n result_fit_df = pd.read_csv(StringIO( res.tables[1].as_csv() ), sep=\",\",index_col=0)\n result_fit_df.columns = [i.strip() for i in result_fit_df.columns]\n result_fit_df.index = [i.strip() for i in result_fit_df.index]\n\n # first table is trickier because the data is spread on to columns, and there is title line\n L = res.tables[0].as_html().split('\\n')\n L.pop(1) # get rid of the title\n tmp = pd.read_html('\\n'.join(L) , header=None)[0] # read as a dataframe, but with 4 columns \n\n names = list(tmp[0]) + list(tmp[2])[:-2] # columns 0 and 2 are metric names\n values = list(tmp[1]) + list(tmp[3])[:-2] # columns 1 and 3 are the corresponding values\n # NB : I exclude the last 2 elements which are empty \n \n result_general_df = pd.DataFrame( {'Name': names , 'Value' : values}, index = names , columns=['Value'] )\n \n return result_general_df , result_fit_df", "def parse_results(results_file, measure_text):\r\n \r\n #Read in results file\r\n with open(results_file) as f_in:\r\n results_text = f_in.readlines()\r\n \r\n #Set-up output data frame\r\n results_df = pd.DataFrame(columns=('effect', 'time_window', 'electrodes'))\r\n \r\n r = -1 #data 
frame row index\r\n \r\n #Parse lines and read relevant results into data frame\r\n for i in range(len(results_text)):\r\n \r\n line = results_text[i]\r\n \r\n #Starting to parse a new simulation\r\n if 'SIMULATION SUMMARY' in line:\r\n r += 1\r\n \r\n #Parse basic simulation parameters\r\n elif 'Simulated experiments' in line:\r\n (field, value) = line.split('\\t')\r\n results_df.loc[r, 'n_experiments'] = int(value)\r\n \r\n elif 'Permutations' in line:\r\n (field, value) = line.split('\\t')\r\n results_df.loc[r, 'n_permutations'] = int(value)\r\n \r\n elif 'Sample size' in line:\r\n (field, value) = line.split('\\t')\r\n results_df.loc[r, 'n_subjects'] = int(value)\r\n \r\n elif 'Effect:' in line:\r\n (field, value) = line.split('\\t')\r\n results_df.loc[r, 'effect'] = os.path.basename(value).strip()\r\n \r\n elif 'Time window' in line:\r\n (field, value) = line.split('\\t')\r\n results_df.loc[r, 'time_window'] = value.strip()\r\n \r\n elif 'Electrodes:' in line:\r\n (field, value) = line.split('\\t')\r\n results_df.loc[r, 'electrodes'] = value.strip()\r\n \r\n elif 'Trials =' in line:\r\n (field, value) = line.split('\\t')\r\n results_df.loc[r, 'n_trials'] = int(value.split()[0])\r\n \r\n elif 'MEAN WINDOW' in line:\r\n (field, value) = results_text[i+1].split('\\t')\r\n results_df.loc[r, 'mean_amp'] = float(value)\r\n \r\n #Find correction method for current point in file\r\n elif 'UNCORRECTED' in line:\r\n method = 'uncorrected'\r\n elif 'SIDAK' in line:\r\n method = 'sidak'\r\n elif 'FMAX' in line:\r\n method = 'Fmax'\r\n elif 'CLUSTER 0.05' in line:\r\n method = 'cluster_05'\r\n elif 'CLUSTER 0.01' in line:\r\n method = 'cluster_01'\r\n elif 'BH' in line:\r\n method = 'BH'\r\n elif 'BY' in line:\r\n method = 'BY'\r\n elif 'BKY' in line:\r\n method = 'BKY'\r\n \r\n #Get the measure of interest and associate with method (from above)\r\n elif measure_text in line:\r\n (field, value) = line.split('\\t')\r\n results_df.loc[r, method] = float(value)\r\n \r\n return results_df", "def result_df(self, regex=None) -> pd.DataFrame:\n if regex:\n # get one random item from dict, and get keys from this random (dict) item\n # FIXME: how to do this better? 
- this is not efficient...\n keys = self.result[next(iter(self.result))].keys()\n\n if type(regex) == str:\n comp_regexe = re.compile(regex)\n columns = list(filter(comp_regexe.search, keys))\n else:\n columns = list(filter(regex.search, keys))\n\n df = pd.DataFrame.from_dict(self.result, orient='index')\n return df[columns]\n else:\n return pd.DataFrame.from_dict(self.result, orient='index')", "def logreg_results_to_pandas(common_molids_cache=False):\n results = ResultInDisk.collect_results_under_dir(MALARIA_LOGREGS_EXPERIMENT_ROOT,\n factory=malaria_result_factory)\n\n # --- molids cache\n molids_cache = None\n if common_molids_cache:\n rf_lab, rf_amb, rf_unl, rf_scr = malaria_logreg_fpt_providers(None)\n # Labelled molids\n lab_molids = rf_lab.ids()\n amb_molids = rf_amb.ids() # To prioritize confirmatory tests on labelled data\n # Unlabelled molids\n unl_molids = rf_unl.ids()\n scr_molids = rf_scr.ids()\n # Let's avoid the need to reread them...\n molids_cache = {\n 'lab': lab_molids,\n 'amb': amb_molids,\n 'unl': unl_molids,\n 'scr': scr_molids\n }\n\n results_dict_of_dicts = {}\n for result in results:\n if common_molids_cache:\n result.ids_cache = molids_cache # dodgy, rework with a copying constructor\n rdict = copy(result.info())\n rdict['result'] = result\n rdict['class_weight'] = 'uniform' if rdict['class_weight'] is None else rdict['class_weight']\n # Some more ad-hoc keys for the model\n rdict['num_present_folds'] = result.num_present_folds()\n rdict['auc_mean'] = result.auc_mean()\n rdict['enrichement5_mean'] = result.enrichement5_mean()\n # Some more ad-hoc keys for the fingerprint folder\n folder = result.fingerprint_folder()\n rdict['folder_seed'] = int(folder.seed) if folder is not None else -1\n rdict['folder_size'] = int(folder.fold_size) if folder is not None else 0\n # Add this result to the data frame\n results_dict_of_dicts[result.root_key()] = rdict\n\n return DataFrame(results_dict_of_dicts).T", "def make_output_df(self):\n df = pd.concat([pd.DataFrame(dat) for dat in [self.qdata, self.pdata]], axis=1)\n columns = np.hstack(([['{}{}'.format(x, c) for c in self.actions] for x in ['q', 'p']]))\n df.columns = columns\n df.insert(0, 'trial', np.arange(1, df.shape[0]+1))\n df['choice'] = self.choices\n df['feedback'] = self.feedback\n# r = np.array(self.bandits.rvalues)\n# p = np.array(self.bandits.preward)\n df['optimal'] = self.demand\n df.insert(0, 'agent', 1)\n self.data = df.copy()", "def add_to_table(self):\n if len(self.result) == 0:\n self.result = {self.title: [self.accuracy, self.f1, self.precision]}\n self.result = pd.DataFrame(self.result, index=['Accuracy', 'F-score', 'Precision'])\n return self.result\n else:\n conact = {self.title: [self.accuracy, self.f1, self.precision]}\n conact = pd.DataFrame(conact, index=['Accuracy', 'F-score', 'Precision'])\n self.result = pd.concat([self.result, conact], axis=1)\n return self.result", "def convert_to_data_frame(result, exp_name, nets, critic, loss, seed):\n label = \"{}, {}, {}\".format(nets, critic, loss)\n rows = list(\n zip(\n itertools.repeat(exp_name),\n itertools.repeat(nets),\n itertools.repeat(critic),\n itertools.repeat(loss),\n itertools.repeat(seed),\n result.iterations,\n [-loss for loss in result.testing_losses], # Loss -> bound.\n result.classification_accuracies,\n itertools.repeat(label)))\n df_eval = pd.DataFrame(\n rows,\n columns=(\"exp_name\", \"nets\", \"Critic\", \"Estimator\",\n \"run\", \"iteration\", \"bound_value\", \"accuracy\", \"label\"))\n\n df_eval[\"Estimator\"] = 
df_eval[\"Estimator\"].replace(\n to_replace={\n \"cpc\": \"$CPC$\",\n \"pcc\": \"$PCC$\",\n \"drfc\": \"$D-RFC$\",\n \"wpc\": \"$WPC$\"\n })\n df_eval[\"Critic\"] = df_eval[\"Critic\"].replace(\n to_replace={\n \"concat\": \"MLP\",\n \"separable\": \"Separable\",\n \"innerprod\": \"Inner product\",\n \"bilinear\": \"Bilinear\"\n })\n return df_eval", "def dwn_analysis_csv(request):\n data = []\n for i in results:\n data.append((i['sentence'], i['head'], i['tail'], i['pred_relation'], i['sent'], i['conf']))\n df = pd.DataFrame(data, columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence'])\n df.to_csv(\"temp/analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/analysis_results.csv','rb'))", "def dataframe(sorter, run_function):\n\n df = None\n for run in sorter:\n data = {\n 'mouse': [run.mouse],\n 'date': [run.date],\n 'run': [run.run],\n 'reversed': [flow.metadata.reversal(run.mouse) < run.date],\n 'dprime': [calc.performance.dprime(run.parent)],\n }\n\n t2p = run.trace2p()\n for cs in config.stimuli():\n for err in [0, 1]:\n evs = t2p.csonsets(cs, errortrials=err)\n csdata = deepcopy(data)\n for key in csdata:\n csdata[key] = csdata[key]*len(evs)\n\n csdata['frame'] = evs\n csdata['stimulus'] = [cs]*len(evs)\n csdata['error'] = [err]*len(evs)\n\n default = pd.DataFrame(csdata)\n date_df = run_function(run, default)\n\n if df is None:\n df = date_df\n else:\n df = pd.concat([df, date_df], ignore_index=True, sort=True)\n\n return df", "def aggregateResultsToDfResults(self, arrays=True, fillna=False):\n nan_value = np.nan\n # defines which variable types will be saved in the results dataframe\n SUPPORTED_TYPES = (float, int, np.ndarray, list)\n SCALAR_TYPES = (float, int)\n ARRAY_TYPES = (np.ndarray, list)\n\n logging.info(\"Aggregating results to `dfResults` ...\")\n for runId, parameters in tqdm.tqdm(self.dfResults.iterrows(), total=len(self.dfResults)):\n # if the results were previously loaded into memory, use them\n if hasattr(self, \"results\"):\n # only if the length matches the number of results\n if len(self.results) == len(self.dfResults):\n result = self.results[runId]\n # else, load results individually from hdf file\n else:\n result = self.getRun(runId)\n # else, load results individually from hdf file\n else:\n result = self.getRun(runId)\n\n for key, value in result.items():\n # only save floats, ints and arrays\n if isinstance(value, SUPPORTED_TYPES):\n # save 1-dim arrays\n if isinstance(value, ARRAY_TYPES) and arrays:\n # to save a numpy array, convert column to object type\n if key not in self.dfResults:\n self.dfResults[key] = None\n self.dfResults[key] = self.dfResults[key].astype(object)\n self.dfResults.at[runId, key] = value\n elif isinstance(value, SCALAR_TYPES):\n # save scalars\n self.dfResults.loc[runId, key] = value\n else:\n self.dfResults.loc[runId, key] = nan_value\n # drop nan columns\n self.dfResults = self.dfResults.dropna(axis=\"columns\", how=\"all\")\n\n if fillna:\n self.dfResults = self.dfResults.fillna(0)", "def dwn_all_saved_results(request):\n \n sources = []\n for i in Source.objects.filter(user=request.user):\n sources.append((i.source_id, i.datetime_extracted.strftime('%d/%m/%Y %H:%M'), i.source))\n \n data = []\n for s, timee, s_name in sources:\n objs = ExtractedRelation.objects.filter(source=s)\n for i in objs:\n data.append((i.sentence, i.head, i.tail, i.pred_relation, i.sentiment, i.conf, timee, s_name, i.rel_id, os.path.basename(i.ckpt)))\n \n df = pd.DataFrame(data, 
columns=['Sentence', 'Head', 'Tail', 'Predicted Relation', 'Predicted Sentiment', 'Confidence', 'Extraction Time', 'Source', 'rel_id', 'Checkpoint'])\n df.to_csv(\"temp/all_analysis_results.csv\", index=False)\n \n return FileResponse(open('temp/all_analysis_results.csv','rb'))", "def main(self, model=None):\n result = pd.DataFrame()\n if model is None:\n model = Model.default()\n for season in [2011, 2012, 2013]:\n print(season)\n r = Runner(Season(season), model)\n current = r.run()\n result = result.append(current)\n return result", "def load_results_runs(filename):\n # Load the necessary columns from the csv into panda\n data = pd.read_csv(filename)\n\n # Cleans the data\n data = data[['Run', 'Total Distance']]\n data['Run'] = pd.to_numeric(data['Run'])\n data['Total Distance'] = pd.to_numeric(data['Total Distance'])\n\n return data", "def creat_df(urlist):\n country = []\n head = []\n for i in range(len(urlist)):\n country.append(urlist[i][2])\n head.append(urlist[i][4])\n df = pd.DataFrame.from_dict({\"Country\": country, \"Head\": head})\n return df", "def get_results_frames(results_df, times):\n # coherence check:\n for t in times:\n assert results_df['t'].iloc[0] <= t <= results_df['t'].iloc[-1], \\\n 'time={} is outside the results_df range'.format(t)\n\n frames = pd.DataFrame(columns=results_df.columns)\n frames.loc[:, 't'] = times\n ignore_columns = {'t'}\n for col in results_df.columns:\n if col not in ignore_columns:\n vals_at_times = np.interp(times, results_df['t'], results_df[col])\n frames.loc[:, col] = vals_at_times\n return frames", "def loadDfResults(self, filename=None, trajectoryName=None):\n # chose HDF file to load\n filename = filename or self.HDF_FILE\n self.pypetTrajectory = pu.loadPypetTrajectory(filename, trajectoryName)\n self.nResults = len(self.pypetTrajectory.f_get_run_names())\n\n exploredParameters = self.pypetTrajectory.f_get_explored_parameters()\n\n # create pandas dataframe of all runs with parameters as keys\n logging.info(\"Creating `dfResults` dataframe ...\")\n niceParKeys = [p[11:] for p in exploredParameters.keys()]\n if not self.parameterSpace:\n niceParKeys = [p.split(\".\")[-1] for p in niceParKeys]\n self.dfResults = pd.DataFrame(columns=niceParKeys, dtype=object)\n for nicep, p in zip(niceParKeys, exploredParameters.keys()):\n self.dfResults[nicep] = exploredParameters[p].f_get_range()", "def GridSearchResultToDF(search):\n return(pd.concat([pd.DataFrame(data=search.cv_results_['params']),\n pd.DataFrame(data={'mean': search.cv_results_['mean_test_score'],\n 'std': search.cv_results_['std_test_score']}),\n pd.DataFrame(data={'mean_fit_time': search.cv_results_['mean_fit_time']})],\n axis = 1))", "def consolidate_results(path='./Data'):\n model_files = [load(os.path.join(path, f)) \n for f in os.listdir(path) if os.path.isfile(os.path.join(path, f)) and f.startswith('model_')]\n df_final = pd.DataFrame(columns=['model_name','train_accuracy','test_accuracy',\n 'macro_avg_precision','macro_avg_recall',\n 'macro_avg_f1-score','weighted_avg_precision',\n 'weighted_avg_recall','weighted_avg_f1-score'])\n for model_file in model_files:\n results = model_file['model_results']\n class_report = classification_report(results.category, results.pred, output_dict=True)\n df_final = df_final.append({'model_name':model_file['model_name'],\n 'train_accuracy':'{0:.2f}'.format(model_file['model_CV'].best_score_),\n 'test_accuracy':'{0:.2f}'.format(class_report['accuracy']),\n 'macro_avg_precision':class_report['macro avg']['precision'],\n 
'macro_avg_recall':class_report['macro avg']['recall'],\n 'macro_avg_f1-score':class_report['macro avg']['f1-score'],\n 'weighted_avg_precision':class_report['weighted avg']['precision'],\n 'weighted_avg_recall':class_report['weighted avg']['recall'],\n 'weighted_avg_f1-score':class_report['weighted avg']['f1-score']\n },ignore_index=True)\n return(df_final)", "def _parse_suite(\n self, results: dict, extra_tags: dict = None\n ) -> List[BenchmarkResult]:\n # all results share a batch id\n batch_id = uuid.uuid4().hex\n\n parsed_results = []\n for result in results[\"benchmarks\"]:\n result_parsed = self._parse_benchmark(\n result=GoogleBenchmark(**result),\n batch_id=batch_id,\n extra_tags=extra_tags,\n )\n parsed_results.append(result_parsed)\n\n return parsed_results", "def parse_result_series(result):\n if isinstance(result, np.ndarray):\n return result\n\n if result is None or not len(result):\n return None\n\n dates, values = result\n return pd.DataFrame({0:dates.astype(int)/1000,1:values})", "def get_study_results():\n study_results_path = \"data/Study_results.csv\"\n df = pd.read_csv(study_results_path)\n return df", "def dataframe_to_language_stats(dataframe, n_runs, n_batches, n_gens, possible_form_lengths):\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. collapsing the two different reduplication strategies?)\n proportion_column = np.array(dataframe['proportion'])\n proportion_column_as_results = proportion_column.reshape((n_runs*n_batches, n_gens, n_language_classes))\n return proportion_column_as_results", "def assemble_results(output_root):\n all_params = []\n for run in sorted(os.listdir(output_root)):\n run_dir = os.path.join(output_root, run)\n if os.path.isdir(run_dir):\n r = {'dir': run}\n json_file = os.path.join(run_dir, f'params.json')\n try:\n with open(json_file, 'r') as fp:\n d = json.load(fp)\n r.update(d)\n except (FileNotFoundError, KeyError) as e:\n print(str(e))\n print(f'removing {run_dir}')\n shutil.rmtree(run_dir)\n all_params.append(r)\n\n data = [pd.json_normalize(d, sep='__').to_dict(orient='records')[0] for d in all_params]\n\n # save CSV file of all results\n csv_file = os.path.join(output_root, 'results.csv')\n pd.DataFrame(data).to_csv(csv_file, index=False)\n\n # assemble list of params to check what's been done\n best_val_loss = float('inf')\n best_params = None\n all_params2 = []\n for d in all_params:\n if 'results' in d:\n if d['results']['valid']['loss'] < best_val_loss:\n best_val_loss = d['results']['valid']['loss']\n best_params = copy.deepcopy(d)\n del d['results']\n del d['dir']\n all_params2.append(d)\n\n if best_params is not None:\n print(f'best params: {best_params}')\n print(f'best val loss: {best_params[\"results\"][\"valid\"][\"loss\"]:.6f}')\n print(f'best val acc: {best_params[\"results\"][\"valid\"][\"accuracy\"]:.4%}')\n return all_params2, best_params", "def construct_results(\n self,\n source: str,\n results: Any,\n print_results: bool = True,\n ) -> List[Dict]:\n output = \"\"\n response = []\n\n for er in results:\n message = (\n f\"\\nSource: {source}\\n\"\n f\"Evaluated Action Name: {er['EvalActionName']}\\n\"\n f\"\\tEvaluated Resource name: {er['EvalResourceName']}\\n\"\n f\"\\tDecision: {er['EvalDecision']}\\n\"\n f\"\\tMatched statements: {er['MatchedStatements']}\"\n )\n r = {\n \"source\": source,\n \"action\": er['EvalActionName'],\n \"resource\": er['EvalResourceName'],\n \"decision\": er['EvalDecision'],\n \"matched_statements\": 
er['MatchedStatements'],\n }\n response.append(r)\n output += message\n if print_results:\n click.secho(output, fg=\"red\")\n\n return response", "def _convert_loops_to_df(text_loops):\n \n # Convert the list to a table\n df_loop = DataFrame(text_loops, columns=[u'text'])\n \n # Append columns which classify each row as a loop tag,\n # stop tag, label tab, or data values\n df_loop = _set_loops(df_loop)\n df_loop = _set_labels(df_loop)\n df_loop = _set_stops(df_loop)\n df_loop = _set_values(df_loop)\n \n # Extract the data into a table\n df_list = _extract_loop_data(df_loop)\n \n return df_list", "def combineAllListsIntoPandasDataframe(lss):\n # combine all lists into a dataframe\n #print lss\n df = p.DataFrame(range(len(lss[0])))\n for i in lss:\n try:\n df[i.columns[0]] = i\n except AttributeError, e:\n print \"{0}: {1}\".format(i, e)\n return df", "def concat_disease_evaluation_results(study, list_of_folders):\n\n eval_df_list = []\n\n for item in list_of_folders:\n if item == \"random\" or \"resnet\" in item:\n path_to_eval_folder = os.path.join(DATA_DIR, study, \"segmentation_embeddings\", item)\n base_case = True\n else:\n path_to_eval_folder = os.path.join(EMBEDDING_DEST, item)\n base_case = False\n\n files = os.listdir(path_to_eval_folder)\n\n for f in files:\n\n # for each evaluation result csv file, see whether it is from training set, or validation set, or training+validation\n\n if base_case == True:\n if f.endswith(\"image_level_evaluation_result_top_tri.csv\"):\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n eval_df_list.append(df)\n else:\n if f.endswith(\"image_level_evaluation_result_top_tri.csv\") and study in f:\n df = pd.read_csv(os.path.join(path_to_eval_folder, f))\n eval_df_list.append(df)\n\n\n columns = list(eval_df_list[0])\n\n concatenated_df = pd.concat(eval_df_list, sort=False)\n \n concatenated_df.to_csv(os.path.join(EMBEDDING_DEST, study+ \"_all_evaluation_result_top_tri.csv\"),index=None)", "def extract_format_results_test(self):\n assert len(self.results.keys()) != 0\n TESTS = [\n {\n \"output\": [\n {\n \"file_name\": \"BSA1.mzML\",\n \"spec_id\": 1337,\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"scaling_factor\": 100,\n \"score\": 1,\n \"charge\": 2,\n },\n {\n \"file_name\": \"BSA1.mzML\",\n \"spec_id\": 1338,\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"scaling_factor\": 100,\n \"score\": 0.9,\n \"charge\": 2,\n },\n {\n \"file_name\": \"BSA2.mzML\",\n \"spec_id\": 1337,\n \"formula\": \"C(43)H(75)N(15)O(17)S(2)\",\n \"scaling_factor\": 10,\n \"score\": 1,\n \"charge\": 3,\n },\n ]\n }\n ]\n for test_dict in TESTS:\n values = self.results.format_all_results()\n\n assert isinstance(values, pd.DataFrame)\n\n for out_data in test_dict[\"output\"]:\n result = values.loc[\n (values[\"file_name\"] == out_data[\"file_name\"])\n & (values[\"spec_id\"] == out_data[\"spec_id\"])\n ]\n assert (result[\"formula\"] == out_data[\"formula\"]).all()\n assert (result[\"scaling_factor\"] == out_data[\"scaling_factor\"]).all()\n assert (result[\"score\"] == out_data[\"score\"]).all()\n assert (result[\"charge\"] == out_data[\"charge\"]).all()", "def experiment_list_table_format(result):\n table = []\n for item in result:\n table.append(experiment_show_table_format(item))\n return table", "def df(self) -> \"pandas.DataFrame\":\n titles = []\n comments = []\n alternative_codes = []\n for cat in self.values():\n titles.append(cat.title)\n comments.append(cat.comment)\n alternative_codes.append(cat.codes[1:])\n return pandas.DataFrame(\n index=list(self.keys()),\n 
data={\n \"title\": titles,\n \"comment\": comments,\n \"alternative_codes\": alternative_codes,\n },\n )", "def FetchQueryResultToDF(data, col_name: List[str]) -> pd.DataFrame:\r\n result = []\r\n for row in data:\r\n to_be_append = []\r\n for col in row:\r\n to_be_append.append(col)\r\n result.append(to_be_append)\r\n df = pd.DataFrame(result, columns=col_name)\r\n print(df)\r\n return df", "def parse_multeval_results_table(fname, task, testset):\n result = []\n\n def parse_line(line, headers):\n values = list(filter(\n lambda s: s, map(lambda s: s.strip(), line.split(' '))))\n return OrderedDict({k: v for k, v in zip(headers, values)})\n\n def convert_to_link(sys_name):\n if sys_name.startswith('baseline:'):\n return sys_name\n url = url_for('results', task=task, testset=testset,\n system=sys_name)\n return f'<a href=\"{url}\">{sys_name}</a>'\n\n with open(fname) as f:\n header = f.readline().strip()\n n_runs = int(header.split()[4][-1])\n metrics = header.split()[5:]\n # add description of parentheses fields\n metrics = [f'{m} <em>(\\u03C3-sel/\\u03C3-opt/p)</em>' for m in metrics]\n headers = [f'SYSTEM ({n_runs} runs)'] + metrics\n # Skip empty line\n f.readline()\n # Get baseline system\n result.append(parse_line(f.readline().strip(), headers))\n baseline_name = result[0][headers[0]].split()[-1]\n for line in f:\n line = line.strip()\n if line:\n system = parse_line(line, headers)\n # Skip double baseline\n if system[headers[0]] != baseline_name:\n result.append(system)\n\n return pd.DataFrame.from_dict(result).to_html(\n index=False, border=0, justify='left', escape=False,\n classes=\"display compact row-border multeval\",\n formatters={\n # Convert system names to links\n headers[0]: lambda s: convert_to_link(s)}), baseline_name", "def load_stats_dataframe(files, aggregated_results=None):\n if os.path.exists(aggregated_results) and all(\n [os.path.getmtime(f) < os.path.getmtime(aggregated_results) for f in files]):\n return pd.read_pickle(aggregated_results)\n\n df = pd.DataFrame()\n for f in files:\n tmp_dict = pd.read_pickle(f)\n tmp_dict['emb_size'] = f.split('_')[2]\n tmp_dict['negative_ratio'] = f.split('_')[4]\n tmp_dict['batch_size'] = f.split('_')[6]\n tmp_dict['epochs'] = f.split('_')[8]\n tmp_dict['classification'] = f.split('_')[-1].split('.')[0]\n\n tmp_df = pd.DataFrame.from_dict(tmp_dict)\n df = pd.concat([df, tmp_df])\n\n if aggregated_results:\n df.to_pickle(aggregated_results)\n\n return df", "def create_df(files_list=my_files):\n\n all_records = list()\n\n for file in files_list:\n all_records += zr_parser(path.join(my_dir, file))\n\n return pd.DataFrame(all_records)", "def _compute_TTR(self) -> pd.DataFrame:\n\n # user options\n res = self.options.resolution\n dim = self.options.dimension\n\n # sample the corpus\n x_choices = (np.arange(res) + 1) / res\n TTR = [self.sample(x=x).as_datarow(dim) for x in x_choices]\n\n # save to self.TTR as dataframe\n colnames = [\"m_tokens\", \"n_types\", \"alpha\", \"gamma\"]\n if dim is not None:\n colnames += [\"lego_\" + str(x) for x in range(dim)]\n TTR = pd.DataFrame(TTR, columns=colnames)\n\n # types *not* drawn\n TTR.lego_0 = self.N - TTR.n_types\n\n # return\n return TTR", "def results(query, model): \n # use model to predict classification for query\n classification_labels = model.predict([query])[0]\n classification_results = dict(zip(df.columns[4:], classification_labels))\n return classification_results", "def _transform_results(self) -> List[BenchmarkResult]:\n with open(self.result_file, \"r\") as f:\n raw_results 
= json.load(f)\n\n parsed_results = []\n for suite in raw_results[\"suites\"]:\n parsed_results += self._parse_suite(\n results=suite,\n extra_tags={\"suite\": suite[\"name\"], \"source\": \"cpp-micro\"},\n )\n\n return parsed_results", "def convert_response_to_df(response):\n\n list = []\n\n for report in response.get('reports', []):\n columnHeader = report.get('columnHeader', {})\n dimensionHeaders = columnHeader.get('dimensions', [])\n metricHeaders = columnHeader.get('metricHeader', {}).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n sampled = True if report.get('samplesReadCounts') else False\n\n for row in rows:\n dict = {}\n dict['sampling'] = sampled\n dimensions = row.get('dimensions', [])\n dateRangeValues = row.get('metrics', [])\n\n for header, dimension in zip(dimensionHeaders, dimensions):\n dict[header] = dimension\n\n for i, values in enumerate(dateRangeValues):\n for metric, value in zip(metricHeaders, values.get('values')):\n if ',' in value or '.' in value:\n dict[metric.get('name')] = float(value)\n else:\n dict[metric.get('name')] = int(value)\n list.append(dict)\n\n df = pd.DataFrame(list)\n return df", "def json2pd(json_results):\n\n data = []\n for line in json_results.split(\"\\n\"):\n if line:\n data.append(json.loads(line))\n\n df = pd.DataFrame(data)\n # process some of the fields\n df.timestamp = pd.to_datetime(df.timestamp, unit=\"s\")\n # drop rows whose \"metric\" is \"Timestamp\"\n df = df[[\"Timestamp\" not in x for x in df.metric]]\n # Set a multiindex\n df = df.set_index([\"test\", \"metric\", \"timestamp\"])\n # Keep only some columns\n df = df[[\"labels\", \"value\", \"unit\", \"run_uri\"]]\n return df", "def _prepare_geocode_result(results):\n # Prepare the data for the DataFrame as a dict of lists\n d = defaultdict(list)\n index = []\n\n for i, s in iteritems(results):\n address, loc = s\n\n # loc is lat, lon and we want lon, lat\n if loc is None:\n p = Point()\n else:\n p = Point(loc[1], loc[0])\n\n if address is None:\n address = np.nan\n\n d['geometry'].append(p)\n d['address'].append(address)\n index.append(i)\n\n df = gpd.GeoDataFrame(d, index=index)\n df.crs = from_epsg(4326)\n\n return df", "def make_all_html_results(cmd, folder_names = [], jobs=[]):\n run = cmd.results.current['run_name']\n if not os.path.exists(pjoin(cmd.me_dir, 'HTML', run)):\n os.mkdir(pjoin(cmd.me_dir, 'HTML', run))\n \n unit = cmd.results.unit\n P_text = \"\" \n Presults = collect_result(cmd, folder_names=folder_names, jobs=jobs)\n \n for P_comb in Presults:\n P_text += P_comb.get_html(run, unit, cmd.me_dir) \n P_comb.compute_values()\n if cmd.proc_characteristics['ninitial'] == 1:\n P_comb.write_results_dat(pjoin(cmd.me_dir, 'SubProcesses', P_comb.name,\n '%s_results.dat' % run))\n \n Presults.write_results_dat(pjoin(cmd.me_dir,'SubProcesses', 'results.dat')) \n \n fsock = open(pjoin(cmd.me_dir, 'HTML', run, 'results.html'),'w')\n fsock.write(results_header)\n fsock.write('%s <dl>' % Presults.get_html(run, unit, cmd.me_dir))\n fsock.write('%s </dl></body>' % P_text)\n\n return Presults.xsec, Presults.xerru", "def generate_report(path_to_results: str, save_path: str, format: str) -> None:\n path_to_results = (\n path_to_results if path_to_results[-1] != \"/\" else path_to_results[:-1]\n )\n files = glob.glob(f\"{path_to_results}/*\")\n\n if len(files) == 0:\n raise Exception(f\"There were no result files in the path {path_to_results}\")\n\n raw_results = []\n for result_file in files:\n with open(result_file, \"rb\") as handle:\n 
raw_results.append(pickle.load(handle))\n\n results_dict = []\n for raw in raw_results:\n runtime = np.mean([i[\"runtime\"] for i in raw[\"results\"]])\n memory = np.mean(\n [\n i[\"memory_stats\"][\"max_memory\"]\n for i in raw[\"results\"]\n if i[\"memory_stats\"] is not None\n ]\n )\n result = {\n \"layer\": raw[\"layer\"],\n \"batch_size\": raw[\"batch_size\"],\n \"num_runs\": raw[\"num_runs\"],\n \"num_repeats\": raw[\"num_repeats\"],\n \"forward_only\": raw[\"forward_only\"],\n \"gsm_mode\": raw[\"gsm_mode\"],\n \"runtime\": runtime,\n \"memory\": memory,\n }\n results_dict.append(result)\n\n results = pd.DataFrame(results_dict)\n\n results[\"gsm_mode\"][results[\"layer\"].str.startswith(\"dp\")] = (\n \"dp_\" + results[\"gsm_mode\"]\n )\n results[\"layer\"] = results[\"layer\"].str.replace(\"dp\", \"\")\n\n pivot = results.pivot_table(\n index=[\"batch_size\", \"num_runs\", \"num_repeats\", \"forward_only\", \"layer\"],\n columns=[\"gsm_mode\"],\n values=[\"runtime\"],\n )\n\n def add_ratio(df, metric, variant):\n if variant not in df.columns.get_level_values(\"gsm_mode\"):\n for ametric in df.columns.get_level_values(0):\n df[(ametric, variant)] = np.nan\n\n df[(metric, f\"{variant}/baseline\")] = (\n df.loc[:, (metric, variant)] / df.loc[:, (metric, \"baseline\")]\n )\n\n if \"baseline\" in results[\"gsm_mode\"].tolist():\n for m in set(results[\"gsm_mode\"].tolist()) - {\"baseline\"}:\n add_ratio(pivot, \"runtime\", m)\n pivot.columns = pivot.columns.set_names(\"value\", level=1)\n\n output = pivot.sort_index(axis=1).sort_values(\n [\"batch_size\", \"num_runs\", \"num_repeats\", \"forward_only\"]\n )\n if format == \"csv\":\n output.to_csv(save_path)\n else:\n output.to_pickle(save_path)", "def retrieve_multiple_time_series(self,run='latest',run_data=None,criteria={},timestep='daily',name_fn=name_element_variable):\n if timestep==\"daily\":\n suffix = \"\"\n else:\n suffix = \"/aggregated/%s\"%timestep\n\n if run_data is None:\n run_data = self.retrieve_run(run)\n\n retrieved={}\n def name_column(result):\n col_name = name_fn(result)\n if col_name in retrieved:\n i = 1\n alt_col_name = '%s %d'%(col_name,i)\n while alt_col_name in retrieved:\n i += 1\n alt_col_name = '%s %d'%(col_name,i)\n col_name = alt_col_name\n return col_name\n\n units_store = {}\n for result in run_data['Results']:\n if self.result_matches_criteria(result,criteria):\n d = self.retrieve_json(result['TimeSeriesUrl']+suffix)\n result.update(d)\n col_name = name_column(result)\n# raise Exception(\"Duplicate column name: %s\"%col_name)\n if 'Events' in d:\n retrieved[col_name] = d['Events']\n units_store[col_name] = result['Units']\n else:\n all_ts = d['TimeSeries']\n for ts in all_ts:\n col_name = name_column(ts)\n units_store[col_name] = ts['Units']\n\n vals = ts['Values']\n s = self.parse_veneer_date(ts['StartDate'])\n e = self.parse_veneer_date(ts['EndDate'])\n if ts['TimeStep']=='Daily':\n f='D'\n elif ts['TimeStep']=='Monthly':\n f='M'\n elif ts['TimeStep']=='Annual':\n f='A'\n dates = pd.date_range(s,e,freq=f)\n retrieved[col_name] = [{'Date':d,'Value':v} for d,v in zip(dates,vals)]\n # Multi Time Series!\n\n result = self._create_timeseries_dataframe(retrieved)\n for k,u in units_store.items():\n result[k].units = u\n\n return result", "def results():\n df = mdr.elo_ratings.sort_values([\"elo\", \"matches\"], ascending=False)\n df[\"score\"] = (1 / (1 + 10**((1500 - df[\"elo\"]) / 400))) * 100\n df = df[[\"items\", \"score\", \"matches\"]]\n\n html_formatted = df.to_html(classes=[\"table\", 
\"table-dark\", \"table-hover\"], index=False, float_format=\"%.1f\")\n\n return render_template('results.html', table=html_formatted)", "def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)", "def get_df_all_results(self, file):\n # read csv into dataframe\n df = pd.read_csv(file)\n # rename columns\n names = [\"index\", \"samp1\", \"samp2\", \"es\", \"sd1\", \"sd2\", \"k\", \"perm\",\n \"t_test\"]\n df.columns = names\n return df", "def get_data( filepath_query, filepath_results ):\n with open( filepath_query, 'r' ) as query_file:\n query = json.load( query_file )\n \n query_text = query['query']['multi_match']['query']\n query_scores = query['nlp_scores']\n query_data = {\n 'query_text' : query_text,\n 'bias_score' : query_scores['bias_score'],\n 'vocab_richness' : query_scores['stylo_scores']['vocab_richness'],\n 'hapax_legomena' : query_scores['stylo_scores']['hepax_legomena'],\n 'wordlength' : query_scores['stylo_scores']['readability_measures']['average_wordlength'],\n 'sentlength' : query_scores['stylo_scores']['readability_measures']['average_sentlength'],\n 'spelling_errors' : query_scores['stylo_scores']['spelling_errors'],\n 'topics' : query_scores['topics']\n }\n\n with open( filepath_results ) as results_file:\n results = json.load( results_file )\n \n results_data = []\n for doc in results:\n argID = doc['_source']['argsMeID']\n premise = doc['_source']['premise']\n average_wordlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_wordlength']\n average_sentlength = doc['nlp_scores']['stylo_scores']['readability_measures']['average_sentlength']\n bias_score = doc['nlp_scores']['bias_score']\n bias_distance = doc['bias_distance']\n stylo_distance = doc['stylo_distance']\n topic_match_count = doc['topic_match_count']\n old_score = doc['old_score']\n new_score = doc['new_score']\n scoring_distance = doc['scoring_distance']\n old_rank = doc['old_rank']\n new_rank = doc['new_rank']\n \n doc_data = {\n 'argID' : argID,\n 'premise' : premise,\n 'wordlength' : average_wordlength,\n 'sentlength' : average_sentlength,\n 'bias_score' : bias_score,\n 'bias_distance' : bias_distance,\n 'stylo_distance' : stylo_distance,\n 'topic_match_count' : topic_match_count,\n 'old_score' : old_score,\n 'new_score' : new_score,\n 'scoring_distance' : scoring_distance,\n 'old_rank' : old_rank,\n 'new_rank' : new_rank\n }\n results_data.append( doc_data )\n\n data_tuple = ( query_data, results_data )\n return data_tuple", "def construct_df():\n iterable = [['approach', 'contact', 'retract', 'pause'], ['force', 'height']]\n index = pd.MultiIndex.from_product(iterable, names=['segment', 'channel'])\n return pd.DataFrame(columns=index)", "def compute_metrics(self, results: list) -> dict:", "def gather_data_from_multiple_runs(folder_path, runs, dataset=\"test\", tag='train/cross_entropy'):\n\n series = []\n for run in runs:\n path = os.path.join(folder_path,run,dataset)\n steps,values = extract_from_all_eventfiles(path, tag)\n print(\"run: {} extracted {} values\".format(run,len(values))) \n\n if len(values) > 0:\n\n s = pd.Series(data=values, index=steps,name=run).sort_index()\n # drop duplicates\n s = s[~s.index.duplicated(keep='last')]\n\n #.drop_duplicates(keep='last')\n series.append(s)\n \n return pd.concat(series,axis=1,join=\"outer\")", "def get_rater_dataframe(parsed_dir, rater_name):\n print('Converting to dataframe: ', rater_name)\n list_df = []\n for parsed_files in parsed_dir:\n file_name = 
parsed_files[0]\n annotations = parsed_files[1]\n tmp_df = pd.DataFrame(annotations, columns=['raw_text','annotation_list'])\n tmp_df['file_name'] = file_name\n tmp_df['rater_name'] = rater_name\n list_df.append(tmp_df)\n df = pd.concat(list_df)\n\n df = mark_empty_annotations(df)\n df = normalize_annotations(df)\n return(df)", "def analyze_results(results): #, result_nonprivate):\n res_dimensions = zip(*results)\n mean, std = [], []\n \n for resdim in res_dimensions:\n mean.append ( numpy.average(resdim) )\n std.append ( numpy.std(resdim) )\n\n return mean, std", "def get_results(self):\r\n r ={\r\n 'Gp_s' :np.array(self.Gp_guess_s), #Means of the posteriors\r\n 'Gm_s' :np.array(self.Gm_guess_s), #Means of the posteriors\r\n 'eGp_s':np.array(self.eGp_guess_s) , #Standard deviations of the posteriors\r\n 'eGm_s':np.array(self.eGm_guess_s) , #Standard deviations of the posteriors\r\n 'covGpGm_s':np.array(self.cov_GpGm_s) , #Covariances of the posteriors\r\n 't_tot_pulseSequences_s':np.array(self.t_tot_pulseSequences_s), #Time it takes for each batch of pulse sequences\r\n 't_tot_process_s':np.array(self.t_tot_process_s), #CPU time it takes for processing the Bayes stuff at each iteration\r\n 't_probe_p_s':np.array(self.t_probe_p_s), # Times probed for the diff p\r\n 't_probe_m_s':np.array(self.t_probe_m_s), # Times probed for the diff m\r\n 't_pulseSequences_s':np.array(self.t_pulseSequences_s),\r\n 't_process_s':np.array(self.t_process_s),\r\n 'iteration_s':np.array(self.nb_iteration_s), #Number of iterations performed\r\n 'R_tot_s':np.array(self.R_tot_s) #Total number of readout performed\r\n }\r\n return r", "def get_results_df(fname, problem):\n t = '\\t'\n \n # Cols to add:\n val_cols = ['Actions','Expansions','GoalTests','NewNodes','PlanLength','ElapsedSeconds']\n err = ''\n df = pd.read_csv(fname, sep=t)\n if df.shape[0] < len(val_cols):\n err = f'Data for {fname.name} is incomplete.'\n return None, err\n \n # Rename cols: c (temp) -> Searcher\n df.columns = ['c', 'Searcher']\n # Add new cols & reindex\n df = df.reindex(columns = df.columns.tolist() + val_cols)\n \n # Populate new cols according to row with search name:\n sr = df.loc[df.c == 'Searcher', 'Searcher'] \n for (idx, sr_row) in sr.items():\n j = idx\n for c in df.columns[2:].tolist():\n j += 1\n if c == 'ElapsedSeconds':\n df.loc[idx, c] = float(df.loc[j, 'Searcher'])\n else:\n df.loc[idx, c] = int(df.loc[j, 'Searcher'])\n\n df.dropna(inplace=True)\n # Add a minute column:\n df['Minutes'] = np.round(df.ElapsedSeconds/60, 3)\n \n # Replace values of 1st col with problem name & update col name:\n df['c'] = problem\n df.rename(columns={'c': 'Problem'}, inplace=True)\n df.reset_index(drop=True, inplace=True)\n \n return df, ''", "def create_dataframe(self):\n sessions = pandas.DataFrame().from_dict(self.values)\n sessions_lists = pandas.DataFrame().from_dict(self.lists)\n return sessions, sessions_lists", "def interpret_results(self):\n rows = []\n headers = [\"option\", \"\"] + self.results_metadata[\"names\"]\n monitored_values = []\n\n # Construct the rows, using the metadata from the results dict\n for v in self.results_dict.values():\n config_str = \" \".join(v[\"configuration\"])\n vals, stds = [], []\n for i, name in enumerate(self.results_metadata[\"names\"]):\n val, std = self._avg_sd_from_list(v[name])\n vals.append(val)\n stds.append(std)\n if i in self.results_metadata[\"indices_to_monitor\"]:\n monitored_values.append(val)\n rows.append([config_str, \"mean\"] + [str(i) for i in vals])\n rows.append([\"\", \"std 
dev\"] + [str(i) for i in stds])\n\n # Now go through monitored values, finding the best and adding a '*'\n n_monitored = len(self.results_metadata[\"indices_to_monitor\"])\n for i in range(n_monitored):\n vals = monitored_values[i::n_monitored]\n if self.results_metadata[\"best_criterion\"][i] == \"max\":\n best_idx = vals.index(max(vals)) * 2 # *2 to skip std rows\n elif self.results_metadata[\"best_criterion\"][i] == \"min\":\n best_idx = vals.index(min(vals)) * 2 # *2 to skip std rows\n rows[best_idx][self.results_metadata[\"indices_to_monitor\"][i] + 2] += \"*\"\n # line above, 2 is to offset first two columns in table\n\n return simple_table(rows, headers)", "def _process_extraction_results(self, result, metapaths, start_ids, end_ids, start_name, end_name,\n return_sparse=False, sparse_df=True, verbose=False):\n from itertools import product\n\n if return_sparse:\n # Turn each result matrix into a series\n if verbose:\n print('\\nReshaping Result Matrices...')\n time.sleep(0.5)\n\n size = result[0].shape[0]*result[0].shape[1]\n if verbose:\n result = [mt.reshape(res, (size, 1)) for res in tqdm(result)]\n else:\n result = [mt.reshape(res, (size, 1)) for res in result]\n\n if verbose:\n print('Stacking columns...')\n result = hstack(result)\n\n if sparse_df:\n if verbose:\n # Past all the series together into a DataFrame\n print('\\nGenerating DataFrame...')\n result = pd.SparseDataFrame(result, columns=metapaths, default_fill_value=0.0)\n\n start_end_df = pd.DataFrame(list(product(start_ids, end_ids)), columns=[start_name, end_name])\n\n # Return a list of the metapath names that indicies correspond to result columns\n if not sparse_df:\n return (start_end_df, metapaths), result\n\n return start_end_df, result\n\n # Turn each result matrix into a series\n if verbose:\n print('\\nFormatting results to series...')\n time.sleep(0.5)\n\n # Currently running in series. Extensive testing has found no incense in speed via Parallel processing\n # However, parallel usually results in an inaccurate counter.\n if verbose:\n for i in tqdm(range(len(metapaths))):\n result[i] = mt.to_series(result[i], name=metapaths[i]).reset_index(drop=True)\n else:\n for i in range(len(metapaths)):\n result[i] = mt.to_series(result[i], name=metapaths[i]).reset_index(drop=True)\n\n # Paste all the series together into a DataFrame\n if verbose:\n print('\\nConcatenating series to DataFrame...')\n start_end_df = pd.DataFrame(list(product(start_ids, end_ids)), columns=[start_name, end_name])\n\n return start_end_df, pd.concat(result, axis=1)", "def make_results(self):\n statistic_value, p_value = self.stats\n accept_hypothesis = self.accept_hypothesis(statistic_value)\n\n return FrequentistTestResults(\n control=self.comparison.d2,\n variation=self.comparison.d1,\n delta=self.comparison.delta,\n delta_relative=self.comparison.delta_relative,\n effect_size=self.comparison.effect_size,\n alpha=self.comparison.alpha,\n power=self.comparison.power,\n confidence_interval=self.ci,\n test_statistic=self.test_statistic,\n statistic_value=statistic_value,\n p_value=p_value,\n df=None,\n hypothesis=self.hypothesis_text,\n accept_hypothesis=accept_hypothesis,\n inference_procedure=self,\n warnings=self.comparison.warnings\n )", "def collect_dataset_experiment_results(ray_task_list: list):\n res_list = []\n total_jobs = len(ray_task_list)\n logger.info('Collecting jobs. 
total_jobs={}'.format(total_jobs))\n for job_num in range(total_jobs):\n t1 = time.time()\n ready_id, ray_task_list = ray.wait(ray_task_list)\n res_i = ray.get(ready_id[0])\n res_list.append(res_i)\n\n # Report\n dataset_name = res_i['dataset_name'][0]\n n_trainset, n_valset, n_testset = res_i['trainset_size'][\n 0], res_i['valset_size'][0], res_i['testset_size'][0]\n logger.info('[{:04d}/{}] {}. Size [train val test]=[{:03d} {} {}] in {:3.1f}s.'.format(\n job_num, total_jobs - 1, dataset_name, n_trainset, n_valset, n_testset, time.time() - t1))\n\n # Save to file\n res_df = pd.concat(res_list, ignore_index=True, sort=False)\n res_df = res_df.sort_values(by=['dataset_name', 'num_features', 'trainset_size', 'split'],\n ascending=[False, True, True, True])\n return res_df", "def mptt_result_list(cl):\n return {'cl': cl,\n 'result_headers': list(result_headers(cl)),\n 'results': list(mptt_results(cl))}" ]
[ "0.72283614", "0.71309644", "0.6913391", "0.6884854", "0.6688005", "0.65761745", "0.6487272", "0.6475751", "0.6433946", "0.632648", "0.6261752", "0.6226283", "0.6154", "0.6113945", "0.610395", "0.6074205", "0.6056309", "0.60420865", "0.6016165", "0.5957813", "0.59397256", "0.5927495", "0.59171116", "0.59171116", "0.59165233", "0.58962923", "0.58832914", "0.5858185", "0.58419687", "0.58330846", "0.5824556", "0.58169895", "0.5809914", "0.5775016", "0.5769334", "0.5766955", "0.57599235", "0.5750376", "0.57472306", "0.5719968", "0.57169795", "0.57100415", "0.5709618", "0.57024693", "0.5684153", "0.5683374", "0.56787014", "0.5661047", "0.5660301", "0.5655489", "0.5648003", "0.5645456", "0.56388694", "0.56249887", "0.5622374", "0.5616412", "0.5612645", "0.56115925", "0.56105393", "0.5608848", "0.5604047", "0.5599147", "0.55841386", "0.5566865", "0.55592495", "0.55581117", "0.5540803", "0.5537473", "0.5534688", "0.5526765", "0.5515264", "0.5513018", "0.55129915", "0.5504016", "0.55006444", "0.54952556", "0.5491216", "0.54899585", "0.5479973", "0.54786694", "0.54741746", "0.5470635", "0.54698247", "0.5456702", "0.54498214", "0.5438903", "0.5438667", "0.54299676", "0.5428695", "0.5428093", "0.5427484", "0.54269075", "0.54262346", "0.5416442", "0.5406463", "0.5397416", "0.5385635", "0.5384633", "0.5383917", "0.53783417" ]
0.73413634
0
Takes a pandas dataframe of results and turns it back into a simple results array, which only contains the populations' posterior probability distributions over generations.
def dataframe_to_language_stats(dataframe, n_runs, n_batches, n_gens, possible_form_lengths): if len(possible_form_lengths) == 1: n_language_classes = 4 else: n_language_classes = 7 #TODO: or should this be 6 (i.e. collapsing the two different reduplication strategies?) proportion_column = np.array(dataframe['proportion']) proportion_column_as_results = proportion_column.reshape((n_runs*n_batches, n_gens, n_language_classes)) return proportion_column_as_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pandas(self):\n names,prior,posterior = [],[],[]\n for iname,name in enumerate(self.posterior_parameter.row_names):\n names.append(name)\n posterior.append(np.sqrt(float(\n self.posterior_parameter[iname, iname]. x)))\n iprior = self.parcov.row_names.index(name)\n prior.append(np.sqrt(float(self.parcov[iprior, iprior].x)))\n for pred_name, pred_var in self.posterior_prediction.items():\n names.append(pred_name)\n posterior.append(np.sqrt(pred_var))\n prior.append(self.prior_prediction[pred_name])\n return pd.DataFrame({\"posterior\": posterior, \"prior\": prior},\n index=names)", "def _reformat_results(self, results, strategy='wavelet_peaks'):\n return np.array(results)", "def make_results_df(results):\n max_val = max(x[1] for x in results)\n\n df = []\n for i in range(max_val + 1):\n df.append([])\n for j in range(max_val + 1):\n df[-1].append(results.get((i, j), np.nan))\n return pd.DataFrame(df)", "def get_pandas(self):\n return pd.DataFrame(self.results)", "def __array__(self):\n return pa.column(\"dummy\", self.data).to_pandas().values", "def pr2sample(self, df_prval):\n try:\n var_rand = list(self.marginals.keys())\n except AttributeError:\n var_rand = []\n\n ## Empty case\n if len(var_rand) == 0:\n return DataFrame()\n\n ## Variables to convert\n var_comp = list(set(var_rand).intersection(set(df_prval.columns)))\n if len(var_comp) == 0:\n raise ValueError(\n \"Intersection of df_prval.columns and var_rand must be nonempty\"\n )\n\n samples = zeros(df_prval[var_comp].shape)\n ## Ensure correct column ordering\n prval = df_prval[var_comp].values\n\n ## Apply appropriate marginal\n for ind in range(len(var_comp)):\n ## Map with inverse density\n var = var_comp[ind]\n samples[:, ind] = self.marginals[var].q(prval[:, ind])\n\n return DataFrame(data=samples, columns=var_comp)", "def get_results(self):\r\n r ={\r\n 'Gp_s' :np.array(self.Gp_guess_s), #Means of the posteriors\r\n 'Gm_s' :np.array(self.Gm_guess_s), #Means of the posteriors\r\n 'eGp_s':np.array(self.eGp_guess_s) , #Standard deviations of the posteriors\r\n 'eGm_s':np.array(self.eGm_guess_s) , #Standard deviations of the posteriors\r\n 'covGpGm_s':np.array(self.cov_GpGm_s) , #Covariances of the posteriors\r\n 't_tot_pulseSequences_s':np.array(self.t_tot_pulseSequences_s), #Time it takes for each batch of pulse sequences\r\n 't_tot_process_s':np.array(self.t_tot_process_s), #CPU time it takes for processing the Bayes stuff at each iteration\r\n 't_probe_p_s':np.array(self.t_probe_p_s), # Times probed for the diff p\r\n 't_probe_m_s':np.array(self.t_probe_m_s), # Times probed for the diff m\r\n 't_pulseSequences_s':np.array(self.t_pulseSequences_s),\r\n 't_process_s':np.array(self.t_process_s),\r\n 'iteration_s':np.array(self.nb_iteration_s), #Number of iterations performed\r\n 'R_tot_s':np.array(self.R_tot_s) #Total number of readout performed\r\n }\r\n return r", "def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n self._imequalize(results)\n return results", "def language_stats_to_dataframe(results, n_runs, n_gens, possible_form_lengths):\n\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. 
collapsing the two different reduplication strategies?)\n\n column_proportion = np.array(results)\n\n if n_language_classes == 4 and column_proportion.shape[2] > n_language_classes:\n column_proportion_compositional_summed = np.zeros((n_runs, n_gens, n_language_classes))\n for r in range(len(column_proportion_compositional_summed)):\n for g in range(len(column_proportion_compositional_summed[0])):\n column_proportion_compositional_summed[r][g] = np.array([column_proportion[r][g][0], column_proportion[r][g][1], column_proportion[r][g][2]+column_proportion[r][g][3], column_proportion[r][g][4]])\n column_proportion = column_proportion_compositional_summed.flatten()\n\n else:\n column_proportion = column_proportion.flatten()\n\n column_runs = []\n for i in range(n_runs):\n for j in range(n_gens):\n for k in range(n_language_classes):\n column_runs.append(i)\n column_runs = np.array(column_runs)\n\n column_generation = []\n for i in range(n_runs):\n for j in range(n_gens):\n for k in range(n_language_classes):\n column_generation.append(j)\n column_generation = np.array(column_generation)\n\n column_type = []\n for i in range(n_runs):\n for j in range(n_gens):\n if len(possible_form_lengths) == 1:\n column_type.append('degenerate')\n column_type.append('holistic')\n column_type.append('compositional')\n column_type.append('other')\n else:\n column_type.append('degenerate')\n column_type.append('holistic')\n column_type.append('holistic_diversify_signal')\n column_type.append('compositional')\n column_type.append('compositional_reduplicate_segments')\n column_type.append('compositional_reduplicate_whole_signal')\n column_type.append('other')\n\n data = {'run': column_runs,\n 'generation': column_generation,\n 'proportion': column_proportion,\n 'class': column_type}\n\n lang_class_prop_over_gen_df = pd.DataFrame(data)\n\n return lang_class_prop_over_gen_df", "def _build_results(self):\n results = {}\n cols = []\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n cols.append(get_rate_column(pol, adjustment=adj, generated=False))\n cols.append(get_column(pol, adjustment=adj))\n cols.append(\"net_consumed_mwh\")\n for ba in self.regions:\n results[ba] = pd.DataFrame(\n index=self.generation.index, columns=cols, dtype=np.float64\n )\n return results", "def transform(self, X, treatment_values=None):\n treatment_values = 1 if treatment_values is None else treatment_values\n\n res = self.learner.predict_proba(X)[:, treatment_values]\n res = pd.DataFrame(res, index=X.index, columns=[\"propensity\"])\n if self.include_covariates:\n res = X.join(res)\n return res", "def sample2pr(self, df_sample):\n try:\n var_rand = list(self.marginals.keys())\n except AttributeError:\n var_rand = []\n\n ## Empty case\n if len(var_rand) == 0:\n return DataFrame()\n\n ## Variables to convert\n var_comp = list(set(var_rand).intersection(set(df_sample.columns)))\n if len(var_comp) == 0:\n raise ValueError(\n \"Intersection of df_sample.columns and var_rand must be nonempty\"\n )\n\n prval = zeros(df_sample[var_comp].shape)\n ## Ensure correct column ordering\n sample = df_sample[var_comp].values\n\n ## Apply appropriate marginal\n for ind in range(len(var_comp)):\n ## Map with inverse density\n var = var_comp[ind]\n prval[:, ind] = self.marginals[var].p(sample[:, ind])\n\n return DataFrame(data=prval, columns=var_comp)", "def gen_ppmi_dataframe(df):\n print(\"Finding ppmi values.\")\n total_playcount = sum(df.sum())\n user_playcounts = df.sum(axis=1)\n artist_playcounts = df.sum(axis=0)\n ppmi_df = copy.copy(df)\n count = 
0\n for user, user_artist_playcounts in df.iterrows():\n count += 1\n for artist in user_artist_playcounts.index:\n user_artist_playcount = user_artist_playcounts[artist]\n if user_artist_playcount == 0.0:\n ppmi = 0.0\n else:\n x = total_playcount * user_artist_playcount\n y = user_playcounts[user] * artist_playcounts[artist]\n ppmi = max(0.0, x / y)\n ppmi_df.at[user, artist] = ppmi\n print(str(count) + \"/\" + str(len(user_playcounts)) + \" users counted.\")\n\n return ppmi_df", "def _make_results_dataframe(self):\n LOG.debug(\"Creating Results Dataframes.\")\n results_df = tfs.TfsDataFrame(index=self.twiss_df.index)\n results_df[\"S\"] = self.twiss_df[\"S\"]\n return results_df", "def extract_results(grid_search):\n results = grid_search.cv_results_.copy()\n params = pd.DataFrame(results.pop('params'))\n values = pd.DataFrame(results)\n values = values.loc[:, ~values.columns.str.contains('param_')]\n df = pd.concat([params, values], axis=1)\n df = df.set_index(list(params.columns))\n df = df.sort_values('rank_test_neg_mean_squared_error')\n return df", "def process_results(df, method, res):\n minimum = np.ones(res[\"x\"].shape[0])\n dist = np.linalg.norm(res[\"x\"] - minimum) / np.linalg.norm(minimum)\n df.loc[method, :] = [res[\"nfev\"], dist]\n return df", "def get_aggregation_popensity_df():\n df = pandas.read_csv(fixpath('DnaK_reactions_parameters_5.csv'))\n\n return df.set_index('gene')", "def competitions(self) -> DataFrame[Any]:", "def create_beta_posteriors(df):\n goods = df.num_matured - df.fpd\n df['alpha_p'] = df.alpha + df.fpd\n df['beta_p'] = df.beta + goods\n return df", "def get_final_values(self):\n temp_scaled_val = []\n for question in self.question_list:\n test = question.get_final_value()\n temp_scaled_val.append(test)\n return pd.DataFrame(temp_scaled_val).transpose()", "def probability_array(self):\n q = self.apply_weights()\n return np.exp(q)/(1 + np.exp(q))", "def computeRawPercNaNTable(df):\n species = df[\"PG.Organisms\"].unique()\n percentageList = []\n specieList = []\n for i in species:\n percentages = allSamplesPercNaN(df, specie = i)\n percentageList.append(percentages)\n specieList.append(i)\n \n valuesList = []\n samples = percentageList[0][\"sample\"]\n for i in range(len(percentageList)):\n valuesList.append(percentageList[i][\"percentage_NaN\"].values)\n percTable = pd.DataFrame(np.array([samples.values] + valuesList).T, columns = [\"sample\"] + list(species))\n return percTable", "def predict_proba(self):\n if self.rank_prob is None:\n raise ValueError('No results available. Did you already call predict(...)?')\n\n return np.array([sum(map(lambda x: x[1], result)) / len(result) for result in self.rank_prob])", "def to_dataframe(self):\n import pandas as pd\n\n d = OrderedDict({\"y\": self.y})\n for pred in self.predictors:\n d[pred.name] = pred.x\n \n df = pd.DataFrame(d)\n return df", "def prepare_wg_data(results):\n wg_df = pd.DataFrame(results)\n wg_df['search_engine'] = 'wg-gesucht.de'\n return wg_df", "def prepare_output_results(df, cell_features, pop_features, pop_type, neg, output):\n\n print('\\n Calculating penetrance values per %s...' 
% pop_type)\n\n # Initialize output file\n columns = pop_features + ['Num_cells', 'Penetrance', 'KS_Penetrance', 'WT_percentile_at_threshold']\n df_output = pd.DataFrame(columns=columns)\n\n # WT scores at each percentile for maximum difference calculation\n wt_s_all = np.sort(df[df['Strain ID'].isin(neg)]['Score'].values)\n wt_s_sampled = np.array([stats.scoreatpercentile(wt_s_all, p) for p in range(1, 101)])\n wt_s_sampled_cdf = np.arange(len(wt_s_sampled)) / float(len(wt_s_sampled) - 1)\n wt_scores = (wt_s_all, wt_s_sampled, wt_s_sampled_cdf)\n\n # Initialize scores file\n df_scores = pd.DataFrame(columns=cell_features + ['Score', 'Is_outlier'])\n\n # Calculate penetrance for all wells\n this_row = 0\n for pop in df['Population'].unique():\n # Well data\n df_pop = df[df['Population'] == pop][cell_features + ['Population', 'Score']].reset_index(drop=True)\n pop_info, df_scores_pop = calculate_penetrace_per_group(df_pop, pop_features, wt_scores)\n\n # Append scores\n df_scores = pd.concat([df_scores, df_scores_pop], sort=False, ignore_index=True)\n\n # Append results for this well\n df_output.loc[this_row, ] = pop_info\n this_row += 1\n\n # Save results\n df_output = df_output.sort_values('Penetrance', ascending=False)\n df_output = df_output.reset_index(drop=True)\n df_output.drop(labels='Population', axis=1).to_csv(output['ODresults'].replace('.', '_%s.' % pop_type),\n index=False)\n df_scores.drop(labels='Population', axis=1).to_csv(output['ScoreCells'].replace('.', '_%s.' % pop_type),\n index=False)\n\n return df_output, df_scores", "def results_psavg_sims():\n posterior_means = [[1.18040327516, 7.55106444832, 3.27420103073, 3.51998795534, 0.67212630002],\n [0.619197296326, 6.49420626987, 2.22495505139, 2.27682390376, 0.678172183554],\n [0.856628471666, 5.94732402905, 3.97580346111, 3.85788708662, 0.690090617623],\n [0.774906025167, 7.34275742443, 2.69729821931, 2.97994334746, 0.663015258594]]\n\n\n sgr1900_results.results_psavg_sims(posterior_means, [5,6,8,12], \"sgr1806\")\n\n return", "def parse_result_series(result):\n if isinstance(result, np.ndarray):\n return result\n\n if result is None or not len(result):\n return None\n\n dates, values = result\n return pd.DataFrame({0:dates.astype(int)/1000,1:values})", "def ana_results_to_plotables(ana_results,analysis_attributes):\n plot_attributes = [\n (ana_attr.numbers,ana_attr.range)\n for ana_attr in analysis_attributes\n ]\n plotable_fnc = {\n 'SMAP': dec_tupl(create_mesh_grid, 'MESH'),\n 'PTS': dec_tupl(create_histogram, 'HIST')\n }\n return [\n plotable_fnc[stype](data,*plt_attr)\n for (data, stype), plt_attr in zip(ana_results,plot_attributes)\n ]", "def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n h, w = results['image'].shape[:2]\n center = self.center\n if center is None:\n center = ((w - 1) * 0.5, (h - 1) * 0.5)\n angle = random_negative(self.angle, self.random_negative_prob)\n self._rotate_img(results, angle, center, self.scale)\n return results", "def example_two():\n \n ids = ['PLA', 'PHA', 'Sp']\n d1 = np.random.uniform(11, 12, (10,1))\n d2 = np.random.uniform(1000, 2000, (10,1))\n d3 = np.random.uniform(0.1, 0.3, (10,1))\n data = np.hstack((d1, d2, d3))\n data[5,-1] = 100\n \n results = []\n for row in range(data.shape[0]):\n pr_obj = Promiscuity(ids, np.concatenate((\n data[:row], data[row+1:]), axis=0), None)\n results.append(pr_obj.results())\n return results", "def _process_results(self):\n self.portfolio.create_backtest_result_dataframe()\n stats = self._show_stats()\n return 
stats", "def obtain_percentile_values(results, simulation_parameters):\n percentile = simulation_parameters['percentile']\n\n path_loss_values = []\n received_power_values = []\n interference_values = []\n sinr_values = []\n spectral_efficiency_values = []\n estimated_capacity_values = []\n estimated_capacity_values_km2 = []\n\n for result in results:\n\n path_loss_values.append(result['path_loss'])\n\n received_power_values.append(result['received_power'])\n\n interference_values.append(result['interference'])\n\n sinr = result['sinr']\n if sinr == None:\n sinr = 0\n else:\n sinr_values.append(sinr)\n\n spectral_efficiency = result['spectral_efficiency']\n if spectral_efficiency == None:\n spectral_efficiency = 0\n else:\n spectral_efficiency_values.append(spectral_efficiency)\n\n estimated_capacity = result['capacity_mbps']\n if estimated_capacity == None:\n estimated_capacity = 0\n else:\n estimated_capacity_values.append(estimated_capacity)\n\n estimated_capacity_km2 = result['capacity_mbps_km2']\n if estimated_capacity_km2 == None:\n estimated_capacity_km2 = 0\n else:\n estimated_capacity_values_km2.append(estimated_capacity_km2)\n\n percentile_site_results = {\n 'results_type': (\n '{}_percentile'.format(percentile)\n ),\n 'path_loss': np.percentile(\n path_loss_values, percentile\n ),\n 'received_power': np.percentile(\n received_power_values, percentile\n ),\n 'interference': np.percentile(\n interference_values, percentile\n ),\n 'sinr': np.percentile(\n sinr_values, percentile\n ),\n 'spectral_efficiency': np.percentile(\n spectral_efficiency_values, percentile\n ),\n 'capacity_mbps': np.percentile(\n estimated_capacity_values, percentile\n ),\n 'capacity_mbps_km2': np.percentile(\n estimated_capacity_values_km2, percentile\n ),\n }\n\n return percentile_site_results", "def predict_uncertainty(\n prophet_model: Prophet, df: pd.DataFrame, vectorized: bool\n) -> pd.DataFrame:\n sim_values = sample_posterior_predictive(prophet_model, df, vectorized)\n\n lower_p = 100 * (1.0 - prophet_model.interval_width) / 2\n upper_p = 100 * (1.0 + prophet_model.interval_width) / 2\n\n series = {}\n\n for key in [\"yhat\", \"trend\"]:\n series[\"{}_lower\".format(key)] = prophet_model.percentile(\n sim_values[key], lower_p, axis=0\n )\n series[\"{}_upper\".format(key)] = prophet_model.percentile(\n sim_values[key], upper_p, axis=0\n )\n\n return pd.DataFrame(series)", "def get_responses(self, pm_kwargs, rm_kwargs):\n if self.responses is None:\n self.responses = np.zeros(self.T + 1, dtype = np.int)\n self.responses[0] = -1\n for t in range(1, self.T+1):\n if self.rp_t[t-1].sum() == 0:\n self.estimate_response_probability(t-1, pm_kwargs = pm_kwargs,\n rm_kwargs = rm_kwargs)\n \n #sample new response\n self.responses[t] = self.get_response(t-1, sample = True)\n \n #generate new observation from that response\n self.pm.env.generate_observations(t, self.responses[t] )\n \n return pd.DataFrame({r'$r_t$': self.responses})", "def quantile(self, probs, return_pandas=True):\n\n import pandas as pd\n\n probs = np.asarray(probs)\n probs = np.atleast_1d(probs)\n\n if self.data.ndim == 1:\n rslt = self._quantile(self.data, probs)\n if return_pandas:\n rslt = pd.Series(rslt, index=probs)\n else:\n rslt = []\n for vec in self.data.T:\n rslt.append(self._quantile(vec, probs))\n rslt = np.column_stack(rslt)\n if return_pandas:\n columns = [\"col%d\" % (j + 1) for j in range(rslt.shape[1])]\n rslt = pd.DataFrame(data=rslt, columns=columns, index=probs)\n\n if return_pandas:\n rslt.index.name = \"p\"\n\n return 
rslt", "def make_output_df(self):\n df = pd.concat([pd.DataFrame(dat) for dat in [self.qdata, self.pdata]], axis=1)\n columns = np.hstack(([['{}{}'.format(x, c) for c in self.actions] for x in ['q', 'p']]))\n df.columns = columns\n df.insert(0, 'trial', np.arange(1, df.shape[0]+1))\n df['choice'] = self.choices\n df['feedback'] = self.feedback\n# r = np.array(self.bandits.rvalues)\n# p = np.array(self.bandits.preward)\n df['optimal'] = self.demand\n df.insert(0, 'agent', 1)\n self.data = df.copy()", "def _process(self, data: np.ndarray) -> np.ndarray:\n probabilities = np.empty(data.size, dtype=object)\n\n for idx, counts_dict in enumerate(data):\n shots = sum(counts_dict.values())\n freq = counts_dict.get(self._outcome, 0)\n alpha_posterior = [freq + self._alpha_prior[0], shots - freq + self._alpha_prior[1]]\n alpha_sum = sum(alpha_posterior)\n\n p_mean = alpha_posterior[0] / alpha_sum\n p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)\n\n probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))\n\n return probabilities", "def resampler_records_dataframe(self, run_idxs):\n\n return pd.DataFrame(self.resampler_records(run_idxs))", "def pert_stats(df, verbose=False):\n df[df != 0] = 1\n df['num_features'] = df.sum(axis=1)\n df_mean, df_med, df_min, df_max =\\\n df['num_features'].mean(), df['num_features'].median(), df['num_features'].min(), df['num_features'].max()\n if verbose:\n print \"##########Pert Statistics#############\"\n print \"Mean: \", df_mean\n print \"Median: \", df_med\n print \"Min: \", df_min\n print \"Max: \", df_max\n print \"######################################\"\n return df_mean, df_med, df_min, df_max", "def store_predictions(df):\n ts = df[df.columns[1]]\n base = pd.DataFrame(ts)\n preds = make_preds(ts, 'Predicted '+ df.columns[1])\n base.index = df['year']\n base = base.append(pd.DataFrame(preds), sort = True)\n for col in df.columns[2:]:\n ts = df[col]\n temp = pd.DataFrame(ts)\n preds = make_preds(ts, 'Predicted ' + col)\n temp.index = df['year']\n temp = temp.append(pd.DataFrame(preds), sort = True)\n base = base.join(temp)\n return base", "def predictions_for_df(df, inferrer):\n working_df = df.copy()\n working_df['predictions'] = inferrer.get_activations(\n working_df.sequence.values).tolist()\n return working_df", "def probability_array(self, unnormalized=False):\n if self.dice_array is not None:\n raise Exception(\"P-array when using a dice_array not implemented yet.\")\n else:\n if (self.sides is not 6) or (self.number is not 2):\n raise Exception(\"Anything but two six-sided dice not supported yet.\")\n N_combinations = self.sides**self.number\n #Hack for 2d6\n combinations = np.array([1,2,3,4,5,6,5,4,3,2,1])\n probability = combinations/float(N_combinations)\n self.N_combinations = N_combinations\n if unnormalized:\n probability = combinations #don't do the divide\n return probability", "def prepareDataframeForPivot(self, result):\n df = result\n if isinstance(df, pd.Series):\n df = pd.DataFrame({\"values\": df})\n if self._isIndexedDataframe(df):\n if isinstance(df.columns, pd.MultiIndex):\n df.columns = df.columns.map(' | '.join)\n df = df.select_dtypes(include=['float64', 'int64'])\n if df.size == 0:\n df[\"values\"] = np.nan\n # try to keep group measures\n try:\n df.groupMeasures = result.groupMeasures\n except:\n pass\n # try to keep aggMeasures\n try:\n df.aggMeasures = result.aggMeasures\n except:\n pass\n\n return df", "def get_as_pandas_dataframe(self):\n pd_df = pd.DataFrame()\n for name in self.dict_colname_to_index:\n 
pd_df[name] = np.copy(self[name])\n return pd_df", "def export_results(self):\n problemIDs = list(set([result.problemID for result in self.results]))\n configIDs = list(set([result.configID for result in self.results]))\n\n labels = []\n labels.extend(TestResults._fields)\n labels.extend(SizeMetrics._fields) \n # Remove unused columns\n labels.remove(\"size_metrics\")\n labels.remove(\"problemID\")\n labels.remove(\"configID\")\n\n # output = pd.Panel(items=labels, major_axis=problemIDs, minor_axis=configIDs)\n multiindex = pd.MultiIndex.from_product([problemIDs, configIDs], names=[\"problems\", \"configs\"])\n\n output = pd.DataFrame(index=multiindex, columns=labels)\n output.columns.names = [\"stats\"]\n\n for result in self.results:\n problemID = result.problemID\n configID = result.configID\n for label in [label for label in TestResults._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result, label)\n for label in [label for label in SizeMetrics._fields if label in labels]:\n output.loc[(problemID, configID), label] = getattr(result.size_metrics, label)\n\n # Compute Statistics\n output.fillna(value=np.nan, inplace=True)\n output.sort_index(inplace=True)\n try:\n TestFramework.compute_mosek_error(output, \"opt_val\", \"mosek_config\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_mosek_error: 'mosek_config' or 'opt_val' field not found.\")\n try:\n TestFramework.compute_performance(output, \"solve_time\")\n except (KeyError): # pragma: no cover\n print(\"TestFramework.compute_performance: 'solve_time' field not found.\")\n return output", "def _ES_res_to_pandas(self, res, columns, thresh=None):\n \n sources = [x['hits']['hits'][0]['_source'] for x in res['responses']]\n if columns is not None:\n if isinstance(columns, list):\n sources = [{key: val for key, val in x.items() if key in columns} for x in sources]\n elif callable(columns):\n sources = [{key: val for key, val in x.items() if columns(key)} for x in sources]\n assert sources\n columns = list(sources[0].keys())\n else:\n raise TypeError('Variable \"columns\" should be list or callable or None.')\n \n dtype = {col: self._choose_dtype(col) for col in columns}\n \n ids = [x['hits']['hits'][0]['_id'] for x in res['responses']]\n\n tab = pd.DataFrame(sources, index=ids)[columns]\n \n # Workaround for pandas bug: https://stackoverflow.com/a/38750433/7856919\n for k, v in dtype.items():\n if v == bool:\n tab[k] = tab[k].astype(str) == 'True'\n else:\n tab[k] = tab[k].astype(v)\n \n if thresh is not None:\n # Select rows that are not above the threshold \n sel = ~(tab['__CONFIDENCE'] >= thresh)\n columns_to_remove = [x for x in tab.columns if '__' in x]\n tab.loc[sel, columns_to_remove] = pd.np.nan\n \n # Dirty fix for np.nan that transforms dtype bool into float.\n tab['__IS_MATCH'].fillna(False, inplace=True)\n tab['__IS_MATCH'] = tab['__IS_MATCH'].astype(bool) \n\n return tab", "def sample_posterior_predictive(\n prophet_model: Prophet, df: pd.DataFrame, vectorized: bool\n) -> Dict[str, np.ndarray]:\n n_iterations = prophet_model.params[\"k\"].shape[0]\n samp_per_iter = max(\n 1, int(np.ceil(prophet_model.uncertainty_samples / float(n_iterations)))\n )\n # Generate seasonality features once so we can re-use them.\n (\n seasonal_features,\n _,\n component_cols,\n _,\n ) = prophet_model.make_all_seasonality_features(df)\n sim_values = {\"yhat\": [], \"trend\": []}\n for i in range(n_iterations):\n if vectorized:\n sims = sample_model_vectorized(\n prophet_model,\n df=df,\n 
seasonal_features=seasonal_features,\n iteration=i,\n s_a=component_cols[\"additive_terms\"],\n s_m=component_cols[\"multiplicative_terms\"],\n n_samples=samp_per_iter,\n )\n for k in sim_values:\n sim_values[k].append(sims[k])\n else:\n sims = [\n prophet_model.sample_model(\n df=df,\n seasonal_features=seasonal_features,\n iteration=i,\n s_a=component_cols[\"additive_terms\"],\n s_m=component_cols[\"multiplicative_terms\"],\n )\n for _ in range(samp_per_iter)\n ]\n for key in sim_values:\n for sim in sims:\n sim_values[key].append(sim[key].values)\n for k, v in sim_values.items():\n sim_values[k] = np.row_stack(v)\n return cast(Dict[str, np.ndarray], sim_values)", "def pd_to_np(df, squeeze):\n # Extract the labels and create a samples vector out of it\n labels = df.iloc[:, df.columns.get_level_values(0) == 'label']\n labels = labels.droplevel('event')\n if squeeze:\n labels = labels[~labels.index.duplicated(keep='first')]\n labels_np = np.squeeze(labels.values)\n else:\n labels_np = labels.values\n\n # Drop the labels from data\n dataframe = df.drop('label', axis=1)\n\n dim_0 = len(dataframe.index.get_level_values(0).unique())\n dim_1 = int(len(dataframe.index.get_level_values(1)) / dim_0)\n dim_2 = dataframe.shape[1]\n\n dataframe_np = dataframe.values.reshape((dim_0, dim_1, dim_2))\n\n return dataframe_np, labels_np", "def predict(model, df, variables, to_df=True, method='max', verbose=3):\n if not isinstance(df, pd.DataFrame): raise Exception('[bnlearn] >Error: Input requires a pd.DataFrame.')\n if not isinstance(model, dict): raise Exception('[bnlearn] >Error: Input requires a dict that contains the key: model.')\n if isinstance(variables, str): variables=[variables]\n # Remove columns that are used as priors\n dfX = df.loc[:, ~np.isin(df.columns.values, variables)]\n if verbose>=3: print('[bnlearn]> Remaining columns for inference: %d' %(dfX.shape[1]))\n\n # Get only the unique records in the DataFrame to reduce computation time.\n dfU = dfX.drop_duplicates()\n # Make empty array\n P = np.array([None]*dfX.shape[0])\n for i in tqdm(range(dfU.shape[0])):\n # Get input data and create a dict.\n evidence = dfU.iloc[i,:].to_dict()\n # Do the inference.\n query = bnlearn.inference.fit(model, variables=variables, evidence=evidence, to_df=False, verbose=0)\n # Find original location of the input data.\n loc = np.sum((dfX==dfU.iloc[i,:]).values, axis=1)==dfU.shape[1]\n # Store inference\n P[loc] = _get_prob(query, method=method)\n P = list(P)\n\n # Loop the dataframe\n # P1 = []\n # for i in tqdm(range(dfX.shape[0])):\n # # Setup input data\n # evidence = dfX.iloc[i,:].to_dict()\n # # Do the inferemce\n # query = inference.fit(model, variables=variables, evidence=evidence, to_df=False, verbose=0)\n # # Store in list\n # P1.append(_get_max_prob(query))\n\n if to_df: P = pd.DataFrame(P)\n return P", "def aggregateResultsToDfResults(self, arrays=True, fillna=False):\n nan_value = np.nan\n # defines which variable types will be saved in the results dataframe\n SUPPORTED_TYPES = (float, int, np.ndarray, list)\n SCALAR_TYPES = (float, int)\n ARRAY_TYPES = (np.ndarray, list)\n\n logging.info(\"Aggregating results to `dfResults` ...\")\n for runId, parameters in tqdm.tqdm(self.dfResults.iterrows(), total=len(self.dfResults)):\n # if the results were previously loaded into memory, use them\n if hasattr(self, \"results\"):\n # only if the length matches the number of results\n if len(self.results) == len(self.dfResults):\n result = self.results[runId]\n # else, load results individually from hdf 
file\n else:\n result = self.getRun(runId)\n # else, load results individually from hdf file\n else:\n result = self.getRun(runId)\n\n for key, value in result.items():\n # only save floats, ints and arrays\n if isinstance(value, SUPPORTED_TYPES):\n # save 1-dim arrays\n if isinstance(value, ARRAY_TYPES) and arrays:\n # to save a numpy array, convert column to object type\n if key not in self.dfResults:\n self.dfResults[key] = None\n self.dfResults[key] = self.dfResults[key].astype(object)\n self.dfResults.at[runId, key] = value\n elif isinstance(value, SCALAR_TYPES):\n # save scalars\n self.dfResults.loc[runId, key] = value\n else:\n self.dfResults.loc[runId, key] = nan_value\n # drop nan columns\n self.dfResults = self.dfResults.dropna(axis=\"columns\", how=\"all\")\n\n if fillna:\n self.dfResults = self.dfResults.fillna(0)", "def pare_data(dataframe, columns=None):\n if columns is None:\n columns = {\n 'RUN' : int,\n 'PHINUMBER': int,\n 'SERIES': str,\n 'H': int,\n 'K': int,\n 'L': int,\n 'MERGEDH': int,\n 'MERGEDK': int,\n 'MERGEDL': int,\n 'IOBS': float,\n 'SIGMA(IOBS)': float,\n 'D': float, \n }\n columns.update({i: float for i in dataframe.keys() if 'ipm' in i.lower()})\n dataframe = dataframe[[i for i in columns if i in dataframe]]\n for k in dataframe:\n if k in columns:\n dataframe[k] = dataframe[k].astype(columns[k])\n else:\n del dataframe[k]\n\n #print(\"Number of reflection observations: {}\".format(len(dataframe)))\n #print(\"Multiplicity: {}\".format(len(dataframe)/len(dataframe.groupby(['H', 'K', 'L']))))\n\n #This removes reflections which were not observed in the 'on' and 'off' datasets at a given rotation\n #TODO: This line could use some serious optimization. It seems inocuous but runs very slow\n #dataframe = dataframe.groupby(['H', 'K', 'L', 'RUN', 'PHINUMBER']).filter(lambda x: x.SERIES.str.contains('on').max() and x.SERIES.str.contains('off').max())\n\n dataframe['on'] = dataframe.SERIES.str.contains('on')\n #dataframe['off'] = dataframe.SERIES.str.contains('off')\n dataframe = dataframe.groupby(['H', 'K', 'L', 'RUN', 'PHINUMBER']).filter(lambda x: x.on.max() and ~x.on.min())\n\n del dataframe['on']\n #gammaobs = len(dataframe.groupby(['H', 'K', 'L', 'RUN', 'PHINUMBER']))\n #gammamult = gammaobs / len(dataframe.groupby(['H', 'K', 'L']))\n #print(\"Number of ratio observations: {}\".format(gammaobs))\n #print(\"Ratio multiplicity: {}\".format(gammamult)) \n return dataframe", "def probability_array(self):\n R = np.exp(self.apply_weights())\n assert R.shape == (self.N, self.C)\n # When you sum across a row, axis=1\n R_sum_ax1 = np.sum(R, axis=1)\n R_sum_ax1 = np.reshape(R_sum_ax1, newshape=(self.N, 1))\n assert R_sum_ax1.shape == (self.N, 1)\n\n # Divide each element in R by\n return R/R_sum_ax1", "def create_report(result, *, decimal_places=3):\n\n # TODO add effect sizes to multiple comparisons.\n def single_population_string(population, with_stats=False, pop_pval=None, with_rank=True):\n if pop_pval is not None:\n return \"%s (p=%.*f)\" % (population, decimal_places, pop_pval)\n if with_stats:\n halfwidth = (result.rankdf.at[population, 'ci_upper'] - result.rankdf.at[population, 'ci_lower']) / 2\n mystats = []\n if (result.force_mode is not None and result.force_mode=='parametric') or \\\n (result.force_mode is None and result.all_normal):\n mystats.append(\"M=%.*f+-%.*f\" % (decimal_places, result.rankdf.at[population, 'mean'],\n decimal_places, halfwidth))\n mystats.append(\"SD=%.*f\" % (decimal_places, result.rankdf.at[population, 'std']))\n else:\n 
mystats.append(\"MD=%.*f+-%.*f\" % (decimal_places, result.rankdf.at[population, 'median'],\n decimal_places, halfwidth))\n mystats.append(\"MAD=%.*f\" % (decimal_places, result.rankdf.at[population, 'mad']))\n if with_rank:\n mystats.append(\"MR=%.*f\" % (decimal_places, result.rankdf.at[population, 'meanrank']))\n return \"%s (%s)\" % (population, \", \".join(mystats))\n else:\n return str(population)\n\n def create_population_string(populations, with_stats=False, pop_pvals=None, with_rank=False):\n if isinstance(populations, str):\n populations = [populations]\n population_strings = []\n for index, population in enumerate(populations):\n if pop_pvals is not None:\n cur_pval = pop_pvals[index]\n else:\n cur_pval = None\n population_strings.append(single_population_string(population, with_stats, cur_pval, with_rank))\n if len(populations) == 1:\n popstr = population_strings[0]\n elif len(populations) == 2:\n popstr = \" and \".join(population_strings)\n else:\n popstr = \", \".join(population_strings[:-1]) + \", and \" + population_strings[-1]\n return popstr\n\n if not isinstance(result, RankResult):\n raise TypeError(\"result must be of type RankResult and should be the outcome of calling the autorank function.\")\n\n print(\"The statistical analysis was conducted for %i populations with %i paired samples.\" % (len(result.rankdf),\n result.num_samples))\n print(\"The family-wise significance level of the tests is alpha=%.*f.\" % (decimal_places, result.alpha))\n\n if result.all_normal:\n not_normal = []\n min_pvalue = min(result.pvals_shapiro)\n print(\"We failed to reject the null hypothesis that the population is normal for all populations \"\n \"(minimal observed p-value=%.*f). Therefore, we assume that all populations are \"\n \"normal.\" % (decimal_places, min_pvalue))\n else:\n not_normal = []\n pvals = []\n normal = []\n for i, pval in enumerate(result.pvals_shapiro):\n if pval < result.alpha_normality:\n not_normal.append(result.rankdf.index[i])\n pvals.append(pval)\n else:\n normal.append(result.rankdf.index[i])\n if len(not_normal) == 1:\n population_term = 'population'\n else:\n population_term = 'populations'\n print(\"We rejected the null hypothesis that the population is normal for the %s %s. \"\n \"Therefore, we assume that not all populations are \"\n \"normal.\" % (population_term, create_population_string(not_normal, pop_pvals=pvals)))\n\n if result.omnibus == 'bayes':\n if result.all_normal:\n central_tendency = 'mean value'\n central_tendency_long = 'mean value (M)'\n variability = 'standard deviation (SD)'\n effect_size = 'd'\n else:\n central_tendency = 'median'\n central_tendency_long = 'median (MD)'\n variability = 'median absolute deviation (MAD)'\n effect_size = 'gamma'\n print(\n \"We used a bayesian signed rank test to determine differences between the mean values of the \"\n \"populations and report the %s and the %s for each population. We distinguish \"\n \"between populations being pair-wise smaller, equal, or larger and make a decision for one \"\n \"of these cases if we estimate that the posterior probability is at least \"\n \"alpha=%.*f.\" % (central_tendency_long, variability, decimal_places, result.alpha))\n if result.rope_mode == 'effsize':\n print(\n 'We used the effect size to define the region of practical equivalence (ROPE) around the %s '\n 'dynamically as %.*f*%s.' 
% (central_tendency, decimal_places, result.rope, effect_size))\n else:\n print(\n 'We used a fixed value of %.*f to define the region of practical equivalence (ROPE) around the '\n '%s.' % (decimal_places, result.rope, central_tendency))\n decision_set = set(result.rankdf['decision'])\n decision_set.remove('NA')\n if {'inconclusive'} == decision_set:\n print(\"We failed to find any conclusive evidence for differences between the populations \"\n \"%s.\" % create_population_string(result.rankdf.index, with_stats=True))\n elif {'equal'} == decision_set:\n print(\n \"All populations are equal, i.e., there are no significant and practically relevant differences \"\n \"between the populations %s.\" % create_population_string(result.rankdf.index,\n with_stats=True))\n elif {'equal', 'inconclusive'} == decision_set:\n print(\n \"The populations %s are all either equal or the results of the analysis are inconclusive.\")\n print(result.decision_matrix)\n else:\n print(\"We found significant and practically relevant differences between the populations \"\n \"%s.\" % create_population_string(result.rankdf.index, with_stats=True))\n for i in range(len(result.rankdf)):\n if len(result.rankdf.index[result.decision_matrix.iloc[i, :] == 'smaller']) > 0:\n print('The %s of the population %s is larger than of the populations '\n '%s.' % (central_tendency, result.rankdf.index[i],\n create_population_string(\n result.rankdf.index[\n result.decision_matrix.iloc[i, :] == 'smaller'])))\n equal_pairs = []\n for i in range(len(result.rankdf)):\n for j in range(i + 1, len(result.rankdf)):\n if result.decision_matrix.iloc[i, j] == 'equal':\n equal_pairs.append(result.rankdf.index[i] + ' and ' + result.rankdf.index[j])\n if len(equal_pairs) > 0:\n equal_pairs_str = create_population_string(equal_pairs).replace(',', ';')\n print('The following pairs of populations are equal: %s.' % equal_pairs_str)\n if 'inconclusive' in set(result.rankdf['decision']):\n print('All other differences are inconclusive.')\n elif len(result.rankdf) == 2:\n print(\"No check for homogeneity was required because we only have two populations.\")\n if result.effect_size == 'cohen_d':\n effect_size = 'd'\n elif result.effect_size == 'cliff_delta':\n effect_size = 'delta'\n elif result.effect_size == 'akinshin_gamma':\n effect_size = 'gamma'\n else:\n raise ValueError('unknown effect size method, this should not be possible: %s' % result.effect_size)\n if result.omnibus == 'ttest':\n larger = np.argmax(result.rankdf['mean'].values)\n smaller = int(bool(larger - 1))\n if result.all_normal:\n print(\"Because we have only two populations and both populations are normal, we use the t-test to \"\n \"determine differences between the mean values of the populations and report the mean value (M) \"\n \"and the standard deviation (SD) for each population. \")\n else:\n if len(not_normal) == 1:\n notnormal_str = 'one of them is'\n else:\n notnormal_str = 'both of them are'\n print(\"Because we have only two populations and %s not normal, we should use Wilcoxon's signed rank \"\n \"test to determine the differences in the central tendency and report the median (MD) and the \"\n \"median absolute deviation (MAD) for each population. 
However, the user decided to force the \"\n \"use of the t-test which assumes normality of all populations and we report the mean value (M) \"\n \"and the standard deviation (SD) for each population.\" % notnormal_str)\n if result.pvalue >= result.alpha:\n print(\"We failed to reject the null hypothesis (p=%.*f) of the paired t-test that the mean values of \"\n \"the populations %s are equal. Therefore, we \"\n \"assume that there is no statistically significant difference between the mean values of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True)))\n else:\n print(\"We reject the null hypothesis (p=%.*f) of the paired t-test that the mean values of the \"\n \"populations %s are \"\n \"equal. Therefore, we assume that the mean value of %s is \"\n \"significantly larger than the mean value of %s with a %s effect size (%s=%.*f).\"\n % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True),\n result.rankdf.index[larger], result.rankdf.index[smaller],\n result.rankdf.magnitude[larger], effect_size, decimal_places, result.rankdf.effect_size[larger]))\n elif result.omnibus == 'wilcoxon':\n larger = np.argmax(result.rankdf['median'].values)\n smaller = int(bool(larger - 1))\n if result.all_normal:\n print(\"Because we have only two populations and both populations are normal, we should use the t-test \"\n \"to determine differences between the mean values of the populations and report the mean value \"\n \"(M) and the standard deviation (SD) for each population. However, the user decided to force the \"\n \"use of the less powerful Wilcoxon signed rank test and we report the median (MD) and the median \"\n \"absolute deviation (MAD) for each population.\")\n else:\n if len(not_normal) == 1:\n notnormal_str = 'one of them is'\n else:\n notnormal_str = 'both of them are'\n print(\"Because we have only two populations and %s not normal, we use Wilcoxon's signed rank test to \"\n \"determine the differences in the central tendency and report the median (MD) and the median \"\n \"absolute deviation (MAD) for each population.\" % notnormal_str)\n if result.pvalue >= result.alpha:\n print(\"We failed to reject the null hypothesis (p=%.*f) of Wilcoxon's signed rank test that \"\n \"population %s is not greater than population %s. Therefore, we \"\n \"assume that there is no statistically significant difference between the medians of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index[larger], with_stats=True),\n create_population_string(result.rankdf.index[smaller], with_stats=True)))\n else:\n print(\"We reject the null hypothesis (p=%.*f) of Wilcoxon's signed rank test that population \"\n \"%s is not greater than population %s. 
Therefore, we assume \"\n \"that the median of %s is \"\n \"significantly larger than the median value of %s with a %s effect size (%s=%.*f).\"\n % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index[larger], with_stats=True),\n create_population_string(result.rankdf.index[smaller], with_stats=True),\n result.rankdf.index[larger], result.rankdf.index[smaller],\n result.rankdf.magnitude[larger], effect_size, decimal_places, result.rankdf.effect_size[larger]))\n else:\n raise ValueError('Unknown omnibus test for difference in the central tendency: %s' % result.omnibus)\n else:\n if result.all_normal:\n if result.homoscedastic:\n print(\"We applied Bartlett's test for homogeneity and failed to reject the null hypothesis \"\n \"(p=%.*f) that the data is homoscedastic. Thus, we assume that our data is \"\n \"homoscedastic.\" % (decimal_places, result.pval_homogeneity))\n else:\n print(\"We applied Bartlett's test for homogeneity and reject the null hypothesis (p=%.*f) that the \"\n \"data is homoscedastic. Thus, we assume that our data is \"\n \"heteroscedastic.\" % (decimal_places, result.pval_homogeneity))\n\n if result.omnibus == 'anova':\n if result.all_normal and result.homoscedastic:\n print(\"Because we have more than two populations and all populations are normal and homoscedastic, we \"\n \"use repeated measures ANOVA as omnibus \"\n \"test to determine if there are any significant differences between the mean values of the \"\n \"populations. If the results of the ANOVA test are significant, we use the post-hoc Tukey HSD \"\n \"test to infer which differences are significant. We report the mean value (M) and the standard \"\n \"deviation (SD) for each population. Populations are significantly different if their confidence \"\n \"intervals are not overlapping.\")\n else:\n if result.all_normal:\n print(\n \"Because we have more than two populations and the populations are normal but heteroscedastic, \"\n \"we should use the non-parametric Friedman test \"\n \"as omnibus test to determine if there are any significant differences between the mean values \"\n \"of the populations. However, the user decided to force the use of \"\n \"repeated measures ANOVA as omnibus test which assumes homoscedasticity to determine if there are \"\n \"any significant differences between the mean values of the populations. If the results of the \"\n \"ANOVA test are significant, we use the post-hoc Tukey HSD test to infer which differences are \"\n \"significant. We report the mean value (M) and the standard \"\n \"deviation (SD) for each \"\n \"population. Populations are significantly different if their confidence intervals are not \"\n \"overlapping.\")\n else:\n if len(not_normal) == 1:\n notnormal_str = 'one of them is'\n else:\n notnormal_str = 'some of them are'\n print(\"Because we have more than two populations and the populations and %s not normal, \"\n \"we should use the non-parametric Friedman test \"\n \"as omnibus test to determine if there are any significant differences between the median \"\n \"values of the populations and report the median (MD) and the median absolute deviation \"\n \"(MAD). However, the user decided to force the use of repeated measures ANOVA as omnibus \"\n \"test which assumes homoscedasticity to determine if there are any significant differences \"\n \"between the mean values of the populations. If the results of the ANOVA test are \"\n \"significant, we use the post-hoc Tukey HSD test to infer which differences are \"\n \"significant. 
We report the mean value (M) and the standard deviation (SD) for each \"\n \"population. Populations are significantly different if their confidence intervals are not \"\n \"overlapping.\" % (notnormal_str))\n if result.pvalue >= result.alpha:\n print(\"We failed to reject the null hypothesis (p=%.*f) of the repeated measures ANOVA that there is \"\n \"a difference between the mean values of the populations %s. Therefore, we \"\n \"assume that there is no statistically significant difference between the mean values of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True)))\n else:\n print(\"We reject the null hypothesis (p=%.*f) of the repeated measures ANOVA that there is \"\n \"a difference between the mean values of the populations %s. Therefore, we \"\n \"assume that there is a statistically significant difference between the mean values of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True)))\n meanranks, names, groups = get_sorted_rank_groups(result, False)\n if len(groups) == 0:\n print(\"Based on the post-hoc Tukey HSD test, we assume that all differences between the populations \"\n \"are significant.\")\n else:\n groupstrs = []\n for group_range in groups:\n group = range(group_range[0], group_range[1] + 1)\n if len(group) == 1:\n cur_groupstr = names[group[0]]\n elif len(group) == 2:\n cur_groupstr = \" and \".join([names[pop] for pop in group])\n else:\n cur_groupstr = \", \".join([names[pop] for pop in group[:-1]]) + \", and \" + names[group[-1]]\n groupstrs.append(cur_groupstr)\n print(\"Based on the post-hoc Tukey HSD test, we assume that there are no significant differences within \"\n \"the following groups: %s. All other differences are significant.\" % (\"; \".join(groupstrs)))\n print()\n elif result.omnibus == 'friedman':\n if result.all_normal and result.homoscedastic:\n print(\"Because we have more than two populations and all populations are normal and homoscedastic, we \"\n \"should use repeated measures ANOVA as omnibus \"\n \"test to determine if there are any significant differences between the mean values of the \"\n \"populations. However, the user decided to force the use of the less powerful Friedman test as \"\n \"omnibus test to determine if there are any significant differences between the mean values \"\n \"of the populations. We report the mean value (M), the standard deviation (SD) and the mean rank \"\n \"(MR) among all populations over the samples. Differences between populations are significant, \"\n \"if the difference of the mean rank is greater than the critical distance CD=%.*f of the Nemenyi \"\n \"test.\" % (decimal_places, result.cd))\n elif result.all_normal:\n print(\"Because we have more than two populations and the populations are normal but heteroscedastic, \"\n \"we use the non-parametric Friedman test \"\n \"as omnibus test to determine if there are any significant differences between the mean values \"\n \"of the populations. We use the post-hoc Nemenyi test to infer which differences are \"\n \"significant. We report the mean value (M), the standard deviation (SD) and the mean rank (MR) \"\n \"among all populations over the samples. 
Differences between populations are significant, if the \"\n \"difference of the mean rank is greater than the critical distance CD=%.*f of the Nemenyi \"\n \"test.\" % (decimal_places, result.cd))\n else:\n if len(not_normal) == 1:\n notnormal_str = 'one of them is'\n else:\n notnormal_str = 'some of them are'\n print(\"Because we have more than two populations and the populations and %s not normal, \"\n \"we use the non-parametric Friedman test \"\n \"as omnibus test to determine if there are any significant differences between the median values \"\n \"of the populations. We use the post-hoc Nemenyi test to infer which differences are \"\n \"significant. We report the median (MD), the median absolute deviation (MAD) and the mean rank \"\n \"(MR) among all populations over the samples. Differences between populations are significant, \"\n \"if the difference of the mean rank is greater than the critical distance CD=%.*f of the Nemenyi \"\n \"test.\" % (notnormal_str, decimal_places, result.cd))\n if result.pvalue >= result.alpha:\n print(\"We failed to reject the null hypothesis (p=%.*f) of the Friedman test that there is no \"\n \"difference in the central tendency of the populations %s. Therefore, we \"\n \"assume that there is no statistically significant difference between the median values of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True, with_rank=True)))\n else:\n print(\"We reject the null hypothesis (p=%.*f) of the Friedman test that there is no \"\n \"difference in the central tendency of the populations %s. Therefore, we \"\n \"assume that there is a statistically significant difference between the median values of the \"\n \"populations.\" % (decimal_places, result.pvalue,\n create_population_string(result.rankdf.index, with_stats=True, with_rank=True)))\n meanranks, names, groups = get_sorted_rank_groups(result, False)\n if len(groups) == 0:\n print(\"Based on the post-hoc Nemenyi test, we assume that all differences between the populations \"\n \"are significant.\")\n else:\n groupstrs = []\n for group_range in groups:\n group = range(group_range[0], group_range[1] + 1)\n if len(group) == 1:\n cur_groupstr = names[group[0]]\n elif len(group) == 2:\n cur_groupstr = \" and \".join([names[pop] for pop in group])\n else:\n cur_groupstr = \", \".join([names[pop] for pop in group[:-1]]) + \", and \" + names[group[-1]]\n groupstrs.append(cur_groupstr)\n print(\"Based on the post-hoc Nemenyi test, we assume that there are no significant differences \"\n \"within the following groups: %s. 
All other differences are \"\n \"significant.\" % (\"; \".join(groupstrs)))\n else:\n raise ValueError('Unknown omnibus test for difference in the central tendency: %s' % result.omnibus)", "def as_DF(self):\n\n gs_df = pd.DataFrame(self.P, columns=self.xvec, index=self.yvec)\n gs_df.columns.name = 'x'\n gs_df.index.name = 'y'\n\n return gs_df", "def generate(data: pd.DataFrame) -> np.ndarray:\n # TODO tests\n return data.apply(pd.to_numeric, errors='coerce')", "def result_array(self) -> np.ndarray:\n return np.array([r[\"time\"] for r in self.profile_result])", "def frame(self):\n microseconds = np.array(self.results['times']) * 1e6\n return pd.DataFrame(self.results, index=microseconds)", "def process_real(df):\n df_c = df.copy()\n df_c = df_c.apply(lambda s: H.to_quants(s, std=1), axis=1)\n df_c = df_c > 0\n if type(df.index) == pd.MultiIndex:\n df_c.index = map(lambda s: '_'.join(s), df_c.index)\n return df_c.T", "def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n self._adjust_contrast_img(results, self.factor)\n return results", "def generate_ratio_result(X_train, X_test, y_train, y_test):\n # generate the result for random samples\n ratio_result = pd.DataFrame(y_test, columns=['ratio_baseline'])\n\n model1 = linear_model.LinearRegression()\n model1.fit(X_train, y_train)\n y_pred = model1.predict(X_test)\n ratio_result['single_linear_regression'] = y_pred\n\n model2 = svm.SVR()\n model2.fit(X_train, y_train)\n y_pred = model2.predict(X_test)\n ratio_result['single_SVM'] = y_pred\n\n model3 = neural_network.MLPRegressor(solver='lbfgs', max_iter=1000, learning_rate_init=0.005)\n model3.fit(X_train, y_train)\n y_pred = model3.predict(X_test)\n ratio_result['single_NN'] = y_pred\n\n kernel = GPy.kern.Matern32(input_dim=6, ARD=True)\n m_full = GPy.models.SparseGPRegression(X_train, y_train.reshape(len(y_train), 1), kernel)\n m_full.optimize('bfgs')\n y_pred, y_var = m_full.predict(X_test)\n ratio_result['single_GP'] = y_pred\n\n return ratio_result", "def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n self._adjust_color_img(results, self.factor)\n return results", "def collect_scores(true_values, pred_df):\n csv_data = []\n for index in true_values.index.unique():\n if index not in pred_df.index:\n continue\n true_confirmed = true_values.loc[index][\"confirmed\"]\n pred_confirmed = pred_df.loc[index][\"prediction_confirmed\"]\n\n csv_data.append(\n [\n index[0],\n index[1],\n true_values.loc[index][\"geoname_code\"],\n ale(true_confirmed, pred_confirmed),\n ]\n )\n\n csv_data = pd.DataFrame(csv_data)\n csv_data.columns = [\"region_code\", \"date\", \"geoname_code\", \"cases_male\"]\n return csv_data.set_index([\"region_code\", \"geoname_code\", \"date\"])", "def make_results(self, X, y, verbose=True):\n import xarray as xr\n from sklearn.metrics import r2_score\n from sklearn.metrics import explained_variance_score\n feature_dim, mt_dim = get_feature_multitask_dim(X, y, self.sample_dim)\n rds = y.to_dataset(name='original').copy(deep=False, data=None)\n if sk_attr(self, 'coef_') and sk_attr(self, 'intercept_'):\n rds[feature_dim] = X[feature_dim]\n if mt_dim:\n rds['params'] = xr.DataArray(self.coef_, dims=[mt_dim,\n feature_dim])\n rds['intercept'] = xr.DataArray(self.intercept_, dims=[mt_dim])\n pvals = get_p_values(X, y, self.sample_dim)\n rds['pvalues'] = xr.DataArray(pvals, dims=[mt_dim,\n feature_dim])\n else:\n rds['params'] = xr.DataArray(self.coef_, dims=feature_dim)\n rds['intercept'] = xr.DataArray(self.intercept_)\n 
pvals = get_p_values(X, y, self.sample_dim)\n rds['pvalues'] = xr.DataArray(pvals, dims=feature_dim)\n elif sk_attr(self, 'feature_importances_'):\n if mt_dim:\n rds['feature_importances'] = xr.DataArray(self.\n feature_importances_,\n dims=[mt_dim,\n feature_dim])\n else:\n rds['feature_importances'] = xr.DataArray(self.\n feature_importances_,\n dims=[feature_dim])\n predict = self.predict(X)\n if mt_dim:\n predict = predict.rename({self.reshapes: mt_dim})\n rds['predict'] = predict\n r2 = r2_score(y, predict, multioutput='raw_values')\n rds['r2'] = xr.DataArray(r2, dims=mt_dim)\n else:\n rds['predict'] = predict\n r2 = r2_score(y, predict)\n rds['r2'] = xr.DataArray(r2)\n if feature_dim:\n r2_adj = 1.0 - (1.0 - rds['r2']) * (len(y) - 1.0) / \\\n (len(y) - X.shape[1])\n else:\n r2_adj = 1.0 - (1.0 - rds['r2']) * (len(y) - 1.0) / (len(y))\n rds['r2_adj'] = r2_adj\n rds['predict'].attrs = y.attrs\n rds['resid'] = y - rds['predict']\n rds['resid'].attrs = y.attrs\n rds['resid'].attrs['long_name'] = 'Residuals'\n rds['dw_score'] = (rds['resid'].diff(self.sample_dim)**2).sum(self.sample_dim,\n keep_attrs=True) / (rds['resid']**2).sum(self.sample_dim, keep_attrs=True)\n exp_var = explained_variance_score(y, rds['predict'].values)\n rds['explained_variance'] = exp_var\n\n# rds['corrcoef'] = self.corrcoef(X, y)\n # unstack dims:\n if mt_dim:\n rds = rds.unstack(mt_dim)\n # put coords attrs back:\n# for coord, attr in y.attrs['coords_attrs'].items():\n# rds[coord].attrs = attr\n# # remove coords attrs from original, predict and resid:\n# rds.original.attrs.pop('coords_attrs')\n# rds.predict.attrs.pop('coords_attrs')\n# rds.resid.attrs.pop('coords_attrs')\n all_var_names = [x for x in rds.data_vars.keys()]\n sample_types = [x for x in rds.data_vars.keys()\n if self.sample_dim in rds[x].dims]\n feature_types = [x for x in rds.data_vars.keys()\n if feature_dim in rds[x].dims]\n error_types = list(set(all_var_names) - set(sample_types +\n feature_types))\n rds.attrs['sample_types'] = sample_types\n rds.attrs['feature_types'] = feature_types\n rds.attrs['error_types'] = error_types\n rds.attrs['sample_dim'] = self.sample_dim\n rds.attrs['feature_dim'] = feature_dim\n # add X to results:\n rds['X'] = X\n if verbose:\n print('Producing results...Done!')\n return rds", "def query2df(query):\n df = pd.DataFrame(data = list(itertools.product([0, 1], repeat=len(query.variables))), columns=query.variables)\n df['p'] = query.values.flatten()\n return df", "def simulationDelayedTreatment(numTrials):\n \n \n results = []\n gutresults = []\n for a in range(300):\n results.append([])\n gutresults.append([])\n for b in range(numTrials):\n viruses = []\n for c in range(10000):\n resistances = {'guttagonol': False}\n vir = ResistantVirus(.1, .05, resistances, .005)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 300):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n FinalResults = results[299]\n print len(FinalResults)\n \n \n \n pylab.figure(5)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('Simulation with Drugs - Frequency')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.legend()\n pylab.show()", "def expected_df():\n return pd.DataFrame(\n {\n 
\"growth\": [0.873922, 0.814298, 0.0],\n \"gene\": [\"b2935\", \"b0723\", \"b0451\"],\n \"status\": [\"optimal\", \"optimal\", \"optimal\"],\n }\n )", "def make_results_plot( df, k, reg ):\n\tuid = smalldf['user_id'].values\n\tbid = smalldf['business_id'].values\n\tactual = smalldf['stars'].values\n\tpredicted = np.zeros( len(actual) )\n\tcounter = 0\n\tfor biz_id, user_id in izip( bid, uid ):\n\t\tpredicted[counter] = rating( biz_id, user_id, k = k, reg = reg ) \n\t\tcounter = counter + 1\n\t# compare_results( actual, predicted )", "def test_sample_posterior_predictive():\n df = pd.DataFrame(dict(x=[1.0, 2.0, 3.0, 4.0]))\n\n class Model(Poisson):\n dv = \"y\"\n features = dict(x=dict(transformer=lambda x: x.x, prior=dist.Normal(0, 1)))\n\n config = {\"samples\": {\"x\": onp.ones((10, 100000))}}\n model = Model.from_dict(config)\n pred = model.sample_posterior_predictive(df, rng_key=onp.array([0, 0]))\n log_pred = onp.log(pred).round(2)\n assert df.x.astype(\"float32\").equals(log_pred.astype(\"float32\"))", "def result_to_dataframe(data):\n letters, statistics = zip(*data)\n dataframe = pd.DataFrame(data=list(statistics), index=letters, columns=['SUM', 'SUM_OF_SQUARES', 'MAX', 'MIN', 'COUNT']).sort_index()\n dataframe['MEAN'] = dataframe['SUM'] / dataframe['COUNT']\n dataframe['VARIANCE'] = dataframe['SUM_OF_SQUARES'] / dataframe['COUNT'] - dataframe['MEAN']**2\n dataframe['STANDARD_DEVIATION'] = dataframe['VARIANCE']**0.5\n logging.info(\"Total datapoints read: {}.\".format(dataframe['COUNT'].sum()))\n return dataframe", "def _get_selection_probabilities(self):\r\n probabilities = np.arange(1, self.population_size+1, dtype=float)[::-1]\r\n probabilities /= probabilities.sum()\r\n return probabilities", "def dataframe(self):\n return self.generator.dataframe", "def _agg_proportions(df, members=None):\n p = df.copy()\n if members is not None:\n p = p.iloc[members]\n p = p.T.assign(\n group=pd.factorize(p.columns)[0],\n label=pd.factorize(p.columns)[-1],\n value=p.sum(), #/ p.sum().sum() * p.shape[0],\n row_count=p.shape[0]\n )\n p = p[['label', 'group', 'value', 'row_count']]\n p.columns = ['label', 'group', 'value', 'row_count']\n p = list(p.T.to_dict().values())\n return p", "def simulationTwoDrugsDelayedTreatment(numTrials):\n results = []\n gutresults = []\n \n for a in range(375):\n results.append([])\n gutresults.append([])\n \n for b in range(numTrials):\n viruses = []\n for c in range(100):\n resistances = {'guttagonol': False, 'grimpex': False}\n vir = ResistantVirus(.1, .05, resistances, .02)\n viruses.append(vir)\n \n Mark = TreatedPatient(viruses, 1000)\n \n for d in range(150):\n pop = Mark.update()\n results[d].append(pop)\n gutpop = Mark.getResistPop(['guttagonol'])\n gutresults[d].append(gutpop)\n \n Mark.addPrescription('guttagonol')\n \n for e in range(150, 225):\n newpop = Mark.update()\n results[e].append(newpop)\n newgutpop = Mark.getResistPop(['guttagonol'])\n gutresults[e].append(newgutpop)\n \n Mark.addPrescription('grimpex')\n \n for f in range(225, 375):\n newpop = Mark.update()\n results[f].append(newpop)\n \n \n FinalResults = results[374]\n print len(FinalResults)\n \n \n pylab.figure(6)\n pylab.hist(FinalResults, bins = 10)\n pylab.title('300 day delay')\n pylab.xlabel('Virus Population')\n pylab.ylabel('Number of Trials with Population') \n pylab.show()", "def edit_probs(result):\n for i in range(TOP_E):\n p = result.data[i][1]\n p = round(p, 4)\n # p_str = str(p)[1:]\n result.data[i][1] = p\n\n return result", "def nullScore2pvalTable(scores):\n scores = 
pd.Series(scores)\n score_counts = scores.value_counts().sort_index(ascending=True)\n score_cum_counts = np.cumsum(score_counts.values[::-1])[::-1]\n count_sum = np.sum(score_counts)\n pval_array = score_cum_counts / count_sum\n pval_series = pd.Series(pval_array, index=score_counts.index)\n return pval_series", "def to_frame(self):\n # Create a set of dictionaries/lists for each column\n data = dict([(i_var.name, []) for i_var in self.inputs])\n data.update({self.OUTPUT_LABEL: [], self.INPUT_LABEL: [], self.name: []})\n\n # A very ugly loop to produce all the probabilities in a nice way.\n # Note that this just reproduces what is already in `self.lookup`.\n # Honestly, I just haven't thought of a better way to get nice output.\n for i_index, i_state in enumerate(self.input_states):\n for o_var, results in zip(self.outputs, self.per_state_results):\n for o_state, o_p in enumerate(results[i_index]):\n for i_var, s in zip(self.inputs, i_state):\n data[i_var.name].append(s)\n data[self.OUTPUT_LABEL].append(o_var.name)\n data[self.INPUT_LABEL].append(o_state)\n data[self.name].append(o_p)\n all_data = pd.DataFrame(data=data)\n\n # The magnificent pivot table function does all the work\n return pd.pivot_table(data=all_data, values=[self.name],\n index=[i_var.name for i_var in self.inputs],\n columns=[self.OUTPUT_LABEL, self.INPUT_LABEL])", "def make_prediction(*,X:pd.DataFrame) -> pd.Series:\n\n\ty_pred = pipe.predict_proba(X)[:,1]\n\t\n\t_logger.info(\n\t\tf'Making predictions with model version: {_version} '\n\t\tf'Input Data: {X}'\n\t\tf'Predictions: {y_pred}'\n\t)\n\tresults = {\"predictions\": y_pred, \"version\": _version}\n\treturn results", "def get_repsample(param_df, dataset='2013'):\n # Load data\n population_data = load_dataset(dataset=dataset)\n norm_population_data = scale(population_data)\n sample_ind = np.array([])\n covtot = (np.cov(norm_population_data, rowvar=0)**2).sum()\n covres_list = [covtot]\n # Add samples recursively\n for i in range(population_data.shape[0]):\n sample_ind = add_sample(norm_population_data, sample_ind)\n covres_list.append(calculate_covres(norm_population_data, sample_ind))\n # Plot the convergence rate\n rel_err = np.array(covres_list) / covtot\n plot_convergence(rel_err)\n # Get actual data\n sample_data = population_data[sample_ind[:6], :]\n np.savetxt('./csvs/repsample.csv', sample_data, delimiter=',')\n # Make the dataframe version for paper writing\n columns = ['tau1', 'tau2', 'g1', 'g2', 'ginf', 'mu', 'alpha', 'thickness']\n sample_data_df = pd.DataFrame(sample_data, columns=columns)\n sample_data_df = sample_data_df[['thickness', 'mu', 'alpha',\n 'tau1', 'tau2', 'g1', 'g2', 'ginf']]\n sample_data_df.index += 1 # Index start from 1 for biologists\n sample_data_df.to_csv('./csvs/repsample_df.csv')\n return sample_data_df", "def reproduce(population:list):\n new_gen = []\n probs = []\n for p in population:\n probs.append(p[3])\n while len(new_gen) != len(probs):\n parents = selection(probs)\n son,eval_son,daughter,eval_daughter = xo(population[parents[0]][0],population[parents[0]][1], population[parents[1]][0],population[parents[1]][1],2)\n new_gen.append([son,eval_son])\n new_gen.append([daughter,eval_daughter])\n # mutation\n # lets say 5% of the population gets mutated\n how_many_to_mutate = int(NUM_OF_CHROMOZOMS * (1/100))\n t = [i for i in range(NUM_OF_CHROMOZOMS)]\n # choose percent of the population randomly, uniformly\n indices_to_mutate = choice(t, how_many_to_mutate, replace=False)\n for i in range(len(indices_to_mutate)):\n 
mutate(new_gen[indices_to_mutate[i]])\n\n evaluateAll(new_gen)\n return new_gen", "def predict(self,X_test):\r\n self.X_test_data=X_test.reset_index(drop=True)\r\n \r\n temp=pd.DataFrame()\r\n count=0\r\n for each_model in self.fitted_model:\r\n count=count+1\r\n \r\n temp_str='model_'+str(count)+'_predictions'\r\n temp[temp_str]=each_model.predict(self.X_test_data)\r\n \r\n temp_str='model_'+str(count)+'_probablities'\r\n out = each_model.predict_proba(self.X_test_data)\r\n temp[temp_str]=[max(each) for each in out]\r\n \r\n \"\"\"self.outcomes=temp_df\r\n return self.out()\"\"\"\r\n \r\n \r\n \r\n \"\"\"columns list form all the predictions and probabalities individually\"\"\"\r\n pred_list=[x for x in temp.columns if x.split('_')[-1]=='predictions']\r\n prob_list=[x for x in temp.columns if x.split('_')[-1]=='probablities']\r\n \r\n \"\"\"getting max probablity column name out of alla the probablity\"\"\"\r\n \r\n max_prob=temp[prob_list].max(axis=1)\r\n max_prob_col_name=temp[prob_list].idxmax(axis=1)\r\n \r\n \"\"\"getting final prediction column name for each instance using max probabaility column name\"\"\"\r\n final_pred_col=[]\r\n for every in max_prob_col_name:\r\n final_pred_col.append('_'.join(every.split('_')[:-1])+'_predictions')\r\n \r\n \"\"\"final_pred=[]\r\n for each in range(len(final_pred_col)):\r\n final_pred.append(temp[pred_list].loc[each][final_pred_col[i]])\"\"\"\r\n \r\n final_pred=[]\r\n for each in range(len(final_pred_col)):\r\n final_pred.append(temp[pred_list].loc[each][final_pred_col[each]])\r\n \r\n \r\n \r\n self.final_preditions=pd.DataFrame(final_pred,columns=['prediction'])\r\n self.final_probablaties=pd.DataFrame(max_prob,columns=['probablity'])\r\n \r\n \r\n return (pd.concat([self.X_test_data, self.final_preditions,self.final_probablaties], axis=1, sort=True))", "def make_discrete(n_rows_population=500, \n n_rows_peripheral=125000, \n random_state=None,\n aggregation=aggregations.Count):\n random = np.random.RandomState(random_state)\n\n population_table = pd.DataFrame()\n population_table[\"column_01\"] = random.randint(0, 10, n_rows_population).astype(np.str)\n population_table[\"join_key\"] = np.arange(n_rows_population)\n population_table[\"time_stamp_population\"] = random.rand(n_rows_population)\n\n peripheral_table = pd.DataFrame()\n peripheral_table[\"column_01\"] = random.randint(-11, 11, n_rows_peripheral)\n peripheral_table[\"join_key\"] = random.randint(0, n_rows_population, n_rows_peripheral) \n peripheral_table[\"time_stamp_peripheral\"] = random.rand(n_rows_peripheral)\n\n # Compute targets\n temp = peripheral_table.merge(\n population_table[[\"join_key\", \"time_stamp_population\"]],\n how=\"left\",\n on=\"join_key\"\n )\n\n # Apply some conditions\n temp = temp[\n (temp[\"time_stamp_peripheral\"] <= temp[\"time_stamp_population\"]) &\n (temp[\"column_01\"] > 0.0)\n ]\n\n # Define the aggregation\n temp = _aggregate(temp, aggregation, \"column_01\", \"join_key\")\n\n temp = temp.rename(index=str, columns={\"column_01\": \"targets\"})\n\n population_table = population_table.merge(\n temp,\n how=\"left\",\n on=\"join_key\"\n )\n\n del temp\n\n population_table = population_table.rename(\n index=str, columns={\"time_stamp_population\": \"time_stamp\"})\n\n peripheral_table = peripheral_table.rename(\n index=str, columns={\"time_stamp_peripheral\": \"time_stamp\"})\n\n # Replace NaN targets with 0.0 - target values may never be NaN!.\n population_table.targets = np.where(\n np.isnan(population_table['targets']), \n 0, \n 
population_table['targets'])\n\n return population_table, peripheral_table", "def get_simprop_df(param_df):\n # Creat the thicknesses and the alphas\n simprop_dict = {}\n prop_list = ['thickness', 'alpha', 'ginf']\n for prop in prop_list:\n simprop_dict[prop] = get_simprop(param_df[prop])\n # Manually adjust ginf and g1, g2\n simprop_dict['ginf'][0] = .1\n p = np.polyfit(param_df['ginf'], param_df['g1'], 1)\n # Calculate p-value for this regression\n print('Stats of ginf and g1 regression:',\n linregress(param_df['ginf'], param_df['g1']))\n simprop_dict['g1'] = np.polyval(p, simprop_dict['ginf'])\n simprop_dict['g2'] = 1. - simprop_dict['g1'] - simprop_dict['ginf']\n # Add sylgard elasticity and thickness\n sylgardh = 10.1348\n sylgarde = 1.05e5\n simprop_dict['sylgardh'] = sylgardh * np.r_[.5:1.5:5j]\n simprop_dict['sylgarde'] = sylgarde * np.r_[.5:1.5:5j]\n simprop_array = np.c_[\n simprop_dict['thickness'], simprop_dict['alpha'],\n simprop_dict['sylgardh'], simprop_dict['sylgarde'],\n simprop_dict['g1'], simprop_dict['g2'], simprop_dict['ginf']]\n # Save to csv for abaqus scripts to load\n np.savetxt('./csvs/simprop.csv', simprop_array, delimiter=',')\n # Save dataframe to excel for the paper\n simprop_df = pd.DataFrame(simprop_dict)\n simprop_df.to_excel('./csvs/simprop.xlsx')\n return simprop_df", "def build_predictions_matrix(self):\n df = self._build_data_frame()\n\n self._train_model(df)\n\n test_df, smoothed_predictions = self._get_predictions(df)\n\n predictions_dict = {}\n for (row_index, row), prediction in zip(test_df.iterrows(), smoothed_predictions):\n predictions_dict[row_index] = prediction\n\n self.link_probability_matrix = self._convert_to_matrix(predictions_dict)\n print(predictions_dict)\n # print(self.link_probability_matrix.shape)\n # print(self.link_probability_matrix)", "def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n offset = random_negative(self.offset, self.random_negative_prob)\n self._translate_img(results, offset, self.direction)\n return results", "def create_loadshape_pmult_dataframe_for_simulation(settings: SimulationSettingsModel):\n df = create_loadshape_pmult_dataframe(settings)\n simulation_index = create_datetime_index_from_settings(settings)\n return df.loc[simulation_index]", "def df_to_array(datasample):\r\n return np.array(datasample)", "def make_numerical(n_rows_population=500, \n n_rows_peripheral=125000, \n random_state=None,\n aggregation=aggregations.Count):\n random = np.random.RandomState(random_state)\n\n population_table = pd.DataFrame()\n population_table[\"column_01\"] = random.rand(n_rows_population) * 2.0 - 1.0\n population_table[\"join_key\"] = np.arange(n_rows_population)\n population_table[\"time_stamp_population\"] = random.rand(n_rows_population)\n\n peripheral_table = pd.DataFrame()\n peripheral_table[\"column_01\"] = random.rand(n_rows_peripheral) * 2.0 - 1.0\n peripheral_table[\"join_key\"] = random.randint(0, n_rows_population, n_rows_peripheral) \n peripheral_table[\"time_stamp_peripheral\"] = random.rand(n_rows_peripheral)\n\n # Compute targets\n temp = peripheral_table.merge(\n population_table[[\"join_key\", \"time_stamp_population\"]],\n how=\"left\",\n on=\"join_key\"\n )\n\n # Apply some conditions\n temp = temp[\n (temp[\"time_stamp_peripheral\"] <= temp[\"time_stamp_population\"]) &\n (temp[\"time_stamp_peripheral\"] >= temp[\"time_stamp_population\"] - 0.5)\n ]\n\n # Define the aggregation\n temp = _aggregate(temp, aggregation, \"column_01\", \"join_key\")\n\n temp = 
temp.rename(index=str, columns={\"column_01\": \"targets\"})\n\n population_table = population_table.merge(\n temp,\n how=\"left\",\n on=\"join_key\"\n )\n\n del temp\n\n population_table = population_table.rename(\n index=str, columns={\"time_stamp_population\": \"time_stamp\"})\n\n peripheral_table = peripheral_table.rename(\n index=str, columns={\"time_stamp_peripheral\": \"time_stamp\"})\n\n # Replace NaN targets with 0.0 - target values may never be NaN!.\n population_table.targets = np.where(\n np.isnan(population_table['targets']), \n 0, \n population_table['targets'])\n\n return population_table, peripheral_table", "def _format_prediction(predictions, target_column, index=None, dtype=np.float64) -> pd.DataFrame:\n # `target_column` indicates multidimensional output, but predictions are one-dimensional\n if len(target_column) > 1:\n if (len(predictions.shape) == 1) or (predictions.shape[1] == 1):\n predictions = pd.get_dummies(predictions).values\n\n return pd.DataFrame(data=predictions, index=index, columns=target_column, dtype=dtype)", "def impute_df(df, algorithm):\n return pd.DataFrame(data=algorithm.fit_transform(df), columns=df.columns, index=df.index)", "def _reformat_results(self, results, strategy='BE_LOOP'):\n if self._verbose:\n print('Strategy to use: {}'.format(strategy))\n # Create an empty array to store the guess parameters\n if self._verbose:\n print('Raw results and compound Loop vector of shape {}'.format(len(results)))\n\n if strategy in ['BE_LOOP']:\n temp = np.array([np.hstack([result.x, result.fun]) for result in results])\n temp = stack_real_to_compound(temp, loop_fit32)\n return temp", "def preprocess_targets(california_housing_dataframe):\r\n output_targets = pd.DataFrame()\r\n # Scale the target to be in units of thousands of dollars.\r\n output_targets[\"median_house_value\"] = (\r\n california_housing_dataframe[\"median_house_value\"] / 1000.0)\r\n return output_targets", "def get_values_from_data(result_df, labels):\n values = []\n\n for _, column in enumerate(result_df.columns):\n values.append(result_df[column].Incorrect)\n\n data = {\"labels\": labels, \"values\": values}\n local_df = pd.DataFrame(data)\n\n values = local_df[\"values\"].astype(float)\n local_df[\"values\"] = (values * 100).astype(int)\n return local_df", "def summarize_ancestral_prob_df(df):\n df = df.groupby(['pattern', 'allele_count_a', 'allele_count_b',\n 'anc_species_state', 'anc_pop_state',\n 'anc_species_pop']) \\\n .apply(lambda x: x['joint_prob'].sum()) \\\n .reset_index() \\\n .set_index(['pattern', 'allele_count_a', 'allele_count_b'])\n df.columns = ['anc_species_state', 'anc_pop_state',\n 'anc_species_pop', 'prob']\n return df", "def resultsToArray(self):\n data = {}\n for item in self.data_array:\n data[item[0]] = [item[1]]\n return data", "def transform(self, X: np.ndarray) -> pd.core.frame.DataFrame:\r\n return pd.DataFrame(X, columns=self.attributes_names)", "def Statsmodels_PValues(name, results, Explanatory, NumDecimal):\n\n PValues = results.pvalues\n PValues = [str(round(item, NumDecimal)) for item in PValues]\n for item in range(0, len(Explanatory.columns)):\n PValues[item + 1] = str(PValues[item + 1]) + ' ' + str(Explanatory.columns[item])\n PValues[0] = str(PValues[0])\n PValues = ', '.join(PValues)\n\n return PValues", "def write_one_phenotype(result_df, phenotype_name, gene_name_list, run_parameters):\n result_df.to_csv(get_output_file_name(run_parameters, 'results_directory', phenotype_name, 'viz'), header=True, index=False, sep='\\t')\n\n download_result_df 
= pd.DataFrame(data=None, index=None, columns=[phenotype_name])\n download_result_df[phenotype_name] = result_df['Gene_ENSEMBL_ID']\n download_result_df.to_csv(\n get_output_file_name(run_parameters, 'results_tmp_directory', phenotype_name, 'download'), header=True, index=False, sep='\\t')\n\n top_genes = download_result_df.values[: run_parameters['top_beta_of_sort']]\n update_orig_result_df = pd.DataFrame(np.in1d(gene_name_list, top_genes).astype(int), index=gene_name_list, columns=[phenotype_name])\n update_orig_result_df.to_csv(\n get_output_file_name(run_parameters, 'results_tmp_directory', phenotype_name, 'original'), header=True, index=True, sep='\\t')", "def prod_test(dataframe):\n dataframe.columns = [\"word_values\"]\n dataframe = dataframe.dropna(subset=['word_values'])\n return su.pipe().transform(dataframe)", "def predict_values_from_proba(proba_res, lab_res):\n\n nb_samples = proba_res[0].shape[0]\n nb_attribs = len(proba_res)\n predictions = init_predictions(nb_samples, nb_attribs)\n\n assert nb_attribs == len(lab_res)\n for i in range(nb_attribs):\n my_result = lab_res[i].take(np.argmax(proba_res[i], axis=1), axis=0)\n np.rint(my_result)\n predictions[:, i] = my_result\n\n return predictions.astype(int)" ]
[ "0.6117095", "0.58314747", "0.58246684", "0.5775569", "0.5740103", "0.562746", "0.5493443", "0.5482421", "0.5470787", "0.5464433", "0.54462564", "0.5428806", "0.54074293", "0.5386283", "0.5379246", "0.5360976", "0.53533596", "0.53266215", "0.5307653", "0.5297774", "0.5293929", "0.52835184", "0.5278332", "0.52485234", "0.5232543", "0.5231443", "0.5225568", "0.5213808", "0.52078795", "0.52022815", "0.51997215", "0.5187242", "0.5186976", "0.5184314", "0.5175554", "0.5160705", "0.5150955", "0.5138604", "0.5134026", "0.5129429", "0.51084846", "0.5091845", "0.5088146", "0.50818104", "0.50800717", "0.5077786", "0.50664", "0.50650334", "0.50585246", "0.505835", "0.505468", "0.5050589", "0.5045476", "0.50341", "0.50192463", "0.5015598", "0.50131774", "0.5008602", "0.5006525", "0.5001016", "0.50009984", "0.49973482", "0.49887663", "0.49846175", "0.49833485", "0.49802247", "0.49787804", "0.4978192", "0.4968255", "0.49680862", "0.49582157", "0.49507904", "0.49458456", "0.49409023", "0.49390528", "0.49382713", "0.49307567", "0.49241576", "0.4923283", "0.49171942", "0.49024552", "0.48986185", "0.48974147", "0.48953354", "0.48945367", "0.48938745", "0.48780504", "0.4875723", "0.48712704", "0.4868681", "0.4868665", "0.48682618", "0.4865395", "0.48616743", "0.4850535", "0.484776", "0.48471293", "0.48415497", "0.48402032", "0.4836325" ]
0.51801986
34
Takes a pandas dataframe which contains the proportions of language classes over generations and plots them as timecourses
def plot_timecourse_language_types(lang_class_prop_over_gen_df, title, file_path, file_name): sns.set_style("darkgrid") sns.set_context("talk") fig, ax = plt.subplots() if len(possible_form_lengths) == 1: palette = sns.color_palette(["black", "red", "green", "grey"]) else: palette = sns.color_palette(["black", sns.color_palette("colorblind")[3], sns.color_palette("colorblind")[1], sns.color_palette("colorblind")[2], sns.color_palette("colorblind")[9], sns.color_palette("colorblind")[0], sns.color_palette("colorblind")[7]]) sns.lineplot(x="generation", y="proportion", hue="class", data=lang_class_prop_over_gen_df, palette=palette) # sns.lineplot(x="generation", y="proportion", hue="class", data=lang_class_prop_over_gen_df, palette=palette, ci=95, err_style="bars") plt.tick_params(axis='both', which='major', labelsize=18) plt.tick_params(axis='both', which='minor', labelsize=18) plt.ylim(-0.05, 1.05) plt.title(title, fontsize=22) plt.xlabel('Generation', fontsize=20) plt.ylabel('Mean proportion', fontsize=20) handles, labels = ax.get_legend_handles_labels() labels = ['D', 'H', 'H+Div.', 'C', 'C+Red.-part', 'C+Red.-whole', 'O'] # ax.legend(handles=handles[1:], labels=labels[1:]) ax.legend(handles=handles, labels=labels) plt.tight_layout() plt.savefig(file_path + "Timecourse_plot_lang_types_" + file_name + ".png") plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_word_class_pr_genre(df):\n df['nouns'] = df['nouns'] * 100\n df['verbs'] = df['verbs'] * 100\n df['adverbs'] = df['adverbs'] * 100\n # plotting nouns\n plotting_helper_method('nouns', 'genre', df)\n plt.title('Amount of nouns pr song pr. genre')\n plt.xlabel(\"Amount of nouns in each song\")\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/nouns_pr_genre_plot')\n\n # plotting verbs\n plotting_helper_method('verbs', 'genre', df)\n plt.title('Amount of verbs pr song pr. genre')\n plt.xlabel('Amount of verbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/verbs_pr_genre_plot')\n\n # plotting adverbs\n plotting_helper_method('adverbs', 'genre', df)\n plt.title('Amount of adverbs pr song pr. genre')\n plt.xlabel('Amount of adverbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/adverbs_pr_genre_plot')", "def dataframe_to_language_stats(dataframe, n_runs, n_batches, n_gens, possible_form_lengths):\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. collapsing the two different reduplication strategies?)\n proportion_column = np.array(dataframe['proportion'])\n proportion_column_as_results = proportion_column.reshape((n_runs*n_batches, n_gens, n_language_classes))\n return proportion_column_as_results", "def plot_pie_charts_of_word_class_distribution(df):\n genre_dict = {\n 'g':'Rock',\n 'b':'Hip-Hop',\n 'r':'Pop'\n }\n for _, genre in genre_dict.items():\n filtered_df = df[df['genre'] == genre]\n \n # plotting circle diagram for the specific genre\n avg_percentage_nouns = filtered_df['nouns'].mean()\n avg_percentage_verbs = filtered_df['verbs'].mean()\n avg_percentage_adverbs = filtered_df['adverbs'].mean()\n\n total = avg_percentage_nouns + avg_percentage_nouns + avg_percentage_nouns\n nouns = avg_percentage_nouns / total * 100\n verbs = avg_percentage_verbs / total * 100\n adverbs = avg_percentage_adverbs / total * 100\n\n # Pie chart\n labels = ['Nouns', 'Verbs', 'Adverbs']\n sizes = [nouns, verbs, adverbs]\n\n _, ax1 = plt.subplots()\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n # Equal aspect ratio ensures that pie is drawn as a circle\n ax1.axis('equal') \n plt.tight_layout()\n plt.title(f'Circle diagram of the genre \"{genre}\"s average word classes distribution')\n plt.show()\n # plt.savefig(f'src/visualization/feature_plots/{genre}_word_class_distribution')", "def plot_barplot_language_types(lang_class_prop_over_gen_df, title, file_path, file_name, n_runs, n_batches, n_gens, gen_start, lang_class_baselines_all, lang_class_baselines_fully_expressive, possible_form_lengths):\n\n sns.set_style(\"darkgrid\")\n sns.set_context(\"talk\")\n\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. 
collapsing the two different reduplication strategies?)\n\n proportion_column_as_results = dataframe_to_language_stats(lang_class_prop_over_gen_df, n_runs, n_batches, n_gens, possible_form_lengths)\n\n proportion_column_from_start_gen = proportion_column_as_results[:, gen_start:]\n\n proportion_column_from_start_gen = proportion_column_from_start_gen.flatten()\n\n runs_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n for k in range(n_language_classes):\n runs_column_from_start_gen.append(i)\n runs_column_from_start_gen = np.array(runs_column_from_start_gen)\n\n generation_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n for k in range(n_language_classes):\n generation_column_from_start_gen.append(j)\n generation_column_from_start_gen = np.array(generation_column_from_start_gen)\n\n class_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n if n_language_classes == 4:\n class_column_from_start_gen.append('degenerate')\n class_column_from_start_gen.append('holistic')\n class_column_from_start_gen.append('compositional')\n class_column_from_start_gen.append('other')\n elif n_language_classes == 7:\n class_column_from_start_gen.append('D')\n class_column_from_start_gen.append('H')\n class_column_from_start_gen.append('H+Div.')\n class_column_from_start_gen.append('C')\n class_column_from_start_gen.append('C+Red.-part')\n class_column_from_start_gen.append('C+Red.-whole')\n class_column_from_start_gen.append('O')\n\n new_data_dict = {'run': runs_column_from_start_gen,\n 'generation': generation_column_from_start_gen,\n 'proportion': proportion_column_from_start_gen,\n 'class': class_column_from_start_gen}\n\n lang_class_prop_over_gen_df_from_starting_gen = pd.DataFrame(new_data_dict)\n\n if len(possible_form_lengths) == 1:\n palette = sns.color_palette([\"black\", \"red\", \"green\", \"grey\"])\n else:\n palette = sns.color_palette([\"black\",\n sns.color_palette(\"colorblind\")[3],\n sns.color_palette(\"colorblind\")[1],\n sns.color_palette(\"colorblind\")[2],\n sns.color_palette(\"colorblind\")[9],\n sns.color_palette(\"colorblind\")[0],\n sns.color_palette(\"colorblind\")[7]])\n\n sns.barplot(x=\"class\", y=\"proportion\", data=lang_class_prop_over_gen_df_from_starting_gen, palette=palette)\n\n # plt.axhline(y=lang_class_baselines_all[0], xmin=0.0, xmax=0.25, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[1], xmin=0.25, xmax=0.5, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[2], xmin=0.5, xmax=0.75, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[3], xmin=0.75, xmax=1.0, color='k', linestyle='--', linewidth=2)\n #\n # if title == 'Mutual Understanding Only' or title == 'Minimal Effort & Mutual Understanding':\n # plt.axhline(y=lang_class_baselines_fully_expressive[0], xmin=0.25, xmax=0.5, color='0.6', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_fully_expressive[1], xmin=0.5, xmax=0.75, color='0.6', linestyle='--', linewidth=2)\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n # plt.xlabel('Language class')\n plt.xlabel('', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n plt.tight_layout()\n\n if holistic_without_partial_meaning is True:\n plt.savefig(file_path + 
\"Barplot_lang_types_\" + file_name + \"_burn_in_\" + str(gen_start) + \".png\")\n else:\n plt.savefig(file_path + \"Barplot_lang_types_\" + file_name + \"_burn_in_\" + str(gen_start) + \"_NEW.png\")\n plt.show()", "def leitner_proportions(df):\n denom = df.shape[0]\n prop_dict = {}\n\n for i in range(1,6):\n df_i = df[df['comfort_level'] == i]\n numer = df_i.shape[0]\n prop_dict[i] = numer / denom\n\n prop_df = pd.DataFrame.from_dict([prop_dict], orient='columns') \n\n prop_df = prop_df.T.rename(columns={0:'proportion'}) \n \n return prop_df", "def getFigBysubClass(df, path, nameClass):\n\ttmp = pd.DataFrame()\n\ttmp = tmp.append(df)\n\tdicoNbTrClass = countTranscript.getFig3Percent(path)\n\tdicoNbTrBt = countTranscript.getFig5Percent(path)\n\tdel tmp['nuclA']\n\tdel tmp['nuclT']\n\tdel tmp['nuclN']\n\tdel tmp['Type']\n\tclassDf = pd.DataFrame()\n\tclassDftmp = tmp[ tmp.Class == nameClass]\n\tgroups = classDftmp.groupby('Biotype')\n\tfor name, group in groups:\n\t\tgroupFilter = group[ group.Location == 'intron' ]\n\t\tgroupFilter = groupFilter.append( group[ group.Location == 'exon' ])\n\t\trow = sumSubTable(groupFilter, name)\n\t\trow['Biotype'] = name\n\t\trow['Class'] = nameClass\n\t\tif name not in dicoNbTrBt['Tot']:\n\t\t\tdicoNbTrBt['Tot'][name] = 0\n\t\tif name not in dicoNbTrBt['Wt']:\n\t\t\tdicoNbTrBt['Wt'][name] = 0\n\t\tif name not in dicoNbTrBt['Shuf']:\n\t\t\tdicoNbTrBt['Shuf'][name] = 0\n\t\trow['nbTr'] = dicoNbTrBt['Tot'][name]\n\t\trow['NbTrpG4Wt'] = dicoNbTrBt['Wt'][name]\n\t\trow['NbTrpG4Shuf'] = dicoNbTrBt['Shuf'][name]\n\t\trow.update(computePercent(row))\n\t\trow = pd.DataFrame(row, index=[len(classDftmp)+1])\n\t\tclassDf = classDf.append(row)\n\trow = {'Class' : nameClass,\n\t\t\t'Biotype' : nameClass,\n\t\t\t'nuclG' : sum(classDftmp.nuclG),\n\t\t\t'nuclC' : sum(classDftmp.nuclC),\n\t\t\t'nbTr' : dicoNbTrClass['Tot'][nameClass],\n\t\t\t'NbpG4rWt' : sum(classDftmp.NbpG4rWt),\n\t\t\t'NbpG4rShuf' : sum(classDftmp.NbpG4rShuf),\n\t\t\t'NbTrpG4Wt' : dicoNbTrClass['Wt'][nameClass],\n\t\t\t'NbTrpG4Shuf' : dicoNbTrClass['Shuf'][nameClass],\n\t\t\t'Tot' : sum(classDftmp.Tot)}\n\trow.update(computePercent(row))\n\trow = pd.DataFrame(row, index=[len(classDf)+1])\n\tclassDf = classDf.append(row)\n\tclassDf = computeDensity(classDf, 'Segment')\n\treturn classDf", "def language_stats_to_dataframe(results, n_runs, n_gens, possible_form_lengths):\n\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. 
collapsing the two different reduplication strategies?)\n\n column_proportion = np.array(results)\n\n if n_language_classes == 4 and column_proportion.shape[2] > n_language_classes:\n column_proportion_compositional_summed = np.zeros((n_runs, n_gens, n_language_classes))\n for r in range(len(column_proportion_compositional_summed)):\n for g in range(len(column_proportion_compositional_summed[0])):\n column_proportion_compositional_summed[r][g] = np.array([column_proportion[r][g][0], column_proportion[r][g][1], column_proportion[r][g][2]+column_proportion[r][g][3], column_proportion[r][g][4]])\n column_proportion = column_proportion_compositional_summed.flatten()\n\n else:\n column_proportion = column_proportion.flatten()\n\n column_runs = []\n for i in range(n_runs):\n for j in range(n_gens):\n for k in range(n_language_classes):\n column_runs.append(i)\n column_runs = np.array(column_runs)\n\n column_generation = []\n for i in range(n_runs):\n for j in range(n_gens):\n for k in range(n_language_classes):\n column_generation.append(j)\n column_generation = np.array(column_generation)\n\n column_type = []\n for i in range(n_runs):\n for j in range(n_gens):\n if len(possible_form_lengths) == 1:\n column_type.append('degenerate')\n column_type.append('holistic')\n column_type.append('compositional')\n column_type.append('other')\n else:\n column_type.append('degenerate')\n column_type.append('holistic')\n column_type.append('holistic_diversify_signal')\n column_type.append('compositional')\n column_type.append('compositional_reduplicate_segments')\n column_type.append('compositional_reduplicate_whole_signal')\n column_type.append('other')\n\n data = {'run': column_runs,\n 'generation': column_generation,\n 'proportion': column_proportion,\n 'class': column_type}\n\n lang_class_prop_over_gen_df = pd.DataFrame(data)\n\n return lang_class_prop_over_gen_df", "def grid_plot_google(proverbs_list, data, dim = (4,4), ylog = False): \n\n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0], dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2)\n \n \n res = None\n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize=14)\n fig.text(0.02, 0.5, 'Frequency among all volumes in Google Books', va='center', rotation='vertical', fontsize=14)\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i].lower()),horizontalalignment='left', transform=ax.transAxes)\n\n ts = data[data.proverb ==proverbs_list[i]]\n ts = ts[data.year >= 1800]\n ts.year = pd.to_datetime(ts.year, format = '%Y', errors='coerce')\n ts.index = ts.year\n ts = ts.sort_index()\n ts = ts.reindex(pd.date_range('01/01/1800', '01/01/2019', freq = 'AS'), fill_value=0)\n #get 5-year rolling average\n ts2 = ts.copy()\n ts2 = ts2.rolling(window = 5).mean()\n print(ts)\n\n if res != None:\n ts = ts.resample(res).sum()\n \n if ylog == False:\n pass\n\n elif ylog == True:\n ax.set_yscale('log') \n \n ax.plot(ts.index, ts['vol_norm'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['vol_norm'], alpha = 0.9, color='darkorange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def plot_timecourse_repair_counts(repair_counts_over_gen_df, title, file_path, file_name):\n sns.set_style(\"darkgrid\")\n 
sns.set_context(\"talk\")\n\n fig, ax = plt.subplots()\n\n palette = sns.color_palette(\"colorblind\")\n\n sns.lineplot(x=\"generation\", y=\"independent_repair_proportion\", data=repair_counts_over_gen_df, palette=palette)\n # sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette, ci=95, err_style=\"bars\")\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n plt.xlabel('Generation', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n # handles, labels = ax.get_legend_handles_labels()\n #\n # labels = ['D', 'H', 'H+Div.', 'C', 'C+Red.-part', 'C+Red.-whole', 'O']\n\n # # ax.legend(handles=handles[1:], labels=labels[1:])\n # ax.legend(handles=handles, labels=labels)\n plt.tight_layout()\n plt.savefig(file_path + \"Timecourse_plot_repairs_\" + file_name + \".png\")\n plt.show()", "def proportions_visualiser(\n df: pd.core.frame.DataFrame,\n colum_name: str = \"Sensor Glucose (mg/dL)\",\n limits: Dict[str, int] = {\"low\": 70, \"high\": 180},\n windows: Dict[str, int] = {\"weekly\": 7, \"monthly\": 30},\n kind: str = \"TIR\",\n) -> NoReturn:\n\n valid_kinds = [\"TIR\", \"TBR\", \"TAR\"]\n\n if \"low\" not in limits.keys() or \"high\" not in limits.keys():\n raise Exception(f\"limits.keys() should be ['low', 'high'] not {limits.keys()}\")\n\n titles = {\n \"TIR\": f\"Time In Range [{limits['low']},{limits['high']})\",\n \"TAR\": f\"Time Above Range >= {limits['high']}\",\n \"TBR\": f\"Time Below Range < {limits['low']}\",\n }\n\n kind = kind.upper()\n if kind not in valid_kinds:\n raise Exception(\n f\"Invalid kind `{kind}`, select one from {valid_kinds} or refer to help({self.__name__})\"\n )\n\n TIR = (\n lambda y: 100\n * y[(y >= limits[\"low\"]) & (y < limits[\"high\"])].count()\n / y.count()\n )\n TBR = lambda y: 100 * y[(y < limits[\"low\"])].count() / y.count()\n TAR = lambda y: 100 * y[(y >= limits[\"high\"])].count() / y.count()\n\n _proportions = df[colum_name].groupby(df.index.date).apply(eval(f\"{kind}\"))\n\n _proportions.plot(**{\"label\": \"daily\"})\n\n for key, value in windows.items():\n _ax = _proportions.rolling(value).mean().plot(**{\"label\": key})\n\n _mean_proportion = _proportions.mean()\n plt.ylabel(\"Percentage\")\n plt.axhline(\n _mean_proportion,\n **{\"label\": f\"mean = {round(_mean_proportion,1)}\", \"c\": \"blue\"},\n )\n plt.legend()\n plt.title(titles[kind])", "def visualize(X: pd.DataFrame, y: pd.DataFrame) -> None:\r\n y[\"Action\"].value_counts().plot.pie(explode=(0.02, 0.04, 0.05, 0.09), title=\"Proportion of classes in dataset\")\r\n plt.savefig(\"Figures/proportions\")\r\n\r\n for i, column in enumerate(X.columns):\r\n fig, ax = plt.subplots(1, 2)\r\n\r\n ax[0].hist(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[0].set_xlabel(column)\r\n ax[0].set_ylabel(\"Frequency\")\r\n\r\n ax[1].boxplot(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[1].set_xlabel(\"Action\")\r\n ax[1].set_ylabel(column)\r\n\r\n X[column].hist(by=y[\"Action\"])\r\n\r\n ax[0].legend([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n ax[1].set_xticklabels([\"allow\", \"deny\", \"drop\", 
\"reset-both\"])\r\n fig.suptitle(\"Distribution of classes among attributes\")\r\n plt.savefig(\"Figures/boxplots\")", "def makeComparsionChart(columns, data):\n fig = plt.figure(figsize=(16, 10))\n gs = gridspec.GridSpec(2, 3, wspace = 0.2, hspace=0.2, right=0.96, left=0.04)\n ax1 = plt.subplot(gs[0, 0:1], label=\"\")\n ax2 = plt.subplot(gs[0, 1:2], label=\"\" )\n ax3 = plt.subplot(gs[0, 2:3], label=\"\" )\n ax4 = plt.subplot(gs[1, 0:1], label=\"\" )\n ax5 = plt.subplot(gs[1, 1:2], label=\"\" )\n ax1.set_title('Before Scaling')\n ax2.set_title('After Standard Scaler')\n ax3.set_title('After Min-Max Scaler')\n ax4.set_title('After Roboust Scaler')\n ax5.set_title('After Normalization')\n\n for column in columns:\n sns.kdeplot(data[0][column], ax=ax1, legend=False)\n sns.kdeplot(data[1][column], ax=ax2, legend=False)\n sns.kdeplot(data[2][column], ax=ax3, legend=False)\n sns.kdeplot(data[3][column], ax=ax4, legend=False)\n sns.kdeplot(data[4][column], ax=ax5, legend=False)\n\n plt.show()", "def samplecost(app, endclasses, fxnmode, samptype='std', title=\"\"):\n associated_scens=[]\n for phase in app.phases:\n associated_scens = associated_scens + app.scenids.get((fxnmode, phase), [])\n costs = np.array([endclasses[scen]['cost'] for scen in associated_scens])\n times = np.array([time for phase, timemodes in app.sampletimes.items() if timemodes for time in timemodes if fxnmode in timemodes.get(time)] ) \n rates = np.array(list(app.rates_timeless[fxnmode].values()))\n \n tPlot, axes = plt.subplots(2, 1, sharey=False, gridspec_kw={'height_ratios': [3, 1]})\n phasetimes_start =[times[0] for phase, times in app.phases.items()]\n phasetimes_end =[times[1] for phase, times in app.phases.items()]\n ratetimes =[]\n ratesvect =[]\n phaselocs = []\n for (ind, phasetime) in enumerate(phasetimes_start):\n axes[0].axvline(phasetime, color=\"black\") \n phaselocs= phaselocs +[(phasetimes_end[ind]-phasetimes_start[ind])/2 + phasetimes_start[ind]]\n\n axes[1].axvline(phasetime, color=\"black\") \n ratetimes = ratetimes + [phasetimes_start[ind]] + [phasetimes_end[ind]]\n ratesvect = ratesvect + [rates[ind]] + [rates[ind]]\n #axes[1].text(middletime, 0.5*max(rates), list(app.phases.keys())[ind], ha='center', backgroundcolor=\"white\")\n #rate plots\n axes[1].set_xticks(phaselocs)\n axes[1].set_xticklabels(list(app.phases.keys()))\n \n axes[1].plot(ratetimes, ratesvect)\n axes[1].set_xlim(phasetimes_start[0], phasetimes_end[-1])\n axes[1].set_ylim(0, np.max(ratesvect)*1.2 )\n axes[1].set_ylabel(\"Rate\")\n axes[1].set_xlabel(\"Time (\"+str(app.units)+\")\")\n axes[1].grid()\n #cost plots\n axes[0].set_xlim(phasetimes_start[0], phasetimes_end[-1])\n axes[0].set_ylim(0, 1.2*np.max(costs))\n if samptype=='fullint':\n axes[0].plot(times, costs, label=\"cost\")\n else:\n if samptype=='quadrature' or samptype=='pruned piecewise-linear': \n sizes = 1000*np.array([weight if weight !=1/len(timeweights) else 0.0 for phase, timeweights in app.weights[fxnmode].items() for time, weight in timeweights.items() if time in times])\n axes[0].scatter(times, costs,s=sizes, label=\"cost\", alpha=0.5)\n axes[0].stem(times, costs, label=\"cost\", markerfmt=\",\", use_line_collection=True)\n \n axes[0].set_ylabel(\"Cost\")\n axes[0].grid()\n if title: axes[0].set_title(title)\n elif type(fxnmode[0])==tuple: axes[0].set_title(\"Cost function of \"+str(fxnmode)+\" over time\")\n else: axes[0].set_title(\"Cost function of \"+fxnmode[0]+\": \"+fxnmode[1]+\" over time\")\n #plt.subplot_adjust()\n plt.tight_layout()", "def 
costovertime(endclasses, app, costtype='expected cost'):\n costovertime = cost_table(endclasses, app)\n plt.plot(list(costovertime.index), costovertime[costtype])\n plt.title('Total '+costtype+' of all faults over time.')\n plt.ylabel(costtype)\n plt.xlabel(\"Time (\"+str(app.units)+\")\")\n plt.grid()", "def grid_plot_nyt(proverbs_list, data, dim = (4,4), res = '1M'):\n \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0], dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.3, hspace = 0.2)\n \n\n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize=14)\n fig.text(0.02, 0.5, 'Frequency among all articles in NYT', va='center', rotation='vertical', fontsize=14)\n \n #get month resolution\n ts = data.copy()\n resamp = ts.resample(res).sum()\n resamp = resamp.div(resamp['total'], axis =0)\n ts = resamp\n \n #get year resolution\n ts2 = data.copy()\n resamp = ts.resample('1Y').sum()\n resamp = resamp.div(resamp['total'], axis =0)\n ts2 = resamp\n \n #make each plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n\n print(ts[proverbs_list[i]])\n ax.plot(ts.index, ts[proverbs_list[i]], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2[proverbs_list[i]], alpha = 0.9, color = 'orange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def dashboard(df):\n panamax = (df.loc[:, \"Class\"] == \"Panamax\")\n post_panamax = (df.loc[:, \"Class\"] == \"Post-Panamax\")\n nearshore = (df.loc[:, \"Location\"] == \"Nearshore\")\n offshore = (df.loc[:, \"Location\"] == \"Offshore\")\n inbound = (df.loc[:, \"Course Behavior\"] == \"Inbound\")\n outbound = (df.loc[:, \"Course Behavior\"] == \"Outbound\")\n dat = {\"Proportion<br>of Transits\":[\n str(round(sum(panamax) / len(df) * 100, 2)) + \"%\",\n str(round(sum(post_panamax) / len(df) * 100, 2)) + \"%\", \"100%\"\n ],\n \"Compliance<br>Rate\":[\n str(round(sum(panamax & (df.loc[:, \"VSPD kn\"] <= 10)) /\n sum(panamax) * 100, 2)) + \"%\",\n str(round(sum(post_panamax & (df.loc[:, \"VSPD kn\"] <= 10)) /\n sum(post_panamax) * 100, 2)) + \"%\",\n str(round(sum(df.loc[:, \"VSPD kn\"] <= 10) / len(df) * 100, 2)) + \"%\"\n ],\n \"Mean<br>VSPD\":[\n str(round(df[panamax].loc[:, \"VSPD kn\"].mean(), 2)) + \" kn\",\n str(round(df[post_panamax].loc[:, \"VSPD kn\"].mean(), 2)) + \" kn\",\n str(round(df.loc[:, \"VSPD kn\"].mean(), 2)) + \" kn\"\n ],\n \"Nearshore<br>Median VSPD\":[\n str(round(df[nearshore & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[nearshore & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[nearshore].loc[:, \"VSPD kn\"].median(), 2)) + \" kn\"\n ],\n \"Offshore<br>Median VSPD\":[\n str(round(df[offshore & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[offshore & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[offshore].loc[:, \"VSPD kn\"].median(), 2)) + \" kn\"\n ],\n \"Inbound<br>Median VSPD\":[\n str(round(df[inbound & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[inbound & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[inbound].loc[:, \"VSPD kn\"].median(), 2)) + \" 
kn\"\n ],\n \"Outbound<br>Median VSPD\":[\n str(round(df[outbound & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[outbound & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[outbound].loc[:, \"VSPD kn\"].median(), 2)) + \" kn\"\n ],\n \"VSPD-WSPD<br>Correlation\":[\n str(round(df[panamax].dropna().loc[:, (\"VSPD kn\", \"WSPD mph\")].corr()\n .iloc[0][1], 2)),\n str(round(df[post_panamax].dropna().loc[:,\n (\"VSPD kn\", \"WSPD mph\")].corr().iloc[0][1], 2)),\n str(round(df.dropna().loc[:,\n (\"VSPD kn\", \"WSPD mph\")].corr().iloc[0][1], 2))\n ]\n }\n index = [\"Panamax\", \"Post-Panamax\", \"Combined\"]\n return pd.DataFrame(dat, index)", "def trip_duration_stats(df):", "def forebears (WFROM,WTO,efrom, eto, g=25):\n \n c.execute(\"\"\"\n SELECT wyear, eyear, count (eyear), wnationality\n FROM clean \n WHERE (eyear IS NOT Null) AND (wyear IS NOT Null)\n AND WYEAR >= ? and WYEAR <= ? \n AND eyear >= ? AND eyear <= ? \n GROUP BY wyear, eyear\n ORDER BY wyear, eyear\"\"\", (WFROM, WTO, efrom, eto))\n\n years = c.fetchall()\n epigraphtotal = sum (s for (x,y,s,n) in years)\n #plt.xlim(WFROM, WTO)\n #plt.ylim(100, -1500)\n #colors = list(mcolors.TABLEAU_COLORS.keys()) *20\n #print(colors)\n \n \n gen =dd(lambda: dd(int))\n gentotal= dd(int)\n for (x,y,s,n) in years:\n gen[generation(x,g)][generation(y-x,g)] += 1\n gentotal[generation(x,g)] +=1\n \n for x in gen:\n for y in gen[x]:\n print(x, y, gen[x][y], gentotal[x])\n\n \n\n plt.figure(figsize=(10, 5))\n ax=plt.axes()\n\n\n #df.plot(colormap=gray) \n cumtotal = [0]*len(gen)\n\n for d in range(0,-200, -1):\n #for d in range(min(gen.keys()),max(gen.keys()),-1):\n xv = list(gen.keys())\n yv = [rat(gen[x][d],gentotal[x]) for x in xv]\n plt.bar(xv, yv, bottom=cumtotal,\n tick_label=[x*g for x in xv])\n cumtotal = [x + y for x, y in zip(yv, cumtotal)]\n #colors.pop()\n #print(d, cumtotal)\n plt.xlabel('Year of Work (in generations)')\n plt.ylabel(f'Share of Distance to forebear (in {g} year generations)')\n plt.title(f'Distance back vs Year of Work ({epigraphtotal} epigraphs)')\n plt.savefig(f\"figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}-{g}.png\")\n plt.close()", "def plot_df(data_frame):\n plt.figure(figsize = (10, 5))\n chart = sns.countplot(data_frame['label'], \n palette=\"Set1\"\n )\n plt.show()", "def visualize_timecourses_grid(timecourses_cols, cols_names, out_filename, hgap=20, vgap=20):\n\n\tnframes, ncomponents = timecourses_cols[0].shape\n\tncols = len(timecourses_cols)\n\n\tf, axarr = pyplot.subplots(ncomponents, ncols)\n\n\t# fig = pyplot.figure(figsize=(4, 2))\n\n\tthemin = min([timecourses_cols[col].min() for col in range(ncols)])\n\tthemax = min([timecourses_cols[col].max() for col in range(ncols)])\n\n\tfor k in range(ncomponents):\n\t\tfor col in range(ncols):\n\t\t\taxarr[k, col].plot(timecourses_cols[col][:,k])\n\n\t\t\tif k == 0:\n\t\t\t\taxarr[k, col].set_title(cols_names[col])\n\n\t\t\taxarr[k, col].tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')\n\t\t\taxarr[k, col].set_ylim(themin, themax)\n\t\t\taxarr[k, col].set_xticklabels([])\n\t\t\taxarr[k, col].set_yticklabels([])\n\n\tpyplot.tight_layout()\n\tpyplot.savefig(out_filename)\n\tpyplot.close(f)", "def proportion_with_cardinals(df, PATH):\n \n df_test = df.copy()\n df_test['cardinal'] = df.title.apply(contains_cardinal)\n\n click = df_test[df_test.target == 1]\n non = df_test[df_test.target == 0]\n click = click.groupby(['cardinal']).target.count()\n non = 
non.groupby(['cardinal']).target.count()\n \n non = non[1]/non[0] * 100\n click = click[1]/click[0] * 100\n # plot the results\n fig, ax = plt.subplots(figsize=(12,6))\n sns.barplot(x=['Normal', \"Clickbait\"], y=[non, click], ax=ax)\n plt.title(\"Percent of Titles Containing Cardinal Numbers\", size = 24)\n plt.xlabel(\"Article Class\", size=24)\n plt.ylabel(\"Percent %\", size = 24)\n plt.ylim(0, 100)\n plt.xticks([0,1], label=[\"Normal\", \"Clickbait\"], size=24)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n \n return ax", "def inst_class_stats(df, col='num_pkts'):\n classes = df.groupby('class_label')\n stat = classes[col].describe()\n return stat", "def graph_course(self):\n group = self.__data[\"filted_general_groupby\"]\n graph = {}\n if self.analysis[\"courses\"] is None:\n self.courses_list()\n\n # inicializa o dicionario que vai guardar o grafico\n for course in self.analysis[\"courses\"].index:\n graph[course] = []\n\n for i in range(18):\n min_v = i * 5\n max_v = min_v + 4.99\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n min_v = 95\n max_v = 100\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n self.analysis[\"graph_course\"] = graph", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def context_study_stats(frame_path=METRICS_DIR+'/merge.csv'):\n frame = pd.read_csv(frame_path)\n print(frame['LOC_prod'].mean())\n print(frame['LOC_prod'].sum())\n print(frame['LOC_test'].sum())\n print(frame['no_mutations'].sum())\n print(frame.shape[0])\n\n sizes = frame.groupby('project').size()\n prod = frame.groupby('project')['LOC_prod'].sum( )\n test = frame.groupby('project')['LOC_test'].sum()\n mutants = frame.groupby('project')['no_mutations'].sum()\n\n result = pd.DataFrame({'project': list(sizes.index),\n 'size': list(sizes),\n 'prod': list(prod),\n 'test': list(test),\n 'mutants': list(mutants)},\n columns=['project', 'size', 'prod', 'test', 'mutants'])\n print(result.to_latex())", "def makePdf(sources):\n pdf = 
PdfPages(\"sample_features.pdf\")\n classnames = []\n classname_dict = {}\n x = 2 # number of subplot columns\n y = 3 # number of subplot rows\n for source in sources:\n lc = source.lcs[0]\n\n if lc.classname not in classnames:\n classnames.append(lc.classname)\n classname_dict[lc.classname] = [lc]\n else:\n classname_dict[lc.classname].append(lc)\n\n if len(classname_dict[lc.classname]) < 3:\n\n label = lc.classname + \"; ID: \" + lc.id\n # all_times histogram:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(label)\n ax.axis('off')\n\n ax1 = fig.add_subplot(321)\n ax2 = fig.add_subplot(322)\n ax2.axis('off')\n ax3 = fig.add_subplot(323)\n ax4 = fig.add_subplot(324)\n ax4.axis('off')\n ax5 = fig.add_subplot(325)\n ax6 = fig.add_subplot(326)\n ax6.axis('off')\n\n hist, bins, other = ax1.hist(lc.all_times, 50, normed=True)\n ax1.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Histogram (normed) of all $\\Delta$Ts')\n\n ax2.text(0.0, 0.9, (r'$\\bullet$med time to next obs: ' +\n str(np.round(lc.cads_med, 4))))\n ax2.text(0.0, 0.75, (r'$\\bullet$avg time to next obs: ' +\n str(np.round(lc.avgt, 4))))\n ax2.text(0.0, 0.6, (r'$\\bullet$std dev of time to next obs: ' +\n str(np.round(lc.cads_std, 4))))\n ax2.text(0.0, 0.45, (r'$\\bullet$med of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_med, 4))))\n ax2.text(0.0, 0.3, (r'$\\bullet$avg of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_avg, 4))))\n ax2.text(0.0, 0.15, (r'$\\bullet$std dev of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_std, 4))))\n\n hist, bins, other = ax3.hist(lc.cads, 50)\n ax3.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Hist of time to next obs')\n\n ax6.text(\n 0.0, 0.9, r'$\\bullet$Number of epochs: ' + str(lc.n_epochs))\n ax6.text(0.0, 0.75, (r'$\\bullet$Time b/w first & last obs (days): ' +\n str(np.round(lc.total_time, 2))))\n ax6.text(0.0, 0.6, (r'$\\bullet$Average error in mag: ' +\n str(np.round(lc.avg_err, 4))))\n ax6.text(0.0, 0.45, (r'$\\bullet$Median error in mag: ' +\n str(np.round(lc.med_err, 4))))\n ax6.text(0.0, 0.3, (r'$\\bullet$Std dev of error: ' +\n str(np.round(lc.std_err, 4))))\n ax6.text(0.0, 0.15, '')\n\n ax5.scatter(lc.epochs, lc.mags)\n\n ax4.text(0.0, 0.9, (r'$\\bullet$Avg double to single step ratio: ' +\n str(np.round(lc.avg_double_to_single_step, 3))))\n ax4.text(0.0, 0.75, (r'$\\bullet$Med double to single step: ' +\n str(np.round(lc.med_double_to_single_step, 3))))\n ax4.text(0.0, 0.6, (r'$\\bullet$Std dev of double to single step: ' +\n str(np.round(lc.std_double_to_single_step, 3))))\n ax4.text(\n 0.0, 0.45,\n (r'$\\bullet$1st peak to 2nd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_2, 3))))\n ax4.text(\n 0.0, 0.3,\n (r'$\\bullet$2ndt peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_2_to_3, 3))))\n ax4.text(\n 0.0, 0.15,\n (r'$\\bullet$1st peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_3, 3))))\n\n pdf.savefig(fig)\n\n pdf.close()\n\n pdf = PdfPages('feature_plots.pdf')\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n classnamenum = 0\n\n colors = ['red', 'yellow', 'green', 'blue', 'gray', 'orange', 'cyan',\n 'magenta']\n for classname, lcs in list(classname_dict.items()):\n classnamenum += 1\n print(classname, len(lcs), 'light curves.')\n attr1 = []\n attr2 = []\n attr3 = []\n attr4 = []\n attr5 = []\n attr6 = []\n 
attr7 = []\n attr8 = []\n for lc in lcs:\n attr1.append(lc.n_epochs)\n attr2.append(lc.avgt)\n attr3.append(lc.cads_std)\n attr4.append(lc.total_time)\n attr5.append(lc.all_times_hist_peak_val)\n attr6.append(lc.cad_probs[5000])\n attr7.append(lc.all_times_nhist_peak_1_to_3)\n attr8.append(lc.all_times_nhist_peak_val)\n\n ax2.scatter(attr1, attr2, color=colors[classnamenum], label=classname)\n ax1.scatter(attr3, attr4, color=colors[classnamenum], label=classname)\n ax2.set_xlabel('N Epochs')\n ax2.set_ylabel('Avg time to next obs')\n ax1.set_xlabel('Standard dev. of time to next obs')\n ax1.set_ylabel('Time b/w first and last obs')\n\n ax3.scatter(attr5, attr6, color=colors[classnamenum], label=classname)\n ax4.scatter(attr7, attr8, color=colors[classnamenum], label=classname)\n ax3.set_xlabel(r'All $\\Delta$T hist peak val')\n ax3.set_ylabel('Prob time to next obs <= 5000 min')\n ax4.set_xlabel(r'$\\Delta$Ts normed hist peak 1 to peak 3')\n ax4.set_ylabel(r'Peak val of all $\\Delta$Ts normed hist')\n\n #ax1.legend(bbox_to_anchor=(1.1, 1.1),prop={'size':6})\n ax2.legend(bbox_to_anchor=(1.1, 1.1), prop={'size': 6})\n #ax3.legend(loc='upper right',prop={'size':6})\n #ax4.legend(loc='upper right',prop={'size':6})\n\n pdf.savefig(fig)\n\n pdf.close()\n return 0", "def plot_associative_learning_progress(ax, df):\n\n num_objects_list = sorted(df.curr_num_objects.unique())\n legend_list = []\n for idx in num_objects_list:\n ax.plot(df[df.curr_num_objects == idx].groupby('objects_iter').rewards.mean())\n legend_list.append(f'ns={idx}')\n ax.set_xlabel('Stimulus iteration')\n ax.set_ylabel('P(correct)')\n ax.set_ylim([0.4, 1])\n ax.legend(legend_list)", "def count_plot_target_class(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.groupby([self.target_column]).size()) # print the sum of every class\r\n\r\n sns.countplot(data=self.data_frame, x=self.data_frame[self.target_column])\r\n plt.title(self.dataframe_name + ': Display the distribution of ' + self.target_column + ' class')\r\n plt.xlabel('Target Name: ' + self.target_column)\r\n plt.ylabel('Count')\r\n self.save_plot_as_image()\r\n plt.show()", "def grid_plot_twitter(proverbs_list, data,dim = (4,4), ylog = False, rt = False): \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0],dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2)\n \n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize = 14)\n fig.text(0.02, 0.5, 'Frequency among all {}-grams on Twitter'.format(len(proverbs_list[0].split())), va='center', rotation='vertical', fontsize = 14)\n \n #loop to create each timeseries plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n ts = data[data.proverb ==proverbs_list[i]]\n ts.date = pd.to_datetime(ts.date, format = '%Y-%m-%d', errors='coerce')\n ts.index = ts.date\n ts = ts.sort_index()\n print(ts)\n ts2 = ts.copy()[['freq_noRT', 'freq']]\n print(ts2)\n ts2 = ts2.rolling(window=30).mean()\n print(ts2)\n\n \n if ylog == False:\n pass\n\n elif ylog == True:\n ax.set_yscale('log') \n\n if rt == False:\n ax.plot(ts.index, ts['freq_noRT'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['freq_noRT'], alpha = 0.9, 
color='darkorange') \n \n elif rt ==True:\n ax.plot(ts.index, ts['freq'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['freq'], alpha = 0.9, color='darkorange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def prop_types(houses:pd.DataFrame) -> None:\n sns.set_style('whitegrid')\n indexNames= houses[houses['PRICE'] >= 3000000].index\n houses= houses.drop(indexNames)\n \n ax= sns.catplot(x= 'PROPERTY_TYPE', y= 'PRICE', kind= 'box', data= houses)\n ax.set_xticklabels(rotation=30)\n plt.tight_layout()\n plt.show()\n \n ax= sns.countplot(x= 'PROPERTY_TYPE', data= houses)\n ax.set_xticklabels(ax.get_xticklabels(), rotation= 30, ha=\"right\", fontsize=9)\n plt.show()", "def plot_genre_and_word_count(df):\n plotting_helper_method('word_count', 'genre', df)\n\n plt.title('Word count pr. genre')\n plt.xlabel('Word Count')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/word_count_plot')", "def grid_plot_gutenberg(proverbs_list, data, counts, begin_at =1800, end_at = 1950, bin_size = 20): \n \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = 4, 4\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2) \n \n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize=14)\n fig.text(0.02, 0.5, 'Frequency among all volumes in Gutenberg', va='center', rotation='vertical', fontsize=14)\n \n ts = data.copy()\n ts_bin = ts.groupby(lambda x: (x//bin_size)*bin_size).sum()\n ts_norm = ts_bin.div(ts_bin['num_books'], axis=0)\n ts_norm = ts_norm.fillna(0)\n ts = ts_norm.truncate(before = begin_at, after = end_at)[proverbs_list]\n\n #loop to create each timeseries plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ts2 = ts[proverbs_list[i]].to_frame()\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n\n ax.plot(ts2.index, ts2[proverbs_list[i]], alpha = 0.5)\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def make_plots():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n k = 100\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with open(trained_model_file) as p:\n model = pickle.load(p)\n print \"loaded model\"\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n X_train, y_train = prep.subset(features)\n feature_importance(model, features)\n feature_subset_indices = [73, 13]\n gb_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_gb.pkl\"\n with open(gb_file) as p:\n gb = pickle.load(p)\n make_partial_dependence(gb, X_train, y_train, features, feature_subset_indices)", "def explore_col(s, e):\n \n fig = plt.figure(figsize=(10, 8))\n\n\n sub1 = fig.add_subplot(221) \n sub1.set_title(s +' histogram') \n sub1.hist(df_tr_lbl[s])\n\n sub2 = fig.add_subplot(222)\n sub2.set_title(s +' boxplot')\n sub2.boxplot(df_tr_lbl[s])\n \n #np.random.seed(12345)\n \n if e > 100 or e <= 0:\n 
select_engines = list(pd.unique(df_tr_lbl.id))\n else:\n select_engines = np.random.choice(range(1,101), e, replace=False)\n \n sub3 = fig.add_subplot(223)\n sub3.set_title('time series: ' + s +' / cycle')\n sub3.set_xlabel('cycle')\n for i in select_engines:\n df = df_tr_lbl[['cycle', s]][df_tr_lbl.id == i]\n sub3.plot(df['cycle'],df[s])\n \n sub4 = fig.add_subplot(224)\n sub4.set_title(\"scatter: \"+ s + \" / ttf (regr label)\")\n sub4.set_xlabel('ttf')\n sub4.scatter(df_tr_lbl['ttf'],df_tr_lbl[s])\n\n\n plt.tight_layout()\n plt.show()", "def course_plots(request, course_code):\n try:\n course = (\n Course.objects.filter(course_filters_pcr, full_code=course_code)\n .order_by(\"-semester\")[:1]\n .select_related(\"topic\", \"topic__most_recent\")\n .get()\n )\n except Course.DoesNotExist:\n raise Http404()\n\n course = course.topic.most_recent\n\n current_semester = get_current_semester()\n\n # Compute set of sections to include in plot data\n filtered_sections = (\n Section.objects.filter(\n extra_metrics_section_filters_pcr(current_semester),\n course__topic_id=course.topic_id,\n )\n .annotate(efficient_semester=F(\"course__semester\"))\n .distinct()\n )\n instructor_ids = request.GET.get(\"instructor_ids\")\n if instructor_ids:\n instructor_ids = [int(id) for id in instructor_ids.split(\",\")]\n filtered_sections = filtered_sections.filter(\n instructors__id__in=instructor_ids,\n ).distinct()\n\n section_map = defaultdict(dict) # a dict mapping semester to section id to section object\n for section in filtered_sections:\n section_map[section.efficient_semester][section.id] = section\n\n (\n avg_demand_plot,\n avg_demand_plot_min_semester,\n recent_demand_plot,\n recent_demand_plot_semester,\n avg_percent_open_plot,\n avg_percent_open_plot_min_semester,\n recent_percent_open_plot,\n recent_percent_open_plot_semester,\n ) = tuple([None] * 8)\n avg_demand_plot_num_semesters, avg_percent_open_plot_num_semesters = (0, 0)\n if section_map:\n status_updates_map = get_status_updates_map(section_map)\n (\n avg_demand_plot,\n avg_demand_plot_min_semester,\n avg_demand_plot_num_semesters,\n recent_demand_plot,\n recent_demand_plot_semester,\n ) = avg_and_recent_demand_plots(section_map, status_updates_map, bin_size=0.005)\n (\n avg_percent_open_plot,\n avg_percent_open_plot_min_semester,\n avg_percent_open_plot_num_semesters,\n recent_percent_open_plot,\n recent_percent_open_plot_semester,\n ) = avg_and_recent_percent_open_plots(section_map, status_updates_map)\n\n current_adp = get_or_create_add_drop_period(current_semester)\n local_tz = gettz(TIME_ZONE)\n\n return Response(\n {\n \"code\": course_code,\n \"current_add_drop_period\": {\n \"start\": current_adp.estimated_start.astimezone(tz=local_tz),\n \"end\": current_adp.estimated_end.astimezone(tz=local_tz),\n },\n \"average_plots\": {\n \"pca_demand_plot_since_semester\": avg_demand_plot_min_semester,\n \"pca_demand_plot_num_semesters\": avg_demand_plot_num_semesters,\n \"pca_demand_plot\": avg_demand_plot,\n \"percent_open_plot_since_semester\": avg_percent_open_plot_min_semester,\n \"percent_open_plot_num_semesters\": avg_percent_open_plot_num_semesters,\n \"percent_open_plot\": avg_percent_open_plot,\n },\n \"recent_plots\": {\n \"pca_demand_plot_since_semester\": recent_demand_plot_semester,\n \"pca_demand_plot_num_semesters\": 1 if recent_demand_plot is not None else 0,\n \"pca_demand_plot\": recent_demand_plot,\n \"percent_open_plot_since_semester\": recent_percent_open_plot_semester,\n \"percent_open_plot_num_semesters\": 1 if 
recent_demand_plot is not None else 0,\n                \"percent_open_plot\": recent_percent_open_plot,\n            },\n        }\n    )", "def summary_source(classes_fold_score_list, classes_periods, classes): \n    scores = []\n    for idx in range(len(classes_periods)):\n        temp = pd.concat([classes_fold_score_list[idx], classes_periods[idx].source], axis=1)\n        temp = temp.groupby(['source', 'catalog']).size().unstack(fill_value=0).T\n        scores.append(temp)\n    score_df = pd.concat(scores, keys=classes)\n    score_df.index.set_levels([\"Wrong\", \"Right\", \"Multiply\"], \n                              level=1,\n                              inplace=True)\n    score_df = (score_df.T.fillna(0))\n    dividend = score_df.iloc[:, score_df.columns.get_level_values(1)==\"Right\"].T.droplevel(-1).T\n    divisor = score_df.T.groupby(level=0).sum().T.loc[:,[\"RRL\",\"Ceph\",\"LPV\",\"DSCT\",\"EB\"]]\n    return dividend.divide(divisor).round(2).mean(axis=1)", "def time_stats(df):", "def visualise_hourly_arrivals_at_each_lab(tests_dataframe):\r\n    labs_df = create_dataframe_from_csv('labs.csv')\r\n    labs_df = drop_missing_values_in_dataframe(labs_df)\r\n    list_of_labs = labs_df['lab_name'].to_list()\r\n    for lab_name in list_of_labs:\r\n        df = tests_dataframe.loc[tests_dataframe['lab_name'] == lab_name]\r\n        df.time_test_arrives_lab = pd.to_datetime(df.time_test_arrives_lab)\r\n        df = df.sort_values(by=\"time_test_arrives_lab\")\r\n        df = df[['time_test_arrives_lab']]\r\n        df = df.reset_index().set_index('time_test_arrives_lab')\r\n        df = df.resample('H').count()\r\n        df.plot(title = 'hourly arrivals at ' + lab_name)\r\n        plt.show()", "def create_course_bars(hist_df, fig, labels):\n    colors = [\n        \"#60a7ba\",\n        \"#f0912d\",\n        \"#357025\",\n        \"#ba3622\",\n        \"#8f33d6\",\n        \"#6a4c4d\",\n        \"#cf8af3\",\n    ]\n    all_numbers = []\n\n    for index, _ in enumerate(fig[\"layout\"][\"annotations\"]):\n        all_numbers.append(float(fig[\"layout\"][\"annotations\"][index][\"text\"]))\n\n    for _, idx in enumerate(hist_df.index.unique()):\n        row = all_numbers.index(idx)\n        show_legend = row == 0\n        traces = []\n\n        # Calculate subfigure position in figure\n        row = (row + 1) / 2\n        col = 1 if row.is_integer() else 0\n        row = math.ceil(row) - 1\n\n        # Calculate dataframe for plot\n        task_subset_df = hist_df.loc[idx]\n        task_subset_df = task_subset_df.apply(pd.value_counts).T\n        task_subset_df = task_subset_df.div(task_subset_df.sum(axis=1), axis=0)\n\n        # Handle case if there are only correct answers\n        if task_subset_df.shape != (\n            7,\n            2,\n        ): # sometimes task_subset_df is in the wrong shape\n            if task_subset_df.shape != (\n                7,\n                1,\n            ):\n                task_subset_df = task_subset_df.T\n\n            if \"correct\" in task_subset_df.columns.values:\n                task_subset_df[\"incorrect\"] = 0\n\n        # Each bar needs a color and a legend entry and will therefore\n        # be plotted individually\n        for i, color in enumerate(colors):\n            trace = go.Bar(\n                x=[task_subset_df.index.values[i]],\n                y=[task_subset_df.incorrect[i] * 100],\n                name=labels[i],\n                marker={\"color\": color},\n                showlegend=show_legend,\n            )\n            traces.append(trace)\n\n        # All traces build one subfigure\n        for trace in traces:\n            fig.append_trace(trace, row=row + 1, col=col + 1)\n\n    # Figure styling\n    fig.update_layout(\n        height=400 * (row + 1),\n        legend={\n            \"orientation\": \"h\",\n            \"xanchor\": \"left\",\n            \"yanchor\": \"bottom\",\n            \"x\": 0.15,\n            \"y\": 1.05,\n        },\n    )\n    fig.update_xaxes(showticklabels=False)\n\n    # Label the y-axis of every subplot row (loop restored; the bare call below\n    # it previously reused a stale loop variable)\n    for i in range(0, row + 1):\n        fig.update_yaxes(title_text=\"Students\", row=i + 1, col=1)\n    return fig", "def draw_num_classes_graphs():\n    values = [10, 50, 100, 250, 1000, 4000]\n    for num_classes in values:\n        print(\"Training model on {} 
most common classes.\".format(num_classes))\n model = create_pretrained_model(num_classes=num_classes)\n histories = train(model, num_classes, epochs=50)\n run_name = get_run_name(\"{}classes\".format(num_classes))\n save_learning_curves(histories, run_name)\n csv_path = os.path.join(\"plots/\", run_name, \"data.csv\")\n ut.write_csv_dict(histories,\n keys=['loss', 'acc', 'val_loss', 'val_acc'],\n filename=csv_path)", "def preprocess(df):\n \n # drop the following columns - irrelevant now\n DROP_COLUMNS = ['id', 'original_title', 'release_date'\n , 'tmdbId', 'popularity', 'year']\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n # drop all of the language columns\n DROP_COLUMNS = [col for col in df.columns if col[:3]==\"lan\"]\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n\n # loop through the columns we want to aggregate\n for col_type in [\n \"original_language_\"\n , \"prod_comp_cntry_\"\n , \"prod_comp_names_\"\n , \"writers_\"\n , \"actors_\"\n , \"genres_\"\n , \"director_\"\n ]:\n # create a dictionary of each unique value and its frequency\n val_freq = {}\n for col in df.columns:\n if col.startswith(col_type):\n val_freq[col] = df[col].sum()\n\n # create a dataframe from this dictionary; sort by count\n counts = pd.DataFrame.from_dict(\n val_freq\n , orient='index'\n , columns=['count']\n ).sort_values('count', ascending=False)\n counts['frac'] = counts['count'].apply(lambda x: 100*x / df.shape[0])\n\n # handle special case of production company country\n if col_type == \"prod_comp_cntry_\":\n DROP_COLUMNS = [col for col in counts.index][3:]\n\n # handle special case of directors\n elif col_type == \"director_\":\n DIRECTOR_COLS = [col for col in df.columns\n if col.startswith(\"director_\")\n and col!=\"director_pop\"]\n df['established_director'] = df[DIRECTOR_COLS].max(axis=1)\n DROP_COLUMNS = DIRECTOR_COLS\n\n # handle special case of actors\n elif col_type == \"actors_\":\n ACTORS_COLS = [col for col in df.columns if \"actors\" in col]\n df['num_top_100_actors'] = df[ACTORS_COLS].sum(axis=1)\n DROP_COLUMNS = ACTORS_COLS\n\n # handle all the other cases\n else:\n DROP_COLUMNS = [col for col in counts.query('frac < 2').index]\n\n\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n ##########################################################################\n # adjust the data for inflation\n CPI_tf = df['CPIAUCSL'].max()\n df['budget'] = df[['budget', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n df['revenue'] = df[['revenue', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n # no longer need CPI data\n df.drop('CPIAUCSL', axis=1, inplace=True)\n \n ########################################################################## \n # add in useful features about the cast and crew \n df['cast_crew_sum_pop'] = (\n df['director_pop']\n + df['avg_actor_pop']\n + df['avg_writer_pop']\n )\n df['cast_crew_product_pop'] = (\n df['director_pop']\n * df['avg_actor_pop']\n * df['avg_writer_pop']\n )\n df['runtime'].replace(to_replace=0, value=df['runtime'].median(), inplace=True)\n df = df.query('10000 <= revenue').copy()\n df = df.query('100000 <= budget').copy()\n df.drop('sum_actor_pop', axis=1, inplace=True)\n df.drop('min_writer_pop', axis=1, inplace=True)\n\n # code to transform columns\n for col in [\n \"budget\", \"director_pop\", \"avg_writer_pop\"\n , \"max_writer_pop\", \"avg_actor_pop\", \"max_actor_pop\"\n , \"min_actor_pop\", 'cast_crew_sum_pop'\n , 'cast_crew_product_pop'\n ]:\n df['log10_'+col] = df[col].apply(lambda x: math.log10(x))\n 
df.drop(col, axis=1, inplace=True)\n \n return df", "def createGraph(self):\n self.measurements(45,50,10)\n avg = self.readFile(\"avg.pickle\")\n table = []\n for a in avg:\n table.append((a[0], a[1], a[2], a[3], a[4], \"Boolean\"))\n table.append((a[0], a[1], a[2], a[5], a[6], \"Fractional\"))\n table.append((a[0], a[1], a[2], a[7], a[8], \"Hierarchical\"))\n df = pd.DataFrame(table)\n df.columns = [\"nPages\", \"nCentroids\", \"Time\", \"Mean\", \"Std\", \"Type\"]\n print(df)\n sns.set(style = 'darkgrid')\n sns.lmplot(x = \"nCentroids\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.lmplot(x = \"nPages\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.scatterplot(x = \"nCentroids\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n #sns.scatterplot(x = \"nPages\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n plt.show()", "def plot_wordsOverTime(df, col, column_line = None, operation = 'count', title = 'Words over time'):\n\n print('\\n*** INTERACTIVE MODE: HOVER OVER THE GRAPH TO SEE COUNTS FOR EACH YEAR***')\n\n # create a subsett of year and number of projects\n counts = df.groupby([col]).agg(operation)\n counts.reset_index(inplace = True)\n\n # create a column data source to plot in bokeh\n source = ColumnDataSource(counts)\n\n # initialize the plot\n p = figure(plot_width = 1000,\n plot_height = 450,\n title = title)\n\n # plot the trend line\n p.line(x=col, y=column_line,\n line_width=6, source=source)\n\n # set parameters\n p.xgrid.grid_line_color = None\n p.ygrid.grid_line_color = None\n p.background_fill_color = \"AliceBlue\"\n p.title.text_font_size = \"16pt\"\n p.title.text_color = 'MidnightBlue'\n p.xaxis.axis_label_text_font_size = '15pt'\n p.yaxis.axis_label_text_font_size = '15pt'\n\n p.yaxis.axis_label = 'Total number of words'\n p.xaxis.major_label_text_font_size = '12pt'\n\n # create annotation\n box = BoxAnnotation(left=2014.5, right=2016.5,\n line_width=1, line_color='black', line_dash='dashed',\n fill_alpha=0.2, fill_color='orange')\n\n # add annotation to plot\n p.add_layout(box)\n\n # add interactive hover tool\n hover = HoverTool()\n hover.tooltips = [(\"Total number of words \", \"@word_count\"), ('year', '@year')]\n\n hover.mode = 'vline'\n p.add_tools(hover)\n\n # export plots\n _=export_png(p, filename = img_path / 'wordsovertime.png')\n output_file(img_path/'wordsovertime.html')\n\n p.output_backend = \"svg\"\n export_svgs(p, filename=img_path/\"wordsovertime.svg\")\n\n #display plot\n show(p)", "def getFig3Data(df, path):\n\ttmp = pd.DataFrame()\n\t# tmp = tmp.append(df)\n\ttmp = tmp.append(df[df.Location == 'exon'])\n\ttmp = tmp.append(df[df.Location == 'intron'])\n\t# print(df[df.Location == 'exon'].NbpG4rWt)\n\t# print(df[df.Location == 'intron'].NbpG4rWt)\n\tdicoNbTr = countTranscript.getFig3Percent(path)\n\tGlobal = pd.DataFrame()\n\tgroups = tmp.groupby('Class')\n\tfor name, group in groups:\n\t\trow = sumSubTable(group, name)\n\t\trow['Class'] = name\n\t\trow = pd.DataFrame(row, index=[len(Global)+1])\n\t\tGlobal = Global.append(row)\n\t# print(sum(Global.NbpG4rWt))\n\trow = {'Class' : 'Global',\n\t\t\t'nuclG' : sum(Global.nuclG),\n\t\t\t'nuclC' : sum(Global.nuclC),\n\t\t\t'NbpG4rWt' : sum(Global.NbpG4rWt),\n\t\t\t'NbpG4rShuf' : sum(Global.NbpG4rShuf),\n\t\t\t'Tot' : sum(Global.Tot)}\n\trow = pd.DataFrame(row, index=[len(Global)+1])\n\tGlobal = Global.append(row)\n\tGlobal['nbTr'] = Global['Class'].map( dicoNbTr['Tot'] )\n\tGlobal['NbTrpG4Wt'] = 
Global['Class'].map( dicoNbTr['Wt'] )\n\tGlobal['NbTrpG4Shuf'] = Global['Class'].map( dicoNbTr['Shuf'] )\n\tGlobal['PercentWt'] = Global['NbTrpG4Wt'] / Global['nbTr'] * 100\n\tGlobal['PercentShuf'] = Global['NbTrpG4Shuf'] / Global['nbTr'] * 100\n\tGlobal = computeDensity(Global, 'Segment')\n\treturn Global", "def GetGraphicAverages(diagnostic_cases, diagnostic, weeks, year, n_years):\n    t = 1.96\n\n    current_year = Year.objects.get(year=year)\n    weeks_current_year = weeks.filter(year=current_year)\n    year_ob = Year.objects.filter(year__lt=year)\n    weeks = weeks.filter(year__in=year_ob)\n\n    popu = 0\n\n    #cases per diagnostic\n    diagnostic_cases_w = diagnostic_cases\n\n    #arithmetic average of the weeks / n_years\n    averages = [0] * 52\n\n    standard_deviations = [0] * 52\n    #number of years\n\n    #cases per week of the different years\n    cases_per_weeks = [0] * 52\n\n    for i in range(len(averages)):\n\n        f = [0]*(n_years)\n\n        year = 0\n\n        y_idx = 0\n        for w in range(len(weeks)):\n            #print(y)\n            if weeks[w].week == i+1:\n\n                if year != weeks[w].year: # This never happens\n                    year = weeks[w].year\n                    cases = 0\n\n                for p in diagnostic_cases_w:\n\n                    if p.week == weeks[w]:\n\n                        cases += p.cases\n\n                f[y_idx] = cases\n                y_idx += 1\n\n        averages[i] = np.average(f) # delete\n\n        standard_deviations[i] = np.std(f)\n\n        cases = 0\n        for week in weeks_current_year:\n            if week.week == i+1:\n                dia = diagnostic_cases.filter(week=week)\n\n                for d in dia:\n\n                    cases += d.cases\n\n        cases_per_weeks[i] = cases\n\n\n    #array of class dots to draw the chart of averages\n    dots_graphic_averages = []\n    #array of class dots to draw the cumulative chart\n    dots_graphic_cumulative = []\n\n\n    average_cumulative = 0\n    top_rank_cumulative = 0\n    cases_acumulative = 0\n    lower_rank_cumulative = 0\n\n    for i in range(len(standard_deviations)):\n        lower_rank = 0\n        top_rank = 0\n\n        if n_years != 0:\n            lower_rank = averages[i] - (t * standard_deviations[i]/ math.sqrt(n_years))\n            top_rank = averages[i] + (t * standard_deviations[i] / math.sqrt(n_years))\n            if lower_rank < 0:\n                lower_rank = 0\n\n        # Cumulative dots\n        cases_acumulative += cases_per_weeks[i]\n        average_cumulative += averages[i]\n        if lower_rank >= 0:\n            lower_rank_cumulative += lower_rank\n            top_rank_cumulative += top_rank\n\n        dots_average = DotsGraphicAverage(averages[i],i+1, lower_rank, top_rank,cases_per_weeks[i])\n        dots_cumulative = DotsGraphicAverage(average_cumulative,i+1, lower_rank_cumulative, top_rank_cumulative,cases_acumulative)\n        dots_graphic_averages.append(dots_average)\n        dots_graphic_cumulative.append(dots_cumulative)\n\n\n    return dots_graphic_averages, dots_graphic_cumulative", "def k_means_montage(self, df, class_col):\n\n    n_classes = df[class_col].nunique()\n    for cl in sorted(df[class_col].unique()):\n        montage_df = df[df[class_col] == cl].path\n        imgs = [np.array(Image.open(img)) for img in montage_df]\n        imgs = np.stack(imgs)\n        plt.figure(figsize=(12, 15))\n        plt.imshow(montage(imgs, multichannel=True).astype(np.uint8))\n        plt.title(f\"Montage for Class{cl}\")", "def meetup_groups_dynamic(growth_df):\n\n    def convert_to_percent(row):\n        total_groups = row.sum()\n        return row.apply(lambda x: x * 100 / total_groups)\n\n    return growth_df.apply(convert_to_percent, axis=1)", "def trip_duration_stats(df):\r\n\r\n    print('\\nCalculating Trip Duration...\\n')\r\n    start_time = time.time()\r\n    # TO DO: display total travel time\r\n    print(df.groupby(['month'])['Trip Duration'].sum())\r\n    print(df.groupby(['day_of_week'])['Trip Duration'].sum())\r\n\r\n    # TO DO: display mean travel time\r\n    
print(df.groupby(['month'])['Trip Duration'].mean())\r\n print(df.groupby(['day_of_week'])['Trip Duration'].mean())\r\n\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def plot_genre_and_avg_word_len(df):\n plotting_helper_method('avg_word_len_nm', 'genre', df)\n plt.xlim(0, 1)\n\n plt.title('Normalized Average Word Length pr. genre')\n plt.xlabel('Normalized Average Word Length')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/average_word_length')", "def plot_genre_and_normalized_word_count(df):\n plotting_helper_method('word_count_nm', 'genre', df)\n plt.title('Normalized Word count pr. genre')\n plt.xlabel('Normalized Word Count')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/normalized_word_count_plot')", "def plot_timecourses(timecourses, output_file):\n\tpyplot.plot(timecourses)\n\tpyplot.savefig(output_file)", "def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):", "def plays(df):\n tp = (\n df.query('play_type in @OFFENSE_PLAY_TYPES')\n .pivot_table(index=['game_id', 'posteam'], \n columns=['play_type'], \n values=['play_id'], \n aggfunc='count',\n fill_value=0)\n .pipe(lambda x: x.set_axis([f'{b}_plays' for a, b in x.columns], axis=1, inplace=False))\n .reset_index()\n ) \n tp['tot_plays'] = tp.loc[:, [c for c in tp.columns if '_plays' in c]].sum(axis=1)\n tp['run_pct'] = tp['run_plays'] / (tp['run_plays'] + tp['pass_plays'])\n tp['pass_pct'] = tp['pass_plays'] / (tp['run_plays'] + tp['pass_plays'])\n return tp.join(time_of_possession(df), on=['game_id', 'posteam'], how='left')", "def plot_ttc(data_frame):\n figtt, axtt = plot_var(\n data_frame=data_frame,\n x_var=\"flow\",\n y_var=\"timetC\",\n label_var=\"mpr\",\n pivot=\"distance\",\n x_label=\"Flow [veh/m]\",\n y_label=\"Time To Collision [s]\",\n t_label=\"Distance [m]: \",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n )\n return figtt, axtt", "def profil_consommation(params):\n N = 50; min_val = 120; max_val = 200;\n df = pd.DataFrame( data = np.random.rand(N, 3), columns = [\"a_i\",\"a_j\",\"a_k\"] );\n# df = pd.DataFrame( data = np.random.uniform(low=min_val, high=max_val, size=(N, 3)),\\\n# columns = [\"a_i\",\"a_j\",\"a_k\"] );\n df[\"a_j\"] = 0.3 + df[\"a_i\"] *( 1 + np.random.random(N,)*df[\"a_i\"]);\n df[\"a_k\"] = df[\"a_k\"] ; \n df[\"a_i\"] = df[\"a_i\"] + 0.6;\n df[\"a_j\"] = df[\"a_j\"] + 0.6;\n \n fig, ax = plt.subplots()\n df.plot( ax = ax);\n ax.set_yticklabels(['{:3.1f}'.format(x*100) \\\n for x in ax.get_yticks()]);\n ax.legend( df.columns, loc='upper center', bbox_to_anchor=(0.5, 1.00),\\\n ncol=3, fancybox=True, shadow=True); \n plt.savefig(params[\"path_save\"]+\\\n \"profilDeConsommationSeriesTemporelles.eps\",\\\n dpi= 190)", "def plotProgression(self, df_X, repl_strs, time_strs,\n title=\"\", ax=None, label_fontsize=16, is_plot=True, figsize=(10, 10)):\n def deDup(lst):\n seen = set()\n seen_add = seen.add\n return [x for x in lst if not (x in seen or seen_add(x))]\n # \n if ax is None:\n _, ax = plt.subplots(1, figsize=figsize)\n if self._class_names is None:\n class_dct = {k: str(k) for k in self._ser_y.values}\n class_names = list(class_dct.values())\n else:\n class_names = list(self._class_names)\n class_names.insert(0, \"\")\n df_prediction = self.predict(df_X)\n repl_dct = {}\n for repl_str in repl_strs:\n indices = list(df_prediction.index)\n bools = [repl_str in i for i in 
indices]\n if any(bools):\n indices = [i for i in df_prediction.index if repl_str in i]\n y_vals = np.repeat(np.nan, len(time_strs))\n # Select the dominate class from the prediction\n for idx in indices:\n time_str = _selStrFromList(idx, time_strs)\n pos = time_strs.index(time_str)\n row = df_prediction.loc[idx, :]\n val = row.max()\n # Account for the blank row in the plot\n y_val = 1 + [c for c in row.index if row[c] == val][0]\n y_vals[pos] = y_val\n repl_dct[repl_str] = y_vals\n # Construct plot, starting with longest first\n for y_vals in repl_dct.values():\n ax.plot(time_strs, y_vals, marker=\"o\")\n ax.set_ylim(0, len(class_names))\n yticks = ax.get_yticklabels()[0]\n labels = list(class_names)\n ax.set_xticklabels(time_strs, rotation=90, fontsize=label_fontsize)\n ax.set_yticklabels(labels, fontsize=label_fontsize)\n ax.legend(repl_strs, bbox_to_anchor=(1.0, 1), loc=\"upper right\")\n fontsize = label_fontsize + 2\n plt.title(title, fontsize=fontsize)\n if is_plot:\n plt.show()", "def plotclass_pdf(pp, s, t=None):\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n\n try:\n X4, Y4 = zip(*s.mean_cln_4)\n X6, Y6 = zip(*s.mean_cln_6)\n ax1.plot(X4, Y4, 'bo', color=\"blue\", alpha=0.4, label=\"IPv4\")\n ax1.plot(X6, Y6, 'bo', color=\"red\", alpha=0.4, label=\"IPv6\")\n except Exception as e:\n print(\"Plotting failed for host {} with error {}\".format(s.domain, e))\n return\n\n try:\n ax1.plot(s.xs4, s.spl_arr4, linewidth=4, color=\"blue\", alpha=0.4)\n ax1.plot(s.xs6, s.spl_arr6, linewidth=4, color=\"red\", alpha=0.4)\n except Exception as e:\n print(\"Not plotting host {} due to exception {}\".format(s.domain, e))\n return\n\n plt.legend(loc='lower right')\n plt.title('Host: {} ({} / {})\\n Decision: {}'.format(\n s.domain, s.ip4, s.ip6, s.dec), fontsize=10)\n plt.xlabel('measurement time (h)')\n plt.ylabel('observed offset (msec)')\n ticks = ax1.get_xticks() / 3600\n ticks = [round(t, 1) for t in ticks]\n ax1.set_xticklabels(ticks)\n # saving all in PDF\n pp.savefig(fig)\n tikz_save(\"{}.{}-{}.tex\".format(t, s.domain, hash((s.ip4, s.ip6))))\n plt.close(fig)", "def boxplots_of_classes(self, title:str, y_axis: str=\"mean activity over all neurons\", second_path: str=r'D:\\Dataframes\\double_skip_mean', show:bool=True, dest_path:str=None, show_outliers: bool=False):\n data = []\n counter = 0\n for pop in self.populations:\n df = pd.read_csv(self.path + '\\\\{}.csv'.format(pop))\n trials = df['label'].tolist()\n values = df['Component 1'].tolist()\n response = df['response'].tolist()\n \n for i in range(len(response)):\n # Removing day 4 trials\n if eval(trials[i])[0] != 4:\n data.append([response[i], values[i], \"Transition over 1 day\"])\n\n df = pd.DataFrame(data, columns = ['Labels', y_axis, \"Transition\"])\n\n self.__box_plot(df, \"Labels\", y_axis, \"Transition\", title, show=show, dest_path=dest_path, showfliers=show_outliers, order = [\"0->0\", \"0->1\", \"1->0\", \"1->1\"])", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def visualize(houses:pd.DataFrame) -> None:\n #price_distribution(houses)\n #prop_types(houses)\n #zip_code(houses)\n #year_built(houses)\n #bed_bath(houses)\n return", "def plot(self, context=None):\n\n response = requests.get(self.url).content\n table = pd.read_html(response, attrs={\"id\": \"main_table_countries_today\"})\n df = table[0].fillna(0)\n # df.drop(df.index[0], inplace=True) # World\n df.drop([\"ActiveCases\", 
'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)\n        df.drop(df.columns[6], axis=1, inplace=True)\n\n        if len(context) > 3:\n            context = context.lower().capitalize()\n            df = df.loc[df[\"Country,Other\"] == context]\n        if 4 > len(context) > 1:\n            context = context.upper()\n            df = df.loc[df[\"Country,Other\"] == context]\n        if len(context) <= 1:\n            df = df[1:]\n\n        C_Names = df[\"Country,Other\"].head(n=10).values.tolist()\n        T_Cases = df[\"TotalCases\"].head(n=10).values.tolist()\n        # N_Cases = df[\"NewCases\"].head(n=10).values.tolist() # not plotted\n        T_Deaths = df[\"TotalDeaths\"].head(n=10).values.tolist()\n        # N_Deaths = df[\"NewDeaths\"].head(n=10).values.tolist() # not plotted\n        T_Recovered = df[\"TotalRecovered\"].head(n=10).values.tolist()\n        T_Tests = df[\"TotalTests\"].head(n=10).values.tolist()\n\n        x = np.arange(len(C_Names))\n        width = 0.20\n\n        fig, ax = plt.subplots()\n\n        ax.bar(x - 0.30, T_Cases, width, label='TotalCases', color=\"Blue\")\n        ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color=\"Red\")\n        ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color=\"Green\")\n        ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color=\"Orange\")\n\n        if len(context) > 1:\n            ax.set_title(\"{}'s Situation\".format(context))\n        else:\n            ax.set_title(\"World's Top10 Situation\")\n\n        ax.set_xticks(x)\n        ax.set_xticklabels(C_Names)\n        ax.legend()\n        plt.ticklabel_format(style='plain', axis=\"y\")\n        fig.set_size_inches(18.5, 10.5)\n        fig.tight_layout()\n        plt.grid()\n\n        if len(context) > 1:\n            font1 = {'family': 'serif',\n                     'color': 'blue',\n                     'weight': 'bold',\n                     'size': 20}\n            font2 = {'family': 'serif',\n                     'color': 'red',\n                     'weight': 'normal',\n                     'size': 20}\n            font3 = {'family': 'serif',\n                     'color': 'green',\n                     'weight': 'normal',\n                     'size': 20}\n            font4 = {'family': 'serif',\n                     'color': 'orange',\n                     'weight': 'normal',\n                     'size': 20}\n\n            # bbox=dict(facecolor='black', alpha=0.5)\n            plt.text(0.863, 0.67, \"Total Cases:\\n{:,}\".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)\n            plt.text(0.863, 0.57, \"Total Deaths:\\n{:,}\".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)\n            plt.text(0.863, 0.47, \"Total Tests:\\n{:,}\".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)\n            plt.text(0.863, 0.37, \"Total Recovered:\\n{:,}\".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)\n\n        # plt.savefig('corona.png') # Uncomment it to save the figure\n        plt.show()", "def visualise_number_of_tests_simultaneously_processed_at_each_lab(tests_dataframe):\r\n    labs_df = create_dataframe_from_csv('labs.csv')\r\n    labs_df = drop_missing_values_in_dataframe(labs_df)\r\n    list_of_labs = labs_df['lab_name'].to_list()\r\n    for index in range(len(list_of_labs)):\r\n        df = tests_dataframe.loc[tests_dataframe['lab_name'] == list_of_labs[index]]\r\n        df = merge_arrival_and_completion_time(df)\r\n        df.plot.line(x = 'time', y = 'server_size', rot=70, title=\"Visualise the number of tests being simultaneously processed at \" + list_of_labs[index])\r\n        plt.show()", "def eda_plot():\n\n    df1 = pd.read_csv('eda_malware.csv')\n    df2 = pd.read_csv('eda_random.csv')\n    df3 = pd.read_csv('eda_popular.csv')\n\n    df = pd.concat([df1, df2, df3], ignore_index=True)\n    df['label'].replace([0,1],['Benign','Malware'],inplace=True)\n\n    colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB', '#97C1A9']\n    # b vs. 
m: node types counts\n f1 = pd.crosstab(df['label'], df['node_types_counts'])\n\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 5768], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. Malicious: Number of Node Types')\n fig.savefig('bv_node_types.png')\n\n # for a better look, limit type 5 malware to 2k counts only\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 2000], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. Malicious: Number of Node Types')\n fig.savefig('bv_node_types1.png')\n\n # node types\n # for malware: extract node types info for node types counts > 5, and sum up each types counts\n node_types = df[(df['label'] == 'Malware') & (df['node_types_counts'] >= 5)]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [215060, 2823059, 3135725, 5641356, 10679709, 16547701]\n labels = ['Others', 'static,Node', 'public,static,Node', 'Node', 'external,Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Malware: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_m.png')\n\n # for benign: extract node types info for node types counts, and sum up each types counts\n node_types = df[(df['label'] == 'Benign')]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [77967, 2892033, 2964924, 5287258, 6478196, 20364339]\n labels = ['Others', 'staticNode', 'public,staticNode', 'external,Node', 'Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Benign: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_b.png')\n\n # benign vs malware: counts\n sizes = [8435, 802]\n labels = ['Benign', 'Malware']\n\n colors = ['#EAB6AB','#D9E6F3']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Number of Benign vs. 
Malware', y=1.05)\n\n plt.show()\n fig1.savefig('bm_counts.png')\n\n # number of edges vs number of nodes\n groups = df.groupby('label')\n colors = ['#FFAEA5', '#A2E1DB']\n\n # Plot\n fig, ax = plt.subplots()\n ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n for name, group in groups:\n if name == 'Benign':\n c = colors[0]\n else:\n c = colors[1]\n ax.plot(group.number_edges, group.number_nodes, marker='o', linestyle='', ms=4, label=name, c=c)\n ax.legend()\n ax.set_xlabel('Number of Edges')\n ax.set_ylabel('Number of Nodes')\n ax.set_title('Benign & Malware: Number of Edges vs. Number of Nodes', y=1.05)\n\n plt.show()\n fig.savefig('bm_edges_nodes.png')", "def compare_plot_instances(data_causal):\n col_names = data_causal.columns.values # get the columns' names\n dimension = 2 # TODO: figure out better way to organize plots by location\n\n fig = plt.figure()\n i = 1\n for cond in col_names:\n ax = fig.add_subplot(len(col_names)/dimension, dimension, i)\n df_compare = data_causal.groupby(cond)[cond].count() # displays num instances assigned to each condition\n ax = df_compare.plot(kind='bar', title=cond)\n ax.set_xlabel(cond)\n ax.set_ylabel(\"count instances\")\n i += 1\n fig.tight_layout()\n plt.show()", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n #adding histogram of times\n plt.hist(df['Start Time'].dt.hour, bins='auto', edgecolor='black')\n plt.title('Histogram of Travel Frequency by Hour')\n plt.xlabel('Hour of the Day')\n plt.ylabel('Count of Trips')\n plt.axis('tight')\n plt.grid()\n plt.show()\n \n # display the most common month\n popular_month = df['month'].mode()[0]\n print('Most Popular Month: \\n',cal.month_name[popular_month])\n\n # display the most common day of week\n popular_day = df['day'].mode()[0]\n print('Most Popular Day: \\n',popular_day )\n \n # display the most common start hour\n popular_hour = df['hour'].mode()[0]\n print('Most Popular Hour: \\n',popular_hour )\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def exercise_1(self):\n gdp = self.gdp\n phones = self.phones \n percent_literate = self.percent_literate\n # print(len(gdp), len(phones),len(percent_literate))\n print(type(self.percent_literate[1]))\n print((percent_literate[1]))\n\n # Create scatter plot with GDP on the x-axis and number of phones on the y-axis\n sns.scatterplot(x = gdp, y = phones)\n plt.show()\n\n # Change this scatter plot to have percent literate on the y-axis\n # sns.scatterplot(x=gdp, y=percent_literate) \n # plt.show()", "def run_pca(df, cols=None): \n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from sklearn.preprocessing import StandardScaler\n from sklearn.decomposition import PCA\n import mpld3\n\n # Define and markers to use for different categories\n groups_dict = {(u'D', 0):('Germany, unregulated', 'g', 'o'),\n (u'N', 0):('Norway, unregulated', 'b', 'o'),\n (u'D', 1):('Germany, regulated', 'g', '^'),\n (u'N', 1):('Norway, regulated', 'b', '^')}\n \n # Extract cols of interest\n cats = df[['country', 'regulated']]\n\n if cols:\n df = df[cols].astype(float)\n\n # Standardise the feature data\n feat_std = StandardScaler().fit_transform(df)\n\n # Setup PCA. 
Initially, choose to keep ALL components\n    pca = PCA()\n\n    # Fit model\n    pca.fit(feat_std)\n\n    # Get explained variances (in %)\n    var_exp = 100*pca.explained_variance_ratio_\n    cum_exp = np.cumsum(var_exp)\n\n    # Get eigenvalues\n    cov_mat = np.cov(feat_std.T)\n    eig_vals, eig_vecs = np.linalg.eig(cov_mat)\n\n    # Get number of EVs > 1 (Kaiser-Guttman criterion)\n    # and print summary\n    n_kgc = (eig_vals > 1).sum()\n    print('Variance explained by first %s PCs (%%):\\n' % n_kgc)\n    print(var_exp[:n_kgc])\n    print('\\nTotal: %.2f%%' % var_exp[:n_kgc].sum())\n\n    # Plot\n    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 6))\n\n    # Explained variance\n    axes[0].bar(range(1, len(var_exp)+1), var_exp, \n                align='center', label='Individual components')\n    axes[0].plot(range(1, len(cum_exp)+1), cum_exp, \n                 'r-o', label='Cumulative')\n    axes[0].set_xlabel('Principal component')\n    axes[0].set_ylabel('Variance explained (%)')\n    axes[0].legend(loc='center right')\n\n    # Eigenvalues\n    axes[1].plot(range(1, len(eig_vals)+1), np.sort(eig_vals)[::-1], \n                 'r-o', label='Eigenvalues')\n    axes[1].axhline(1, c='k', ls='-', label='Kaiser-Guttman threshold')\n    axes[1].set_xlabel('Principal component')\n    axes[1].set_ylabel('Eigenvalue')\n    axes[1].legend(loc='upper right') \n\n    # PC loadings\n    loads = pd.DataFrame(data=pca.components_, \n                         columns=df.columns,\n                         index=range(1, pca.components_.shape[0]+1)).T\n\n    # Project into 2 and 3 components\n    fig = plt.figure(figsize=(16, 6))\n\n    # Plot 2 components\n    ax = fig.add_subplot(1, 2, 1)\n\n    # Refit the PCA, this time specifying 2 components\n    # and transforming the result\n    feat_reduced = PCA(n_components=2).fit_transform(feat_std)\n\n    # Build df \n    data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n                         'PC2':feat_reduced[:, 1],\n                         'country':cats['country'],\n                         'regulated':cats['regulated']}) \n\n    groups = data.groupby(['country', 'regulated'])\n\n    # Plot\n    for name, group in groups:\n        ax.scatter(group['PC1'], group['PC2'], s=60,\n                   label=groups_dict[name][0],\n                   c=groups_dict[name][1],\n                   marker=groups_dict[name][2])\n\n    ax.set_xlabel('First principal component')\n    ax.set_ylabel('Second principal component')\n    ax.set_title('First two PCA directions')\n\n    # Plot 3 components\n    ax = fig.add_subplot(1, 2, 2, projection='3d', \n                         elev=-150, azim=135)\n\n    # Refit the PCA, this time specifying 3 components\n    # and transforming the result\n    feat_reduced = PCA(n_components=3).fit_transform(feat_std)\n\n    # Build df with colours\n    data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n                         'PC2':feat_reduced[:, 1],\n                         'PC3':feat_reduced[:, 2],\n                         'country':cats['country'],\n                         'regulated':cats['regulated']}) \n\n    groups = data.groupby(['country', 'regulated'])\n\n    # Plot\n    for name, group in groups:\n        ax.scatter(group['PC1'], group['PC2'], group['PC3'],\n                   label=groups_dict[name][0],\n                   c=groups_dict[name][1],\n                   marker=groups_dict[name][2],\n                   s=60)\n\n    ax.set_title('First three PCA directions')\n    ax.set_xlabel('First principal component')\n    ax.set_ylabel('Second principal component')\n    ax.set_zlabel('Third principal component')\n    ax.legend(bbox_to_anchor=(0.15, -0.1), frameon=True)\n    plt.show()\n\n    return loads", "def train_age_count():\n\n    import numpy as np\n    import pandas as pd\n    import matplotlib.pyplot as plt\n    import itertools\n\n    train = pd.read_csv('./data/train.csv')\n    color_labels = pd.read_csv('./data/color_labels.csv')\n    state_labels = pd.read_csv('./data/state_labels.csv')\n\n    # Convert age from months to years\n    train.loc[train['Age'] > -1, 'Age'] = (train['Age']//12)\n    \n    # Divide by dog 
(Type = 1) and cat (Type = 2)\n dog_df = train.loc[train['Type'] == 1, ['State','Type', 'Age', 'AdoptionSpeed']]\n cat_df = train.loc[train['Type'] == 2, ['State','Type', 'Age', 'AdoptionSpeed']]\n \n dog_max_age = max(dog_df.loc[:, 'Age'])\n dog_min_age = min(dog_df.loc[:, 'Age'])\n \n cat_max_age = max(cat_df.loc[:, 'Age'])\n cat_min_age = min(cat_df.loc[:, 'Age'])\n \n dog_age_labels = []\n dog_count = []\n \n cat_age_labels = []\n cat_count = []\n \n # Find dog count for each age\n for i in range(dog_min_age, dog_max_age + 1) :\n count = (dog_df.Age == i).sum()\n if(count > 0) :\n dog_count.append(count)\n dog_age_labels.append(i)\n\n # Find cat count for each age\n for i in range(cat_min_age, cat_max_age + 1) :\n count = (cat_df.Age == i).sum()\n if(count > 0) :\n cat_count.append(count)\n cat_age_labels.append(i)\n \n # Plot pie charts\n plt.figure()\n plt.pie(dog_count, labels = dog_age_labels, startangle=90, autopct='%.1f%%')\n plt.title('Count of Dogs at Different Ages')\n plt.show()\n \n plt.figure()\n plt.pie(cat_count, labels = cat_age_labels, startangle=90, autopct='%.1f%%')\n plt.title('Count of Cats at Different Ages')\n plt.show()\n \n \n \n # Plot bar graphs\n \n plt.figure(num = None, figsize=(6,4),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(dog_age_labels))\n \n index = index[0:13]\n dog_age_labels = dog_age_labels[0:13]\n dog_count = dog_count[0:13]\n \n plt.bar(index, dog_count)\n plt.xlabel('Age in Years')\n plt.xticks(index, dog_age_labels)\n plt.ylabel('Count')\n plt.title('Count of Dogs at Different Ages')\n plt.savefig('dogAgeCount.png', bbox_inches='tight')\n \n plt.figure(num = None, figsize=(6,4),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(cat_age_labels))\n \n index = index[0:11]\n cat_age_labels = cat_age_labels[0:11]\n cat_count = cat_count[0:11]\n \n plt.bar(index, cat_count)\n plt.xlabel('Age in Years')\n plt.xticks(index, cat_age_labels)\n plt.ylabel('Count')\n plt.title('Count of Cats at Different Ages')\n plt.savefig('catAgeCount.png', bbox_inches='tight')\n plt.show()", "def calc_class_ratio(da):\n\n # list all class codes in dataset\n list_classes = (np.unique(da, return_counts=False)).tolist()\n\n # create empty dataframe & dictionary\n ratio_table = pd.DataFrame(data=None, columns=list_classes)\n date_line = {}\n\n # count all pixels, should be consistent\n total_pix = int(np.sum(da.isel(time=1)))\n\n # iterate through each year in dataset\n for i in range(0, len(da.time)):\n date = str(da.time[i].data)[0:10]\n\n # for each year iterate though each present class number\n # and count pixels\n for n in list_classes:\n number_of_pixles = int(np.sum(da.isel(time=i) == n))\n percentage = number_of_pixles / total_pix * 100\n date_line[n] = percentage\n\n # add each year's counts to dataframe\n ratio_table.loc[date] = date_line\n\n return ratio_table", "def generate_pca(X, y, cols, n_components, **kwargs):\n\n pca = PCA(n_components, **kwargs)\n pca_result = pca.fit_transform(X)\n pca_df = pd.DataFrame(pca_result, columns=cols, index=X.index)\n pca_df['label'] = y\n pca_plot = ggplot(pca_df, aes(x=\"PCA-1\", y=\"PCA-2\", color='label') ) + geom_point(size=100,alpha=0.8) + ggtitle(\"First and Second Principal Components colored by class\")\n return pca_plot", "def time_stats(df):\n\n print('\\nDisplaying the statistics on the most frequent times of travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['Month'].mode()[0]\n print('The month with the most travels for the 
selected filters is: ' +\n str(months[most_common_month-1]).title() + '.')\n\n # display the most common day of week\n most_common_day = df['day_of_week'].mode()[0]\n print('The most common day of the week for the selected filters is: ' +\n str(most_common_day) + '.')\n\n # display the most common start hour\n most_common_hour = df['Start Hour'].mode()[0]\n print('The most common start hour is for the selected filters is: ' +\n str(most_common_hour) + '.')\n\n print(\"\\nWe took {} seconds to complete this.\".format((time.time() - start_time)))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # Display the most common month\n month_mode = NUM_TO_MONTH[df['start_month'].mode()[0]]\n print(\"The most common month has been \" + month_mode + \".\")\n\n # Display the most common day of week\n weekday_mode = NUM_TO_WEEKDAY[df['day_of_week'].mode()[0]]\n print(\"The most common day of the week has been \" + weekday_mode + \".\")\n\n # Display the most common start hour\n start_hour_mode = [df['start_hour'].mode()[0]][0]\n print(\"The most common hour has been at \" + str(start_hour_mode) + \" hours.\")\n\n # Display the most common day\n sd_df = df[['start_year', 'start_month','start_day']]\n sd_df = sd_df.groupby(['start_day', 'start_month', 'start_year']).size().reset_index(name='counts')\n sd_df = sd_df.sort_values(by = ['counts','start_year','start_month','start_day'], ascending = [False, False, False, False ])\n\n cmonth, cday, cyear, ncounts = sd_df['start_month'].values[0], sd_df['start_day'].values[0], sd_df['start_year'].values[0], sd_df['counts'].values[0]\n print(\"The most common day has been on \" + str(cday) + \" \" + NUM_TO_MONTH[cmonth] + \" \" + str(cyear) + \", having a total of \" + str(ncounts)+ \" bike rents.\")\n\n #Time controlling\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def plotting(dataframe, prod_num):\n fig, axs = plt.subplots(2, sharex=True)\n axs[0].plot(dataframe['STU'])\n axs[1].plot(dataframe['STU'].diff().dropna())\n axs[0].set_title(\"Time Series of Product\" + f\"_{prod_num}\")\n axs[1].set_title(\"Differenced Time Series of Product\" + f\"_{prod_num}\")\n plt.savefig(\"Time Series of Product\" + f\"_{prod_num}\" + \".pdf\")", "def class_proportions(histogram, years, columns, year_header=None):\r\n\r\n wh = None\r\n\r\n if histogram is not None:\r\n histogram_mask = histogram[year_header].isin(years)\r\n wh = (histogram[histogram_mask].loc[:, columns].sum() /\r\n histogram[histogram_mask].loc[:, columns].sum().sum()).values\r\n return wh", "def plot_work_trajectories(pdf, work, title=\"\"):\n plt.figure(figsize=(12, 8))\n\n nrows = 2\n ncols = 6\n workcols = 2\n for (row, direction) in enumerate(['delete', 'insert']):\n #\n # Plot work vs step\n #\n\n col = 0\n plt.subplot2grid((nrows,ncols), (row, col), colspan=(ncols-workcols))\n\n # Plot average work distribution in think solid line\n plt.plot(work[direction].mean(0), 'k-', linewidth=1.0, alpha=1.0)\n # Plot bundle of work trajectories in transparent lines\n plt.plot(work[direction].T, 'k-', linewidth=0.5, alpha=0.3)\n # Adjust axes to eliminate large-magnitude outliers (keep 98% of data in-range)\n workvals = np.ravel(np.abs(work[direction]))\n worklim = np.percentile(workvals, 98)\n nsteps = work[direction].shape[1]\n plt.axis([0, nsteps, -worklim, +worklim])\n # Label plot\n if row == 1: plt.xlabel('steps')\n plt.ylabel('work / kT')\n plt.title(\"%s NCMC in environment '%s' 
: %s\" % (title, envname, direction))\n plt.legend(['average work', 'NCMC attempts'])\n\n #\n # Plot work histogram\n #\n\n col = ncols - workcols\n plt.subplot2grid((nrows,ncols), (row, col), colspan=workcols)\n\n # Plot average work distribution in think solid line\n #nbins = 40\n workvals = work[direction][:-1,-1]\n #plt.hist(workvals, nbins)\n if workvals.std() != 0.0:\n sns.distplot(workvals, rug=True)\n else:\n print('workvals has stddev of zero')\n print(workvals)\n # Adjust axes to eliminate large-magnitude outliers (keep 98% of data in-range)\n #worklim = np.percentile(workvals, 98)\n #oldaxis = plt.axis()\n #plt.axis([-worklim, +worklim, 0, oldaxis[3]])\n # Label plot\n if row == 1: plt.xlabel('work / kT')\n plt.title(\"total %s work\" % direction)\n\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()", "def compute_features(input: str, output: str):\n co.nb.matplotlib_inline()\n\n df = pd.read_csv(input)\n\n # Show proportion of customers exited vs retained\n labels = 'Exited', 'Retained'\n sizes = [df.Exited[df['Exited'] == 1].count(), df.Exited[df['Exited'] == 0].count()]\n explode = (0, 0.1)\n fig1, ax1 = plt.subplots(figsize=(5, 4))\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n ax1.axis('equal')\n plt.title(\"Proportion of customers churned vs retained\", size=10)\n plt.show()\n\n # Drop meaningless index columns, as well as surname which would likely be\n # profiling.\n df.drop([\"RowNumber\", \"CustomerId\", \"Surname\"], axis=1, inplace=True)\n\n # Normalize balance by salary, and tenure and credit score by age.\n df[\"BalanceSalaryRatio\"] = df.Balance / df.EstimatedSalary\n df[\"TenureByAge\"] = df.Tenure / df.Age\n df[\"CreditScoreGivenAge\"] = df.CreditScore / df.Age\n\n # Arrange columns by data type for easier manipulation\n continuous_vars = ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'EstimatedSalary',\n 'BalanceSalaryRatio',\n 'TenureByAge', 'CreditScoreGivenAge']\n cat_vars = ['HasCrCard', 'IsActiveMember', 'Geography', 'Gender']\n df = df[['Exited'] + continuous_vars + cat_vars]\n\n # For the one hot variables, we change 0 to -1 so that the models can capture\n # a negative relation where the attribute is inapplicable instead of 0\n df.loc[df.HasCrCard == 0, 'HasCrCard'] = -1\n df.loc[df.IsActiveMember == 0, 'IsActiveMember'] = -1\n\n # One hot encode the categorical variables\n lst = ['Geography', 'Gender']\n remove = list()\n for i in lst:\n if df[i].dtype == np.str or df[i].dtype == np.object:\n for j in df[i].unique():\n df[i + '_' + j] = np.where(df[i] == j, 1, -1)\n remove.append(i)\n df = df.drop(remove, axis=1)\n\n # Scale continuous variables to go from 0 to 1.\n min_vec = df[continuous_vars].min().copy()\n max_vec = df[continuous_vars].max().copy()\n df[continuous_vars] = (df[continuous_vars] - min_vec) / (max_vec - min_vec)\n\n # Print results\n _df_pretty(df.head().transpose().round(2))\n\n os.makedirs(os.path.dirname(output), exist_ok=True)\n df.to_csv(output)", "def generate_statistics_plots(graph_name, graph_steps):\n df_final_situation = pd.DataFrame(columns=[\"type\", \"value\"])\n df_step = pd.DataFrame(columns=[\"type\", \"step\", \"value\"])\n df_exposed = pd.DataFrame(columns=[\"step\", \"type\", \"value\"])\n\n st.markdown(\"\")\n\n for i in range(graph_steps):\n # read graph and print stats\n graph_result_path = \"./data/output/\"\n G = nx.read_gexf(f\"{graph_result_path}G_{graph_name}_step{i}.gexf\")\n print_stats(G, i, graph_name)\n\n # LINE 
CHART (append informations into dataframe)\n df_step = df_step.append(\n {\"type\": \"not_exposed\", \"step\": i, \"value\": cn.count_not_exposed(G)},\n ignore_index=True,\n )\n df_step = df_step.append(\n {\"type\": \"exposed\", \"step\": i, \"value\": cn.count_exposed(G)},\n ignore_index=True,\n )\n df_step = df_step.append(\n {\"type\": \"infected\", \"step\": i, \"value\": cn.count_infected(G)},\n ignore_index=True,\n )\n\n line_chart = px.line(\n df_step,\n x=\"step\",\n y=\"value\",\n color=\"type\",\n title=f\"Infection overall: {graph_name} step: {i}\",\n )\n\n # BAR CHART (append informations into dataframe)\n df_exposed = df_exposed.append(\n {\n \"step\": i,\n \"type\": \"opinion_leader\",\n \"value\": cn.count_exposed_opinion_leader(G),\n },\n ignore_index=True,\n )\n df_exposed = df_exposed.append(\n {\"step\": i, \"type\": \"bot\", \"value\": cn.count_exposed_bot(G)},\n ignore_index=True,\n )\n df_exposed = df_exposed.append(\n {\"step\": i, \"type\": \"user\", \"value\": cn.count_exposed_user(G)},\n ignore_index=True,\n )\n bar_chart = px.bar(\n df_exposed,\n x=\"step\",\n y=\"value\",\n color=\"type\",\n title=f\"Type of agents exposed: {graph_name} step: {i}\",\n )\n\n # PIE CHART (append informations into dataframe)\n if i == 4:\n df_final_situation = df_final_situation.append(\n {\"type\": \"not_exposed\", \"value\": cn.count_not_exposed(G)},\n ignore_index=True,\n )\n df_final_situation = df_final_situation.append(\n {\"type\": \"exposed\", \"value\": cn.count_exposed(G)},\n ignore_index=True,\n )\n df_final_situation = df_final_situation.append(\n {\"type\": \"infected\", \"value\": cn.count_infected(G)},\n ignore_index=True,\n )\n\n #### CREATE THE PLOTS\n ##Uncomment plot(..) to save the plots to disk in html format\n\n plot_folder = \"./data/plots/\"\n\n # Plotly Line Plot\n # plot(line_chart, filename=f\"{plot_folder}steps_{graph_name}.html\")\n st.plotly_chart(line_chart, use_container_width=True)\n\n # Plotly bar plot\n # plot(bar_chart, filename=f\"{plot_folder}exposed_type_{graph_name}.html\")\n st.plotly_chart(bar_chart, use_container_width=True)\n\n # Plotly final pie chart\n final_pie_chart = px.pie(\n df_final_situation, values=\"value\", names=\"type\", title=f\"Final situation plot of: {graph_name}\"\n )\n # plot(final_pie_chart, filename=f\"{plot_folder}final_situation.html\")\n st.plotly_chart(final_pie_chart, use_container_width=True)\n\n print(\"\\nStatistics calculated succesfully\")\n\n return True", "def degree_performance_visualization(degrees, scores):\n plt.plot(scores, marker=\".\", color='b');\n plt.xticks(range(len(scores)))\n ax = plt.gca()\n ax.set_xticklabels(degrees)\n plt.xlabel(\"Degrees\")\n plt.ylabel(\"Score\")\n plt.title(\"Performance over degrees\")\n plt.grid(True)\n plt.savefig(\"degree_performances_test\")", "def showEntireDataset(wl_listG, wl_listV, tsvd_graphlet_vectors, kpca_graphlet_gram, tsvd_shortestpath_vectors,\n kpca_shortestpath_gram, classes):\n for i in range(1, 8):\n if (i == 6):\n data_tsvd = tsvd_graphlet_vectors\n data_kpca = kpca_graphlet_gram\n elif (i == 7):\n data_tsvd = tsvd_shortestpath_vectors\n data_kpca = kpca_shortestpath_gram\n else:\n data_tsvd = wl_listV[i - 1]\n data_kpca = wl_listG[i - 1]\n fig = plt.figure(figsize=(15, 15))\n if (i == 6):\n fig.suptitle('Graphlet', fontsize=25)\n elif (i == 7):\n fig.suptitle('Shortest Path', fontsize=25)\n else:\n fig.suptitle(f'Weisfeiler-Lehman {i}', fontsize=25)\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223, 
projection='3d')\n ax4 = fig.add_subplot(224, projection='3d')\n ax1.title.set_text('2D TruncatedSVD')\n ax2.title.set_text('2D KernelPCA')\n ax3.title.set_text('3D TruncatedSVD')\n ax4.title.set_text('3D KernelPCA')\n ax1.scatter(data_tsvd[:, 0], data_tsvd[:, 1], c=classes)\n ax2.scatter(data_kpca[:, 0], data_kpca[:, 1], c=classes)\n ax3.scatter3D(data_tsvd[:, 0], data_tsvd[:, 1], data_tsvd[:, 2], c=classes)\n ax4.scatter3D(data_kpca[:, 0], data_kpca[:, 1], data_kpca[:, 2], c=classes)\n plt.show()\n print(\"________________________________________________________________________________________\")\n print()", "def sample_plots_by_scn(self, df, num_graphs, num_per_row, fig_width=16, hspace=0.6):\n\t\tnum_rows = int(np.ceil(num_graphs / num_per_row))\n\t\tfig, axes = plt.subplots(nrows=num_rows, ncols=num_per_row, figsize=(fig_width, num_rows * fig_width / 4))\n\t\tfig.subplots_adjust(hspace=hspace)\n\t\tplt.xticks(rotation=45)\n\t\tfor i, scn in enumerate(df.SystemCodeNumber.unique()[:num_graphs]):\n\t\t\ttemp_df = df[df.SystemCodeNumber == scn]\n\t\t\tax = axes[i // num_per_row, i % num_per_row]\n\t\t\tax.plot(temp_df.LastUpdated, temp_df.PercentOccupied)\n\t\t\tax.set_title('Parking Area: {}'.format(scn))\n\t\t\tax.set_xlabel('Date')\n\t\t\tax.set_ylabel('Percent Occupied')\n\t\t\tax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1))\n\n\t\tfor ax in fig.axes:\n\t\t\tplt.sca(ax)\n\t\t\tplt.xticks(rotation=45)\n\t\tplt.show()", "def plot_ttcdprc(data_frame):\n figtt, axtt = plot_var(\n data_frame=data_frame,\n x_var=\"distance\",\n y_var=\"timeTC %\",\n label_var=\"mpr\",\n pivot=\"flow\",\n x_label=\"Distance [m]\",\n y_label=r\"Change in Time to Collision [\\%]\",\n t_label=\"Flow [veh/h]: \",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n )\n return figtt, axtt", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n #display the most common month\n print('\\nCalculating The Most Common Month to Travel...\\n')\n common_month = df['month'].mode()[0]\n print('Most Common Month : {} Counts {}'.format(MONTHS[common_month-1].title(),df['month'].value_counts()[common_month]))\n\n #display the most common day of week\n print('\\nCalculating The Most Common Day to Travel...\\n')\n common_day = df['day_of_week'].mode()[0]\n print('Most Common Day : {} Counts {}'.format(common_day,df['day_of_week'].value_counts()[common_day]))\n \n #display the most common start hour\n print('\\nCalculating The Most Common Start Hour to Travel...\\n')\n df['hour'] = df['Start Time'].dt.hour\n common_hour = df['hour'].mode()[0]\n print('Most Common Hour : {} Counts {}'.format(common_hour,df['hour'].value_counts()[common_hour]))\n \n print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n print('******************************')\n print('-'*40)", "def plot_pca(df, rows, batch: bool =False):\n\n markers, colors = construct_point(df)\n cdict = dict(zip(rows.keys(), colors))\n marker = dict(zip(rows.keys(), markers))\n fig, ax = plt.subplots(figsize=(10, 8))\n if batch:\n # consider changing batch to 'group' for generic grouping\n labels = ['Batch {}'.format(i+1) for i in range(len(colors))]\n else:\n labels = df.index.to_list()\n add_plot(df=df, ax=ax, cdict=cdict, marker=marker, labels=labels, rows=rows)\n plt.xlabel(\"Principal Component 1\", fontsize=15)\n plt.ylabel(\"Principal Component 2\", fontsize=15)\n\n for i in df.index.to_list():\n ax.annotate(i, (df.loc[i, 
['Principal Component 1']]+.05, df.loc[i, ['Principal Component 2']]+.05))\n # ax.legend(loc='upper right', bbox_to_anchor=(1.04, 1))\n plt.tight_layout()\n ax.legend().remove()\n return fig, ax", "def pie_plot(data,ara,rd_f,cla_arr=string.ascii_lowercase):\n \n data=pd.Series(data)\n dataclass=pd.Series(value_to_class_label(ara,data,cla_arr))\n \n parti=data.groupby(dataclass).agg(rd_f)\n \n labels=parti.index\n parts = parti.tolist()\n colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue','peru',\n 'teal','cornflowerblue','crimson','cadetblue','beige']\n\n plt.pie(parts, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=140) \n plt.axis('equal')", "def visualize_classes(df, label_names, err_param_name, reduced_data_column, labels_column, cmap, title, max_n_cols=4):\n df = df[sorted(df.columns)].groupby(err_param_name).first().reset_index()\n labels = df[labels_column][0]\n\n n_rows, n_cols = get_n_rows_cols(df.shape[0], max_n_cols)\n fig, axs = plt.subplots(n_rows, n_cols, figsize=(n_cols * 3, n_rows * 3), squeeze=False, constrained_layout=True)\n for i, ax in enumerate(axs.ravel()):\n if i >= df.shape[0]:\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n continue\n reduced_data = df[reduced_data_column][i]\n sc = ax.scatter(*reduced_data.T, c=labels, cmap=cmap, marker=\".\", s=40)\n x_min, x_max, y_min, y_max = get_lims(reduced_data)\n ax.set_xlim(x_min, x_max)\n ax.set_ylim(y_min, y_max)\n err_param_val = round(df[err_param_name][i], 3)\n ax.set_title(err_param_name + \"=\" + str(err_param_val))\n ax.set_xticks([])\n ax.set_yticks([])\n fig.suptitle(title)\n n_unique = np.unique(labels).shape[0]\n cbar = fig.colorbar(sc, ax=axs, boundaries=np.arange(n_unique + 1) - 0.5, ticks=np.arange(n_unique),\n use_gridspec=True, aspect=50)\n if label_names:\n cbar.ax.yaxis.set_ticklabels(label_names)\n\n path_to_plot = generate_unique_path(\"out\", \"png\")\n fig.savefig(path_to_plot)", "def report(\n df: pd.DataFrame,\n id_col: str = \"Compound_Id\",\n columns: List[str] = [\"Compound_Id\", \"Smiles\"],\n title: str = \"Cluster Report\",\n intro: str = \"Large clusters first, similar clusters together.\",\n):\n\n def add_cluster(cl_no, sim_to=None):\n if sim_to is None:\n sim_to = \"\"\n html.append(\"<hr>\")\n else:\n sim_to = f\"(similar to {sim_to})\"\n mf_cl = mf.MolFrame(df.query(\"Cluster_No == @cl_no\")[columns])\n mf_cl = mf_cl.add_mols()\n html.append(\n f\"<br><h2>Cluster {cl_no} ({len(mf_cl.data)} Members)&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{sim_to}</h2><br>\"\n )\n grid = mfv.html_grid(mf_cl.data, id_col=\"Compound_Id\")\n html.append(grid)\n\n if id_col not in columns:\n columns = [id_col] + columns\n if \"Smiles\" not in columns:\n columns.append(\"Smiles\")\n df_repr = df.query(\"IsRepr == 'Yes'\").reset_index().drop(\"index\", axis=1)\n chem_sim = {}\n for idx, rec0 in df_repr.iterrows():\n for _, rec1 in df_repr.iloc[idx + 1 :].iterrows():\n cl0 = rec0[\"Cluster_No\"]\n cl1 = rec1[\"Cluster_No\"]\n sim = mf.chem_sim(rec0[\"Smiles\"], rec1[\"Smiles\"])\n chem_sim[(cl0, cl1)] = sim\n chem_sim[(cl1, cl0)] = sim\n\n cl_sizes = (\n df[[\"Cluster_No\", \"Compound_Id\"]]\n .groupby(by=\"Cluster_No\")\n .count()\n .reset_index()\n .rename(columns={\"Compound_Id\": \"Size\"})\n )\n cl_sizes = cl_sizes.sort_values(\"Size\", ascending=False)\n cl_order = {x: True for x in cl_sizes[\"Cluster_No\"].values}\n\n html = [f\"<h1>{title}</h1><br>{intro}<br><br>\"]\n while len(cl_order) > 0:\n cl_no = list(cl_order.keys())[0]\n 
add_cluster(cl_no)\n cl_order.pop(cl_no)\n to_remove = []\n for sim_cl in cl_order:\n if chem_sim[(cl_no, sim_cl)] > 0.45:\n add_cluster(sim_cl, cl_no)\n to_remove.append(sim_cl)\n for x in to_remove:\n cl_order.pop(x)\n\n mfht.write(mfht.page(\"\\n\".join(html)), \"Clusters.html\")", "def figure_3(df):\n\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 6)) #still 6 if osa csa\n f, axes = plt.subplots(5, 1, figsize=(6, 9)) # 6, 2 if OSA CSA\n sns.despine(top=True, bottom=True)\n # f.suptitle(\"Outcome, Grouped by Contributing Etiology\")\n\n # contains used instead of equal to include patients with multiple etio (e.g. cardiac+medication count to both)\n neurologic_df = df.loc[df['PostDx'].str.contains(\"Neurologic\")].sort_values(by='Outcome')\n cardiac_df = df.loc[df['PostDx'].str.contains(\"Cardiac\")].sort_values(by='Outcome')\n medication_df = df.loc[df['PostDx'].str.contains(\"Medication\")].sort_values(by='Outcome')\n tecsa_df = df.loc[df['PostDx'].str.contains(\"TECSA\")].sort_values(by='Outcome')\n # osacsa_df = df.loc[df['PostDx'].str.contains(\"OSA-CSA\")].sort_values(by='Outcome')\n primary_df = df.loc[df['PostDx'].str.contains(\"Primary\")].sort_values(by='Outcome')\n\n # collapse possible outcomes\n neurologic_df['col_outcome'] = neurologic_df.apply(collapse_initial_outcome, axis=1)\n cardiac_df['col_outcome'] = cardiac_df.apply(collapse_initial_outcome, axis=1)\n medication_df['col_outcome'] = medication_df.apply(collapse_initial_outcome, axis=1)\n tecsa_df['col_outcome'] = tecsa_df.apply(collapse_initial_outcome, axis=1)\n # osacsa_df['col_outcome'] = osacsa_df.apply(collapse_initial_outcome, axis=1)\n primary_df['col_outcome'] = primary_df.apply(collapse_initial_outcome, axis=1)\n\n # Create count plot for each Etio on the left, then a Pie Chart with proportion on the right\n\n hatches = ['', '||||', '']\n face_color = ['white', 'white', 'dimgrey']\n\n # Neurologic\n bar = sns.countplot(y='col_outcome', data=neurologic_df, ax=axes[0])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[0].set(xlabel=\"\", ylabel=\"Neurologic\\nConditions\")\n\n\n # Cardiac\n bar = sns.countplot(y='col_outcome', data=cardiac_df, ax=axes[1])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[1].set(xlabel=\"\", ylabel=\"Cardiac\\nConditions\")\n\n # Medication\n bar = sns.countplot(y='col_outcome', data=medication_df, ax=axes[2])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[2].set(xlabel=\"\", ylabel=\"Opiate Use\")\n\n # OSA-CSA\n # bar = sns.countplot(y='col_outcome', data=osacsa_df, ax=axes[3,0])\n # for i, this_bar in enumerate(bar.patches):\n # # Set a different hatch for each bar\n # this_bar.set_hatch(hatches[i])\n # axes[3].set(xlabel=\"\", ylabel=\"OSA-CSA\")\n # If adding OSA-CSA back, would need to increase by 1 all of the axes indices\n\n # TE-CSA\n bar = sns.countplot(y='col_outcome', data=tecsa_df, ax=axes[3])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n 
axes[3].set(xlabel=\"\", ylabel=\"TECSA\")\n\n #Primary\n bar = sns.countplot(y='col_outcome', data=primary_df, ax=axes[4])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[4].set(xlabel=\"Number of Patients\", ylabel=\"Primary CSA\")\n\n # Combined X axis for L side\n axes[4].get_shared_x_axes().join(axes[4], axes[3], axes[2], axes[1], axes[0]) # axes[5] would need to be added back\n axes[0].set_xticklabels(\"\")\n axes[1].set_xticklabels(\"\")\n axes[2].set_xticklabels(\"\")\n axes[3].set_xticklabels(\"\")\n # axes[4].set_xticklabels(\"\")\n # Leave bottom labels in\n\n # Resize all\n axes[0].autoscale()\n axes[1].autoscale()\n axes[2].autoscale()\n axes[3].autoscale()\n axes[4].autoscale()\n # axes[5].autoscale()\n\n f.tight_layout(rect=[0, 0, 1, 1])\n f.savefig('Figure 3 - outcome of cpap by etio.png', dpi=100)\n # plt.show()", "def plot_class_distribution(image_directory: str = './data/images/',\n filename: str = 'full_data_set',\n output_directory: str = './data/',\n title: str = None,\n labels: bool = False,\n rotate: bool = False,\n semilog: bool = False):\n class_dist = {' '.join(cat_type.split()[:2]): len(os.listdir(image_directory + cat_type))\n for cat_type in os.listdir(image_directory)}\n data_count = pd.Series(class_dist)\n # data_count = data_count[(data_count < 20000)] # & (data_count > 500)]\n plt.figure(figsize=[5, 5])\n rect = plt.bar(data_count.index, data_count.values, color=\"#637b7f\")\n if not labels:\n plt.gca().axes.get_xaxis().set_visible(False)\n if labels:\n if rotate:\n autolabel(rect, 90)\n else:\n autolabel(rect)\n if title:\n plt.title(title)\n plt.ylabel('Class Size')\n if semilog:\n plt.yscale('log')\n filename += '_log'\n plt.tight_layout()\n if rotate:\n plt.xticks(rotation=90)\n plt.tight_layout()\n plt.savefig(output_directory + filename + '_distribution.png', dpi=300)", "def make_pdf_reports(df, path):\n with PdfPages(path) as pdf:\n # settings for the file\n base = 10 # threshold for grouping points\n page_size = (11, 8.5)\n point_size = 1.5 # scatter plot point size\n\n df[\"color\"] = df.db.apply(rand_color) # adjacency color\n df[\"fuzzy_y\"] = df.y.apply(my_round) # horizontal group color\n df[\"y_color\"] = df.fuzzy_y.apply(rand_color)\n df[\"fuzzy_x\"] = df.x.apply(my_round) # vertical group color\n df[\"x_color\"] = df.fuzzy_x.apply(rand_color)\n\n # Add title and axis names\n plt.figure(figsize=page_size)\n plt.title('Horizontal Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.y_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Vertical Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.x_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Block Adjacency Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n data1 = df[[\"floor\", \"swing_drop\", \"name\"]]\n data = data1.groupby([\"floor\", \"swing_drop\"]).count()\n data = data.reset_index()\n data.head()\n data = data.fillna(0)\n pivot = data.pivot(index=\"floor\", columns=\"swing_drop\", 
values=\"name\")\n pivot = pivot.fillna(0)\n order = sorted(df.floor.unique(), reverse=True)\n pivot = pivot.reindex(order)\n plt.figure(figsize=page_size)\n ax = sns.heatmap(pivot, cmap=\"BuPu\")\n ax.set_title(\"Block Qty Heatmap\")\n pdf.savefig()\n plt.close()\n\n # bar chart\n plt.rcParams.update({'font.size': 5})\n plt.figure(figsize=page_size)\n plt.title('Block Style Bar Graph')\n plt.xlabel('Names')\n plt.xticks(rotation=90)\n plt.ylabel('Quantities')\n dd = df[['name', \"guid\"]].groupby(\"name\").count()\n dd = dd.reset_index()\n dd = dd.sort_values(\"guid\")\n plt.bar(dd.name, dd.guid)\n # plt.show()\n pdf.savefig()\n plt.close()\n\n # We can also set the file's metadata via the PdfPages object:\n d = pdf.infodict()\n d['Title'] = 'Multipage PDF Example'\n d['Author'] = 'Matthew Kreidler'\n d['Subject'] = 'How to create a multipage pdf file and set its metadata'\n d['Keywords'] = 'PdfPages multipage keywords author title subject'\n d['CreationDate'] = datetime.datetime.today()\n d['ModDate'] = datetime.datetime.today()\n\n print(\"Graphs and Charts finished!\")\n return path", "def time_stats(df):\r\n\r\n # ref: https://stackoverflow.com/questions/48590268/pandas-get-the-most-frequent-values-of-a-column\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n\r\n start_time = time.time()\r\n\r\n # Display the most popular month\r\n most_popular_month = df['Month'].mode()[0]\r\n print ('The most popular rental month: {0}'.format(calendar.month_name[most_popular_month]))\r\n\r\n # print most popular day\r\n most_popular_day = df['Start Day'].mode()[0]\r\n print ('The most popular start day of the week: {0}'.format(most_popular_day))\r\n\r\n # most popular hour\r\n most_popular_hour = df['Hour'].mode()[0]\r\n print ('The most popular rental hour is: {0}'.format(most_popular_hour))\r\n\r\n # ref: https://stackoverflow.com/questions/29645153/remove-name-dtype-from-pandas-output\r\n top_2_days = df['Start Day'].value_counts()[0:2]\r\n print ('The top 2 most popular rental days are:\\n{0}'.format(top_2_days.to_string()))\r\n\r\n top_3_hours = df['Hour'].value_counts()[0:3]\r\n print ('The top 3 most popular rental hours are:\\n{0}'.format(top_3_hours.to_string()))\r\n\r\n print('-'*40)\r\n\r\n ###### try plottling some info ####################\r\n # plot via pandas\r\n #pd.value_counts(df['Month']).plot.bar()\r\n #pd.value_counts(df['Start Day']).plot.bar()\r\n #pd.value_counts(df['Hour']).plot.bar()\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def create_progression_tables(self, feat_subset, time_col, patient_col, method, bl_index, skip_no_bl=False):\n\n prog_dfs = []\n\n for df in self:\n patients = df[patient_col]\n\n # create dataframe copy to keep from alternating original dataframe\n prog_df = df[feat_subset][::]\n\n for feat in feat_subset:\n\n for patient in patients.unique():\n # collect values for sinlge patient\n pat_inds = df[df[patient_col] == patient].index\n # create value series storing the values of a patient\n values = df.loc[pat_inds, feat]\n values.index = df.loc[pat_inds, time_col]\n\n # skip patient if no baseline value is present\n if skip_no_bl:\n if bl_index not in values.index:\n prog_df.loc[pat_inds, feat] = np.nan\n continue\n\n # calculate scores for patient and reindex to merge back into dataframe copy\n scores = calc_prog_scores(values, bl_index, method)\n\n # if only NaN has been returned as score set patients progression to nan at all visits\n if type(scores) != pd.Series:\n 
prog_df.loc[pat_inds, feat] = scores\n\n else: # input normal progression scores for visits\n scores.index = pat_inds\n prog_df.loc[pat_inds, feat] = scores\n\n # get columns from original dataframe to concatinate them to resulting DF\n concat_columns = df[[patient_col, time_col]]\n prog_df = pd.concat([concat_columns, prog_df], join=\"outer\", axis=1)\n\n # add prog_df to list\n prog_dfs.append(prog_df)\n\n # keep track of which categorical features are still in the collection\n categorical_feats = list(set(self.categorical_feats).intersection(feat_subset))\n\n return DataCollection(prog_dfs, self.df_names, categorical_feats)", "def _agg_proportions(df, members=None):\n p = df.copy()\n if members is not None:\n p = p.iloc[members]\n p = p.T.assign(\n group=pd.factorize(p.columns)[0],\n label=pd.factorize(p.columns)[-1],\n value=p.sum(), #/ p.sum().sum() * p.shape[0],\n row_count=p.shape[0]\n )\n p = p[['label', 'group', 'value', 'row_count']]\n p.columns = ['label', 'group', 'value', 'row_count']\n p = list(p.T.to_dict().values())\n return p", "def SA_data_display(opt_df, all_df):\n fig, axs = plt.subplots(2, 3)\n\n axs[0,0].set_title(\"Optimal rewire attempts for circularity\")\n axs[0,0].set_ylabel(\"Percent waste %\")\n axs[0,0].set_xlabel(\"Time (s)\")\n axs[0,0].plot(opt_df[\"Time (s)\"], opt_df[\"Percent waste (%)\"])\n\n axs[0,1].set_title(\"Optimal rewire attempts acceptance probability\")\n axs[0,1].set_ylabel(\"Acceptance Probability\")\n axs[0,1].set_xlabel(\"Time (s)\") # time??\n axs[0,1].scatter(opt_df[\"Time (s)\"], opt_df[\"Probability\"])\n\n axs[0,2].set_title(\"Optimal rewire attempts temperature decrease\")\n axs[0,2].set_ylabel(\"Temperature\")\n axs[0,2].set_xlabel(\"Time (s)\") # time??\n axs[0,2].plot(opt_df[\"Time (s)\"], opt_df[\"Temperature\"])\n\n axs[1,0].set_title(\"All rewire attempts for circularity\")\n axs[1,0].set_ylabel(\"Percent waste %\")\n axs[1,0].set_xlabel(\"Time (s)\")\n axs[1,0].plot(all_df[\"Time (s)\"], all_df[\"Percent waste (%)\"])\n\n axs[1,1].set_title(\"All rewire attempts acceptance probability\")\n axs[1,1].set_ylabel(\"Acceptance Probability\")\n axs[1,1].set_xlabel(\"Time (s)\") # time??\n axs[1,1].scatter(all_df[\"Time (s)\"], all_df[\"Probability\"])\n\n axs[1,2].set_title(\"All rewire attempts temperature decrease\")\n axs[1,2].set_ylabel(\"Temperature\")\n axs[1,2].set_xlabel(\"Time (s)\") # time??\n axs[1,2].plot(all_df[\"Time (s)\"], all_df[\"Temperature\"])\n\n return plt.show()", "def time_stats(df):\n\n print('\\nDisplaying the statistics on the most frequent times of '\n 'travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['Month'].mode()[0]\n print('For the selected filter, the month with the most travels is: ' +\n str(months[most_common_month-1]).title() + '.')\n\n # display the most common day of week\n most_common_day = df['Weekday'].mode()[0]\n print('For the selected filter, the most common day of the week is: ' +\n str(most_common_day) + '.')\n\n # display the most common start hour\n most_common_hour = df['Start Hour'].mode()[0]\n print('For the selected filter, the most common start hour is: ' +\n str(most_common_hour) + '.')\n\n print('-'*40)", "def GetGraphicQuartiles(diagnostic_cases, diagnostic, weeks,year, n_years):\n current_year = Year.objects.get(year=year)\n weeks_current_year = weeks.filter(year=current_year)\n year_ob = Year.objects.filter(year__lt=year)\n weeks = weeks.filter(year__in=year_ob)\n\n suma = 0\n if n_years % 2 != 0:\n suma = -1\n\n\n qs = 
[0] * 3\n qss = [0] * 3\n\n diagnost_cases = diagnostic_cases.filter(week__in=weeks_current_year)\n\n dots_graphic_quartiles = [ ]\n dots_graphic_cumulative = [ ]\n cases_per_week_acumulative = 0\n\n for o in range(52):\n cases_per_years = [0] * (n_years)\n cases = 0\n i = 0\n year = 0 \n for week_idx in range(len(weeks)):\n if weeks[week_idx].week == o+1:\n if year != weeks[week_idx].year: # Esto no pasa nunca\n year = weeks[week_idx].year\n cases = 0\n for p in diagnostic_cases:\n if p.week == weeks[week_idx]:\n cases += p.cases\n cases_per_years[i] = cases\n \n i += 1\n\n ##### Getting the quantiles ;)\n\n qs[0] = np.quantile(cases_per_years, 0.25)\n qs[1] = np.quantile(cases_per_years, 0.5)\n qs[2] = np.quantile(cases_per_years, 0.75)\n qss[0] += qs[0]\n qss[1] += qs[1]\n qss[2] += qs[2]\n cases_per_week = 0\n\n ####loop to count the amount of cases in the current year\n year= None\n for week in weeks_current_year:\n if week.week == o+1:\n for d in diagnost_cases:\n year = d.week.year.year\n if d.week == week:\n cases_per_week += d.cases\n cases_per_week_acumulative += cases_per_week\n\n dots = DotsGraphicQuartile(qs[0],qs[1],qs[2],cases_per_week,o+1)\n dots_aculative = DotsGraphicQuartile(qss[0],qss[1],qss[2],cases_per_week_acumulative,o+1)\n dots_graphic_quartiles.append(dots)\n dots_graphic_cumulative.append(dots_aculative)\n\n return dots_graphic_quartiles, dots_graphic_cumulative", "def exercise_5(self):\n student_data = self.student_data\n # Create a dictionary mapping subgroup values to colors\n palette_colors = {\"Rural\": \"green\", \"Urban\": \"blue\"}\n\n # Create a count plot of school with location subgroups\n sns.countplot(x=\"school\", data=student_data\n , hue = \"location\"\n , palette = palette_colors)\n\n\n # Display plot\n plt.show()", "def do_plot(self):\n years = sorted(set(self.prediction_df_without_covid19['Year']))\n predict_without_covid_country = self.prediction_df_without_covid19[\n self.prediction_df_without_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n predict_with_covid_country = self.prediction_df_with_covid19[\n self.prediction_df_with_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n # ------------------------------------------------------------------------------------------------------\n pa = \\\n predict_without_covid_country.loc[predict_without_covid_country['Year'] == 1990][\n 'Total_CO2_Emissions'].values[\n 0]\n x = []\n for i in range(len(years)):\n x.append(pa * 0.6)\n # ------------------------------------------------------------------------------------------------------\n fig = Figure()\n ax = fig.subplots()\n ax.grid(True, alpha=0.3)\n # plot_title = 'Total CO2 Emissions predicted from 2019-2030 for ' + self.country\n plot_title = 'Total ' + '$CO_2$' + ' Emissions predicted from 2019-2030 for ' + self.country\n label_country_without_covid = 'Total CO2 emissions without covid'\n label_country_with_covid = 'Total CO2 emissions with Covid-19'\n # ------------------------------------------------------------------------------------------------------\n params = {'mathtext.default': 'regular'}\n rcParams.update(params)\n rcParams['font.size'] = 7\n rcParams['lines.markersize'] = 4\n rcParams['figure.figsize'] = [7, 4]\n rcParams['figure.dpi'] = 150\n rcParams['font.family'] = 'Verdana'\n rcParams[\"font.weight\"] = \"normal\"\n font = {'family': 'Verdana',\n 'color': 'xkcd:darkgreen',\n 'weight': 'normal',\n 'size': 9,\n }\n colors = 
rcParams['axes.prop_cycle'].by_key()['color']\n l1, = ax.plot(years, predict_without_covid_country['Total_CO2_Emissions'], color='xkcd:dark blue green',\n marker='o',\n label=label_country_without_covid)\n l2, = ax.plot(years, predict_with_covid_country['Total_CO2_Emissions'], color='xkcd:neon pink', marker='.',\n label=label_country_with_covid)\n l3, = ax.plot(years, x, color='xkcd:orchid', marker='1')\n print('without covid: ', predict_without_covid_country['Total_CO2_Emissions'].values)\n print('with covid: ', predict_with_covid_country['Total_CO2_Emissions'].values)\n ax.set_xlabel('Years', fontdict=font)\n ax.set_ylabel('Emissions (Gg)', fontdict=font)\n ax.set_title(plot_title, fontsize=12, fontweight='normal')\n ax.patch.set_facecolor('xkcd:green')\n ax.set_facecolor('xkcd:pale green')\n fig.legend((l1, l2, l3), ('Prediction without Covid19', 'Prediction with Covid19', 'Paris Agreement'),\n bbox_to_anchor=(0.907, 0.89))\n fig.savefig(OUTPUT_GRAPH_PATH)", "def graph_cause_count_each(df, label):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(15, 8)\r\n plt.xticks(rotation=45)\r\n ax.set_title(label.capitalize() + \" Police Death Causes\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\")\r\n # plt.show()\r\n # save the graph as an image with the correct cause naming\r\n name = \"2_graph_cause_count_\" + label + \".png\"\r\n fig.savefig(name)", "def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)", "def pc_project(\n mt: hl.MatrixTable,\n loadings_ht: hl.Table,\n loading_location: str = \"loadings\",\n af_location: str = \"pca_af\",\n) -> hl.Table:\n mt = pc_hwe_gt(mt, loadings_ht, loading_location, af_location)\n mt = mt.annotate_cols(scores=hl.agg.array_sum(mt.pca_loadings * mt.GTN))\n return mt.cols().select(\"scores\")" ]
[ "0.6448972", "0.6392558", "0.6383187", "0.6325968", "0.58468354", "0.5693776", "0.5645798", "0.5629048", "0.5622483", "0.5611402", "0.55704165", "0.5545969", "0.54707754", "0.5421868", "0.541172", "0.5408939", "0.53972", "0.5382327", "0.5376701", "0.5373505", "0.5348653", "0.5318779", "0.53117114", "0.53011274", "0.5285753", "0.527667", "0.5270411", "0.5264465", "0.5251325", "0.52398163", "0.521343", "0.51984954", "0.51901275", "0.5183569", "0.5173878", "0.5152095", "0.5149655", "0.51378024", "0.511371", "0.51097214", "0.51095784", "0.5052808", "0.5047214", "0.5042", "0.50364536", "0.5034725", "0.50232", "0.5021818", "0.5017655", "0.50174034", "0.5002934", "0.49992308", "0.49961686", "0.49929678", "0.49905917", "0.49890637", "0.4985069", "0.4982092", "0.49820316", "0.49785334", "0.4974087", "0.49709526", "0.49674344", "0.49624622", "0.49590847", "0.49545372", "0.49536157", "0.4952961", "0.4952646", "0.49493158", "0.49439347", "0.4940264", "0.4938892", "0.4929626", "0.4923391", "0.4917842", "0.49152872", "0.49061373", "0.49033207", "0.49028543", "0.4901895", "0.4895302", "0.48947206", "0.48879775", "0.4876168", "0.4867296", "0.48672375", "0.4864022", "0.48615372", "0.48605785", "0.4851806", "0.4846353", "0.4844855", "0.48427927", "0.48401394", "0.48369566", "0.48354957", "0.4835225", "0.48210415", "0.4820683" ]
0.66838074
0
Takes a pandas dataframe which contains the proportions of language classes over generations and generates a barplot (excluding the burn-in period)
def plot_barplot_language_types(lang_class_prop_over_gen_df, title, file_path, file_name, n_runs, n_batches, n_gens, gen_start, lang_class_baselines_all, lang_class_baselines_fully_expressive, possible_form_lengths, holistic_without_partial_meaning=True):
    sns.set_style("darkgrid")
    sns.set_context("talk")
    if len(possible_form_lengths) == 1:
        n_language_classes = 4
    else:
        n_language_classes = 7  # TODO: or should this be 6 (i.e. collapsing the two different reduplication strategies?)
    proportion_column_as_results = dataframe_to_language_stats(lang_class_prop_over_gen_df, n_runs, n_batches, n_gens, possible_form_lengths)
    # drop the burn-in generations, then flatten into one long column of proportions
    proportion_column_from_start_gen = proportion_column_as_results[:, gen_start:]
    proportion_column_from_start_gen = proportion_column_from_start_gen.flatten()
    runs_column_from_start_gen = []
    for i in range(n_runs * n_batches):
        for j in range(gen_start, n_gens):
            for k in range(n_language_classes):
                runs_column_from_start_gen.append(i)
    runs_column_from_start_gen = np.array(runs_column_from_start_gen)
    generation_column_from_start_gen = []
    for i in range(n_runs * n_batches):
        for j in range(gen_start, n_gens):
            for k in range(n_language_classes):
                generation_column_from_start_gen.append(j)
    generation_column_from_start_gen = np.array(generation_column_from_start_gen)
    class_column_from_start_gen = []
    for i in range(n_runs * n_batches):
        for j in range(gen_start, n_gens):
            if n_language_classes == 4:
                class_column_from_start_gen.append('degenerate')
                class_column_from_start_gen.append('holistic')
                class_column_from_start_gen.append('compositional')
                class_column_from_start_gen.append('other')
            elif n_language_classes == 7:
                class_column_from_start_gen.append('D')
                class_column_from_start_gen.append('H')
                class_column_from_start_gen.append('H+Div.')
                class_column_from_start_gen.append('C')
                class_column_from_start_gen.append('C+Red.-part')
                class_column_from_start_gen.append('C+Red.-whole')
                class_column_from_start_gen.append('O')
    new_data_dict = {'run': runs_column_from_start_gen, 'generation': generation_column_from_start_gen, 'proportion': proportion_column_from_start_gen, 'class': class_column_from_start_gen}
    lang_class_prop_over_gen_df_from_starting_gen = pd.DataFrame(new_data_dict)
    if len(possible_form_lengths) == 1:
        palette = sns.color_palette(["black", "red", "green", "grey"])
    else:
        palette = sns.color_palette(["black", sns.color_palette("colorblind")[3], sns.color_palette("colorblind")[1], sns.color_palette("colorblind")[2], sns.color_palette("colorblind")[9], sns.color_palette("colorblind")[0], sns.color_palette("colorblind")[7]])
    sns.barplot(x="class", y="proportion", data=lang_class_prop_over_gen_df_from_starting_gen, palette=palette)
    # plt.axhline(y=lang_class_baselines_all[0], xmin=0.0, xmax=0.25, color='k', linestyle='--', linewidth=2)
    # plt.axhline(y=lang_class_baselines_all[1], xmin=0.25, xmax=0.5, color='k', linestyle='--', linewidth=2)
    # plt.axhline(y=lang_class_baselines_all[2], xmin=0.5, xmax=0.75, color='k', linestyle='--', linewidth=2)
    # plt.axhline(y=lang_class_baselines_all[3], xmin=0.75, xmax=1.0, color='k', linestyle='--', linewidth=2)
    #
    # if title == 'Mutual Understanding Only' or title == 'Minimal Effort & Mutual Understanding':
    #     plt.axhline(y=lang_class_baselines_fully_expressive[0], xmin=0.25, xmax=0.5, color='0.6', linestyle='--', linewidth=2)
    #     plt.axhline(y=lang_class_baselines_fully_expressive[1], xmin=0.5, xmax=0.75, color='0.6', linestyle='--', linewidth=2)
    plt.tick_params(axis='both', which='major', labelsize=18)
    plt.tick_params(axis='both', which='minor', labelsize=18)
    plt.ylim(-0.05, 1.05)
    plt.title(title, fontsize=22)
    # plt.xlabel('Language class')
    plt.xlabel('', fontsize=20)
    plt.ylabel('Mean proportion', fontsize=20)
    plt.tight_layout()
    if holistic_without_partial_meaning:  # selects which output filename variant is written
        plt.savefig(file_path + "Barplot_lang_types_" + file_name + "_burn_in_" + str(gen_start) + ".png")
    else:
        plt.savefig(file_path + "Barplot_lang_types_" + file_name + "_burn_in_" + str(gen_start) + "_NEW.png")
    plt.show()
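# Hypothetical usage sketch (all argument names/values below are illustrative
# assumptions, not part of the original API). Assumes numpy/pandas/seaborn/
# matplotlib are imported as np/pd/sns/plt and dataframe_to_language_stats() is in scope:
#
# plot_barplot_language_types(lang_class_prop_over_gen_df=props_df,
#                             title="Minimal Effort & Mutual Understanding",
#                             file_path="plots/", file_name="run_01",
#                             n_runs=10, n_batches=2, n_gens=50, gen_start=10,
#                             lang_class_baselines_all=[0.25, 0.25, 0.25, 0.25],
#                             lang_class_baselines_fully_expressive=[0.5, 0.5],
#                             possible_form_lengths=[2])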
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bar_plot(df_NP):\n cnt = Counter()\n for tax_list in df_NP.taxonomy:\n for tax in list(tax_list):\n if tax != 'no':\n cnt[tax] += 1\n plt.bar(cnt.keys(),cnt.values())\n plt.xlabel('taxonomic provenance')\n plt.ylabel('number of molecules')\n plt.title('number of aglycons with taxonomies')\n plt.savefig(\"output_data/Barplot.png\")\n print(\"BAR PLOT DONE\")", "def visualize_data(df):\n # Remove 'not available'\n genres = df.genre.unique().tolist()\n remove_index = genres.index('Not Available')\n genres.pop(remove_index)\n print('Genres: ', genres)\n\n # Extract number of songs in each genre\n genre_counts = df.genre.value_counts().tolist()\n genre_counts.pop(remove_index)\n print('Counts: ', genre_counts)\n\n # Plot bar graph\n plt.bar(genres, genre_counts)\n plt.xlabel('Genres')\n plt.ylabel('Count')\n plt.show()", "def compte(df):\n\n df.value_counts()[:100].plot(kind='bar')\n plt.show()", "def stopword_bar(df, stop_words, ax):\n df_test = df.copy()\n df_test['prop'] = df.title.apply(stopword_proportion)\n sns.barplot(data=df_test, x='target', y='prop', ax=ax, ci=False)\n ax.set_title(\"Ratio of Stopwords Between Classes\", size=20)\n ax.set_ylim([1,2])\n ax.set_ylabel(\"Ratio\", size=20)\n ax.set_xlabel(\"Article Class\", size=20)\n plt.xticks(ticks=range(2),labels=['Normal','Clickbait'], size=20)\n return ax", "def show_class_imbalance(df, title='Class Imbalance', PATH=None):\n ax = sns.barplot(x=[\"Normal\", \"Clickbait\"], y=df.groupby(['target']).target.count())\n ax.set_title(title, size=20)\n plt.xticks([0,1],[\"Normal\", \"Clickbait\"], size = 20)\n ax.set_ylabel(\"Document Count\", size=17)\n ax.set_xlabel(\"Article Class\", size=20)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n return ax", "def plot_class_balances(df, col):\n\n ser_counts = df[col].value_counts()\n ser_counts.plot.bar()\n plt.title(col + ' Counts \\n(classes={})'.format(ser_counts.shape[0]))\n \n plt.show()", "def message_genre_bar_chart(df):\n genre_counts = df.groupby('genre').count()['message']\n genre_names = list(genre_counts.index)\n return {\n 'data': [\n Bar(\n x=genre_names,\n y=genre_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Message Genres',\n 'yaxis': {\n 'title': \"Count\"\n },\n 'xaxis': {\n 'title': \"Genre\"\n }\n }\n }", "def plot_balance_class(classes):\n unique, counts = np.unique(classes, return_counts=True)\n plt.bar(unique, counts)\n plt.title('Class Frequency')\n plt.xlabel('Class')\n plt.ylabel('Frequency')\n plt.show()", "def bar_plot(df, data_pt):\n \n x=df.loc[data_pt]\n y= df.columns.tolist()\n sorte=x.tolist()\n a=sorted(zip(sorte, y))[-10:]\n y=[y for _, y in a]\n ## soru burda yapıp altı ona göre duzeliyecegim birde\n \n x = df[y].loc[data_pt]\n \n # Here we modify the tickangle of the xaxis, resulting in rotated labels.\n #title={'text': \"<b>Comparing features with Golden for Cycle {}\".format(cycle),\n # 'y':0.9,'x':0.5,'xanchor': 'center','yanchor': 'top'}\n\n \n trace = {'type': 'bar',\n 'orientation':'h',\n 'x' : x,\n 'y' : y}\n data = Data([trace])\n layout = {'title' : \"<b>Reconstruction error in each dimension for cycle{}\".format(data_pt),\n 'titlefont':{'size' : 20},\n 'xaxis' : {'title': '<b>Reconstruction Error',\n 'titlefont':{'size' : 20},\n 'tickangle': -45, 'tickfont': {'size':15} ,},\n \n 'yaxis' : {'title': '<b>Features',\n 'titlefont':{'size' : 20},\n 'tickfont': {'size':15},},\n 'margin' : {'l':100, 'r' : 1, 'b': 200, 't': 100, 'pad' : 1},\n 'height' : 600, 'width' : 800,\n }\n \n fig = Figure(data = data, 
layout = layout)\n \n return pyo.iplot(fig)", "def category_bar_chart(df):\n label_names = df.drop(['message', 'original', 'genre', 'id'], axis=1).columns\n label_counts = []\n for column in label_names:\n label_counts.append(df[column].sum())\n return {\n 'data': [\n Bar(\n x=label_names,\n y=label_counts\n )\n ],\n\n 'layout': {\n 'title': 'Distribution of Labelled Categories',\n 'yaxis': {\n 'title': \"Count\",\n 'type': 'log'\n },\n 'xaxis': {\n 'title': \"Category\"\n }\n }\n }", "def bar_chart(self, df, n_groups, dict):\n fig, ax = plt.subplots()\n # choose bar width (standard 0.8 chosen)\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n index = np.arange(n_groups)\n indexes = df.index.tolist()\n print(indexes)\n df[\"index\"] = indexes\n\n # make barchart for permutation test\n ax.bar(index, df[\"perm\"], bar_width, color='b', linewidth=4,\n label='Permutation test')\n # make barchart for t-test\n ax.bar(index + bar_width, df[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_xlabel(dict[\"xlabel\"])\n ax.set_ylabel(dict[\"ylabel\"])\n ax.set_title(dict[\"title\"])\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.legend()\n\n fig.tight_layout()\n plt.show()", "def visualizeData(df):\n for column in df:\n df[column].value_counts().plot(kind = 'bar', rot = 'vertical', use_index = False)", "def plot_pie_charts_of_word_class_distribution(df):\n genre_dict = {\n 'g':'Rock',\n 'b':'Hip-Hop',\n 'r':'Pop'\n }\n for _, genre in genre_dict.items():\n filtered_df = df[df['genre'] == genre]\n \n # plotting circle diagram for the specific genre\n avg_percentage_nouns = filtered_df['nouns'].mean()\n avg_percentage_verbs = filtered_df['verbs'].mean()\n avg_percentage_adverbs = filtered_df['adverbs'].mean()\n\n total = avg_percentage_nouns + avg_percentage_nouns + avg_percentage_nouns\n nouns = avg_percentage_nouns / total * 100\n verbs = avg_percentage_verbs / total * 100\n adverbs = avg_percentage_adverbs / total * 100\n\n # Pie chart\n labels = ['Nouns', 'Verbs', 'Adverbs']\n sizes = [nouns, verbs, adverbs]\n\n _, ax1 = plt.subplots()\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n # Equal aspect ratio ensures that pie is drawn as a circle\n ax1.axis('equal') \n plt.tight_layout()\n plt.title(f'Circle diagram of the genre \"{genre}\"s average word classes distribution')\n plt.show()\n # plt.savefig(f'src/visualization/feature_plots/{genre}_word_class_distribution')", "def plot_word_class_pr_genre(df):\n df['nouns'] = df['nouns'] * 100\n df['verbs'] = df['verbs'] * 100\n df['adverbs'] = df['adverbs'] * 100\n # plotting nouns\n plotting_helper_method('nouns', 'genre', df)\n plt.title('Amount of nouns pr song pr. genre')\n plt.xlabel(\"Amount of nouns in each song\")\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/nouns_pr_genre_plot')\n\n # plotting verbs\n plotting_helper_method('verbs', 'genre', df)\n plt.title('Amount of verbs pr song pr. genre')\n plt.xlabel('Amount of verbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/verbs_pr_genre_plot')\n\n # plotting adverbs\n plotting_helper_method('adverbs', 'genre', df)\n plt.title('Amount of adverbs pr song pr. 
genre')\n plt.xlabel('Amount of adverbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/adverbs_pr_genre_plot')", "def proportion_with_cardinals(df, PATH):\n \n df_test = df.copy()\n df_test['cardinal'] = df.title.apply(contains_cardinal)\n\n click = df_test[df_test.target == 1]\n non = df_test[df_test.target == 0]\n click = click.groupby(['cardinal']).target.count()\n non = non.groupby(['cardinal']).target.count()\n \n non = non[1]/non[0] * 100\n click = click[1]/click[0] * 100\n # plot the results\n fig, ax = plt.subplots(figsize=(12,6))\n sns.barplot(x=['Normal', \"Clickbait\"], y=[non, click], ax=ax)\n plt.title(\"Percent of Titles Containing Cardinal Numbers\", size = 24)\n plt.xlabel(\"Article Class\", size=24)\n plt.ylabel(\"Percent %\", size = 24)\n plt.ylim(0, 100)\n plt.xticks([0,1], label=[\"Normal\", \"Clickbait\"], size=24)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n \n return ax", "def bar_chart(self, period='M', annot=True):\n assert period in [\"W\", \"M\", \"Y\"], \"Wrong Period. Chose between 'W' - 'M' - 'Y'\"\n assert isinstance(annot, bool), 'Error! Annot parameter must be boolean'\n months = [\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"]\n periods = {\"M\": (\"Monthly\",\"Months\"), \"Y\": (\"Yearly\", \"Years\"), \"W\": (\"Weekly\", \"Weeks\")}\n data = self.data.copy()\n data.set_index(pd.to_datetime(data.index), inplace=True)\n sample = pd.concat([data.head(1), data.resample(period).last()])\n sample['Var%'] = (sample['Profit/Loss'] - sample['Profit/Loss'].shift(1)) / sample['Value'].shift(1) * 100 \n sample.dropna(inplace=True)\n colors = sample['Var%'].apply(lambda x: \"green\" if x > 0 else \"red\")\n fig = plt.figure(figsize=(4,2), dpi=200)\n fig.patch.set_facecolor('#ececec')\n ax = fig.add_subplot(111)\n ax.set_xlabel(periods[period][1])\n ax.set_ylabel(\"Var (%)\")\n ax.set_title(f\"{periods[period][0]} Profit / Loss %\")\n ax.bar(np.arange(len(sample)), sample['Var%'], 0.35, color=colors, alpha=1, label=f\"{periods[period][0]} Statistics\")\n ax.set_xticks(np.arange(len(sample)))\n if period == \"Y\":\n labels = [x for x in sample.index.year]\n ax.set_ylim(sample['Var%'].min()-2,sample['Var%'].max()+2) \n elif period == \"W\":\n sample_M = pd.concat([data.head(1), data.resample(\"M\").last()])\n ax.set_xticks(np.arange(-2, len(sample_M)*4-2, 4))\n labels = [m + \"-\" + y for m, y in zip([months[x-1] for x in sample_M.index.month[1:]], [str(x) for x in sample_M.index.year[1:]])]\n m = months[int(months.index(labels[-1][:-5])) + 1] if int(months.index(labels[-1][:-5])) + 1 != 12 else months[0]\n y = int(labels[-1][-4:]) if m != 0 else int(labels[-1][-4:]+1)\n labels.append(m + '-' + str(y))\n else:\n labels = [m + \"-\" + y for m, y in zip([months[x-1] for x in sample.index.month], [str(x) for x in sample.index.year])]\n ax.set_xticklabels(labels)\n cords = {'M': (0.2, 0.5, 4, 1), 'W': (0.5, 0.5, 'x-small', 1), 'Y': (0.045, 0.3, 'x-large', 0.85)}\n if annot:\n for d, v in zip(range(len(sample)), sample['Var%']):\n if v > 0:\n ax.annotate(str(round(v, 2)) + \" %\", xy=(d - cords[period][0], v+cords[period][1]), fontsize=cords[period][2])\n else:\n ax.annotate(str(round(v, 2)) + \" %\", xy=(d - cords[period][0], v-cords[period][3]), fontsize=cords[period][2])\n if period != \"Y\":\n fig.autofmt_xdate()\n ax.grid(True, alpha=0.5)\n ax.legend()\n return fig, ax", "def proportions_visualiser(\n df: 
pd.core.frame.DataFrame,\n colum_name: str = \"Sensor Glucose (mg/dL)\",\n limits: Dict[str, int] = {\"low\": 70, \"high\": 180},\n windows: Dict[str, int] = {\"weekly\": 7, \"monthly\": 30},\n kind: str = \"TIR\",\n) -> NoReturn:\n\n valid_kinds = [\"TIR\", \"TBR\", \"TAR\"]\n\n if \"low\" not in limits.keys() or \"high\" not in limits.keys():\n raise Exception(f\"limits.keys() should be ['low', 'high'] not {limits.keys()}\")\n\n titles = {\n \"TIR\": f\"Time In Range [{limits['low']},{limits['high']})\",\n \"TAR\": f\"Time Above Range >= {limits['high']}\",\n \"TBR\": f\"Time Below Range < {limits['low']}\",\n }\n\n kind = kind.upper()\n if kind not in valid_kinds:\n raise Exception(\n f\"Invalid kind `{kind}`, select one from {valid_kinds} or refer to help({self.__name__})\"\n )\n\n TIR = (\n lambda y: 100\n * y[(y >= limits[\"low\"]) & (y < limits[\"high\"])].count()\n / y.count()\n )\n TBR = lambda y: 100 * y[(y < limits[\"low\"])].count() / y.count()\n TAR = lambda y: 100 * y[(y >= limits[\"high\"])].count() / y.count()\n\n _proportions = df[colum_name].groupby(df.index.date).apply(eval(f\"{kind}\"))\n\n _proportions.plot(**{\"label\": \"daily\"})\n\n for key, value in windows.items():\n _ax = _proportions.rolling(value).mean().plot(**{\"label\": key})\n\n _mean_proportion = _proportions.mean()\n plt.ylabel(\"Percentage\")\n plt.axhline(\n _mean_proportion,\n **{\"label\": f\"mean = {round(_mean_proportion,1)}\", \"c\": \"blue\"},\n )\n plt.legend()\n plt.title(titles[kind])", "def plot_bar_chart_quantum_vs_classical(\n df_bugs: pd.DataFrame,\n column_to_inspect: str,\n mapping_dict: Dict[str, str],\n categories_to_exclude: List[str] = [],\n categories_keep_only: List[str] = None,\n out_file_name: str = None,\n out_folder_path: str = None,\n horizontal: bool = False,\n map_value_since_beginning: bool = False,\n figsize: Tuple[int, int] = (10, 5),\n legend_placement: str = 'upper center'\n ):\n\n fig, ax = plt.subplots(figsize=figsize)\n\n df = expand_columns(df_bugs, column_to_inspect)\n df = df[~(df[column_to_inspect].isin(categories_to_exclude))]\n\n if categories_keep_only is not None:\n df = df[df[column_to_inspect].isin(categories_keep_only)]\n\n if map_value_since_beginning:\n df[column_to_inspect] = df[column_to_inspect].map(mapping_dict)\n\n categories_q_bugs = list(df[\n df['type'] == 'Quantum'].groupby(\n column_to_inspect).count().sort_values(\n by='type', ascending=False).index)\n\n for component in df[column_to_inspect].unique():\n if component not in categories_q_bugs:\n categories_q_bugs.append(component)\n\n args = {\n \"hue\": \"type\",\n \"data\": df,\n \"palette\": PALETTE,\n \"ax\": ax,\n \"order\": categories_q_bugs\n }\n\n if horizontal:\n sns.countplot(y=column_to_inspect, **args)\n ax.grid(axis='x')\n else:\n sns.countplot(x=column_to_inspect, **args)\n ax.grid(axis='y')\n\n if not map_value_since_beginning:\n # map the value at the latest stage, thus in the labels\n obj_labels = ax.get_xticklabels()\n for i, l in enumerate(obj_labels):\n obj_labels[i] = mapping_dict[l.get_text()]\n ax.set_xticklabels(obj_labels, rotation=60, ha='right')\n\n ax.set_xlabel(capitalize(column_to_inspect), fontsize=15)\n ax.set_ylabel(\"Count\", fontsize=15)\n plt.legend(title=\"Type of Bug\", loc=legend_placement)\n plt.tight_layout()\n\n if out_file_name is not None and out_folder_path is not None:\n fig.savefig(os.path.join(out_folder_path, out_file_name), format=\"pdf\")", "def plot_norm_bar(df, title, figsize=(12,7)):\n fig, ax = plt.subplots(ncols=1, figsize=figsize)\n 
fig.suptitle(title)\n cat_value_counts = df.fillna('missing').value_counts(normalize=True)\n sns.barplot(y = cat_value_counts.index, x= cat_value_counts.values*100)\n ax.set(xlabel= 'percentage', ylabel=str(df.name))\n \n plt.plot()\n\n return", "def plot_uv_bar(df, colname, colorid=0):\n if (colname in list(df.columns)):\n \n # Set figure size \n fig, ax = plt.subplots(figsize=(8,6))\n \n # set colorid for bar plot\n base_color = sns.color_palette()[colorid]\n\n # variable counts to calculate percentage\n cdict_count = df[colname].value_counts().to_dict() \n total_count = df.shape[0]\n \n \n if (len(list(cdict_count.keys())) > 5):\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.125\n # max. no. of categories Vs % rotation \n rottext_pct = 90 \n # font size for % display\n fontsiz_pct = 12\n else:\n # max.count to position the %\n maxcount_pct= np.max(list(cdict_count.values()))*0.075\n # max. no. of categories Vs % rotation \n rottext_pct = 0 \n # font size for % display\n fontsiz_pct = 16\n \n \n # plotting...\n sns.countplot(data = df, x = colname\n , order = list(cdict_count.keys())\n , color = base_color\n , saturation = 0.7)\n\n # title and labels\n plt.title('Order of '+ colname, fontsize=20)\n plt.xlabel(colname + ' Type', fontsize=16)\n plt.ylabel('Count', fontsize=16)\n \n # x-,y- ticks\n locs, labels = plt.xticks(fontsize=16)\n plt.yticks(fontsize=16)\n\n # display % count information on each tower of bar plot\n for loc, label in zip(locs, labels):\n count = cdict_count[label.get_text()]\n pct_string = '{:0.1f}%'.format(count*100/total_count)\n plt.text(loc, count-maxcount_pct, pct_string, ha='center', color='w', fontsize=fontsiz_pct, rotation=rottext_pct)\n\n return plt.show()\n\n else:\n \n print(' >>>Error:',colname,' is not in DataFrame')", "def leitner_bar(levels):\n\n df = pd.DataFrame(levels, columns=['comfort_level'])\n prop_df = leitner_proportions(df)\n locs = get_label_locs(prop_df)\n\n fig = px.bar(prop_df.T, orientation='h', width=400, height=200)\n fig.update_xaxes(\n showticklabels=False,\n showgrid=False,\n title_text='')\n fig.update_yaxes(showticklabels=False,\n showgrid=False,\n showline=False,\n zeroline=False,\n title_text='')\n fig.update_layout(\n plot_bgcolor = '#ffffff',\n showlegend = False,\n annotations=[\n dict(\n x=xval,\n y=0.5,\n text=txt,\n showarrow=False,\n xref='paper',\n yref='paper',\n font=dict(\n family='Lato',\n size=30,\n color=\"#000000\")\n ) for xval, txt in zip(locs, prop_df.index)\n ]\n )\n fig.update_traces(marker=dict(color=\"#FF909A\"),\n selector=dict(name='1'))\n fig.update_traces(marker=dict(color=\"#EFC9ED\"),\n selector=dict(name='2'))\n fig.update_traces(marker=dict(color=\"#C8F5FF\"),\n selector=dict(name='3'))\n fig.update_traces(marker=dict(color=\"#D5E3FF\"),\n selector=dict(name='4'))\n fig.update_traces(marker=dict(color=\"#FFF4BD\"),\n selector=dict(name='5'))\n return fig.to_json()", "def plot_bar(source_files, column_ids, column_names, normalize, sort, plot_difference, freq_bound, title=None,\n dtype=int):\n\n def _filter_data(raw_data, numerical):\n \"\"\" Filters plot-able data. 
\"\"\"\n # Retain numeric information\n legal_count_inventory = digits + '.'\n # Retain POS tags, also\n legal_entry_inventory = ascii_uppercase + '$'\n filtered_data = list()\n for data_point in raw_data:\n skip = False\n for symbol in list(str(data_point)):\n if symbol not in legal_count_inventory and symbol not in legal_entry_inventory:\n skip = True\n if not skip:\n if numerical:\n filtered_data.append(dtype(data_point))\n else:\n filtered_data.append(data_point)\n # Optionally normalize count values, resulting in a proportion plot\n if numerical and normalize:\n filtered_data = filtered_data / np.sum(filtered_data)\n return np.array(filtered_data)\n\n # Set plot parameters\n sns.set_style('whitegrid')\n sns.set_context('paper')\n\n # Compile data to be plotted within a new dataframe\n # Not necessary, but convenient when plotting with seaborn\n source_dict = dict()\n # Read in data and sort alphanumeric features (e.g. POS tags) alphabetically\n df_features = pd.read_table(source_files[0], header=None, names=['Tag', 'Count'], skip_blank_lines=True)\n df_features = df_features.sort_values('Tag', ascending=True)\n df_reference = pd.read_table(source_files[1], header=None, names=['Tag', 'Count'], skip_blank_lines=True)\n df_reference = df_reference.sort_values('Tag', ascending=True)\n # Isolate columns to be plotted\n entries = _filter_data(df_features.iloc[:, column_ids[0]].values, False)\n counts = _filter_data(df_features.iloc[:, column_ids[1]].values, True) # e.g. counts from corpus A\n reference_counts = _filter_data(df_reference.iloc[:, column_ids[1]].values, True) # e.g. counts from corpus B\n # Construct dataframe to be visualized\n source_dict[column_names[0]] = entries\n source_dict['reference_counts'] = reference_counts\n # Generate frequency mask to exclude low-frequency features from the plot\n # Optional; results in a clearer, better readable visualization\n frequency_mask = np.array(\n [int(counts[i] >= freq_bound or reference_counts[i] >= freq_bound) for i in range(counts.shape[0])])\n source_dict['frequency_mask'] = frequency_mask\n # Calculate per-feature count differences (i.e. target counts vs. 
reference counts), if specified\n if plot_difference:\n diffs = counts - reference_counts\n source_dict[column_names[1]] = diffs\n else:\n source_dict[column_names[1]] = counts\n features = pd.DataFrame.from_dict(source_dict)\n # Sort by count value and apply frequency mask\n if sort:\n features = features.sort_values(column_names[0], ascending=True)\n if freq_bound > 0:\n features = features.drop(features[features.frequency_mask == 0].index)\n\n # Make plot\n fig, ax = plt.subplots()\n fig.set_size_inches(8, 6)\n if plot_difference:\n colors = ['coral' if feature >= 0 else 'skyblue' for feature in features[column_names[1]]]\n sns.barplot(x=column_names[1], y=column_names[0], data=features, ax=ax, palette=colors)\n else:\n sns.barplot(x=column_names[1], y=column_names[0], data=features, ax=ax, palette='Set2')\n sns.despine()\n if title is not None:\n plt.title(title)\n plt.show()", "def __word_frequency_barplot(self,df, column_name, nr_top_words=20):\n tokenized_only_dict = Counter(np.concatenate(df[column_name].values))\n tokenized_only_df = pd.DataFrame.from_dict(tokenized_only_dict, orient='index')\n tokenized_only_df.rename(columns={0: 'count'}, inplace = True)\n tokenized_only_df.sort_values('count', ascending=False, inplace=True)\n fig, axs = plt.subplots(1,2,figsize=(20,8))\n \n a = tokenized_only_df['count'].values[:nr_top_words]\n amin, amax = min(a) , max(a)\n norm = []\n\n for i, val in enumerate(a):\n norm.append( (val - amin) / (amax- amin))\n\n sns.barplot( norm, list(range(nr_top_words)), palette='hls', orient= 'h', ax=axs[0])\n axs[0].set_yticks(list(range(nr_top_words)))\n axs[0].set_yticklabels(tokenized_only_df.index[:nr_top_words], fontsize=18)\n axs[0].set_title(\"Word Frequencies \" , fontsize=20)\n axs[0].set_xlabel(\"(a) Frequency of a Word\", fontsize = 18)\n\n document_lengths = []\n if column_name == self.__origintext_columnname or column_name == \"clean_text\" :\n document_lengths = np.array(list(map(len, df[column_name].str.split())))\n elif column_name == \"removed_stopwords\" or column_name == \"stem_words\":\n document_lengths = np.array(list(map(len, df[column_name])))\n\n print(\"The average number of Words in a document is: {}.\".format(np.mean(document_lengths)))\n print(\"The max number of Words in a document is: {}.\".format(np.max(document_lengths)))\n print(\"The min number of Words in a document is: {}.\".format(np.min(document_lengths)))\n axs[1].set_title('Distribution of number of words on ' , fontsize = 20)\n axs[1].set_xlabel(\"(b) Sentence Length\", fontsize = 18)\n sns.distplot(document_lengths, bins = 50 , ax =axs[1])\n plt.show()", "def eda_plot():\n\n df1 = pd.read_csv('eda_malware.csv')\n df2 = pd.read_csv('eda_random.csv')\n df3 = pd.read_csv('eda_popular.csv')\n\n df = pd.concat([df1, df2, df3], ignore_index=True)\n df['label'].replace([0,1],['Benign','Malware'],inplace=True)\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB', '#97C1A9']\n # b vs. m: node types counts\n f1 = pd.crosstab(df['label'], df['node_types_counts'])\n\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 5768], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. 
Malicious: Number of Node Types')\n fig.savefig('bv_node_types.png')\n\n # for a better look, limit type 5 malware to 2k counts only\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 2000], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. Malicious: Number of Node Types')\n fig.savefig('bv_node_types1.png')\n\n # node types\n # for malware: extract node types info for node types counts > 5, and sum up each types counts\n node_types = df[(df['label'] == 'Malware') & (df['node_types_counts'] >= 5)]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [215060, 2823059, 3135725, 5641356, 10679709, 16547701]\n labels = ['Others', 'static,Node', 'public,static,Node', 'Node', 'external,Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Malware: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_m.png')\n\n # for benign: extract node types info for node types counts, and sum up each types counts\n node_types = df[(df['label'] == 'Benign')]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [77967, 2892033, 2964924, 5287258, 6478196, 20364339]\n labels = ['Others', 'staticNode', 'public,staticNode', 'external,Node', 'Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Benign: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_b.png')\n\n # benign vs malware: counts\n sizes = [8435, 802]\n labels = ['Benign', 'Malware']\n\n colors = ['#EAB6AB','#D9E6F3']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Number of Benign vs. Malware', y=1.05)\n\n plt.show()\n fig1.savefig('bm_counts.png')\n\n # number of edges vs number of nodes\n groups = df.groupby('label')\n colors = ['#FFAEA5', '#A2E1DB']\n\n # Plot\n fig, ax = plt.subplots()\n ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n for name, group in groups:\n if name == 'Benign':\n c = colors[0]\n else:\n c = colors[1]\n ax.plot(group.number_edges, group.number_nodes, marker='o', linestyle='', ms=4, label=name, c=c)\n ax.legend()\n ax.set_xlabel('Number of Edges')\n ax.set_ylabel('Number of Nodes')\n ax.set_title('Benign & Malware: Number of Edges vs. 
Number of Nodes', y=1.05)\n\n plt.show()\n fig.savefig('bm_edges_nodes.png')", "def createChart(cladeGroup, data, taxonomyDict, outputFile):\n dfData = []\n for clade in cladeGroup: \n temp, other, totalTemp = valueCountsSpecies(data, cladeGroup[clade], taxonomyDict)\n relativeTemp = {}\n for val in temp:\n relativeTemp[val] = (temp[val] / sum(list(temp.values())))*100\n dfData.append(relativeTemp)\n\n tempDF = pd.DataFrame(dfData, index=list(cladeGroup.keys()))\n tempDF = tempDF.fillna(0)\n\n # Plotting\n sns.set(rc={'figure.figsize':(20,15)}, font_scale=2)\n ax = tempDF.plot(kind=\"bar\", stacked=True, colormap=ListedColormap(sns.color_palette(\"twilight\", 12)), rot=0)\n for rect in ax.patches:\n # Find where everything is located\n height = rect.get_height()\n width = rect.get_width()\n x = rect.get_x()\n y = rect.get_y()\n \n # The height of the bar is the data value and can be used as the label\n label_text = f'{height:.2f}%' # f'{width:.2f}' to format decimal values\n \n # ax.text(x, y, text)\n label_x = x + width / 2\n label_y = y + height / 2\n \n # only plot labels greater than given width\n if height > 0.00:\n ax.text(label_x, label_y, label_text, ha='center', va='center', fontsize=20, color=\"w\")\n\n plt.legend(loc=\"center right\", bbox_to_anchor=(1.25, 0.5), ncol=1)\n plt.savefig(outputFile, bbox_inches=\"tight\")\n plt.show()\n return", "def visualize(X: pd.DataFrame, y: pd.DataFrame) -> None:\r\n y[\"Action\"].value_counts().plot.pie(explode=(0.02, 0.04, 0.05, 0.09), title=\"Proportion of classes in dataset\")\r\n plt.savefig(\"Figures/proportions\")\r\n\r\n for i, column in enumerate(X.columns):\r\n fig, ax = plt.subplots(1, 2)\r\n\r\n ax[0].hist(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[0].set_xlabel(column)\r\n ax[0].set_ylabel(\"Frequency\")\r\n\r\n ax[1].boxplot(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[1].set_xlabel(\"Action\")\r\n ax[1].set_ylabel(column)\r\n\r\n X[column].hist(by=y[\"Action\"])\r\n\r\n ax[0].legend([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n ax[1].set_xticklabels([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n fig.suptitle(\"Distribution of classes among attributes\")\r\n plt.savefig(\"Figures/boxplots\")", "def barplot_topn_countries(df: pd.core.frame.DataFrame, feature: str,\n topn: int, kind: str, year: str, figsize=(12,6)) -> None:\n if kind != 'Import' and kind != 'Export':\n raise ValueError('Trade flow is not set to Import or Export')\n\n plt.figure(figsize=figsize)\n g = sns.barplot(x='Reporter', y=(feature,'sum'), data=df[0:topn],\n palette='muted')\n\n if topn > 5 and topn <= 10:\n rot = 0\n elif topn > 10:\n rot = 75\n else:\n rot = 0\n\n g.set_xticklabels(g.get_xticklabels(), rotation=rot)\n plt.ticklabel_format(style='plain', axis='y')\n if year == 'all':\n plt.title(f'Top-{topn} {kind}ers of vaccines around the globe', fontweight='bold')\n else:\n plt.title(f'Top-{topn} {kind}ers of vaccines around the globe in {year}', fontweight='bold')\n plt.xlabel(f'{kind}er Country')\n if feature == 'Trade Value (US$)':\n plt.ylabel(f'Total amount of {kind}s in US$')\n else:\n plt.ylabel(f'Total amount of {kind}s in Netweight (kg)')\n plt.grid(True, alpha = 0.3)\n plt.show()", "def drawStackedBarPlot(df, column, hue):\n 
plt.style.use('default')\n plt.style.use('dark_background')\n p_table = pd.pivot_table(df, index=column, \n columns=hue, aggfunc='size')\n p_table = p_table.div(p_table.sum(axis=1), axis=0)\n p_table.plot.bar(stacked=True, figsize=(14,7))\n plt.xlabel('Spekraltyp')\n plt.ylabel('Anteil')\n plt.show()", "def barGraph(listOfWord, listOfFrequency):\r\n\r\n\tindex = np.arange(len(listOfWord))\r\n\r\n\tplt.title(\"Frekuensi Kemunculan Kata\")\r\n\tplt.barh(index, listOfFrequency)\r\n\tplt.xlabel('Frekuensi')\r\n\tplt.yticks(index, listOfWord, fontsize=6)\r\n\r\n\tplt.show()", "def plot_df(data_frame):\n plt.figure(figsize = (10, 5))\n chart = sns.countplot(data_frame['label'], \n palette=\"Set1\"\n )\n plt.show()", "def forebears (WFROM,WTO,efrom, eto, g=25):\n \n c.execute(\"\"\"\n SELECT wyear, eyear, count (eyear), wnationality\n FROM clean \n WHERE (eyear IS NOT Null) AND (wyear IS NOT Null)\n AND WYEAR >= ? and WYEAR <= ? \n AND eyear >= ? AND eyear <= ? \n GROUP BY wyear, eyear\n ORDER BY wyear, eyear\"\"\", (WFROM, WTO, efrom, eto))\n\n years = c.fetchall()\n epigraphtotal = sum (s for (x,y,s,n) in years)\n #plt.xlim(WFROM, WTO)\n #plt.ylim(100, -1500)\n #colors = list(mcolors.TABLEAU_COLORS.keys()) *20\n #print(colors)\n \n \n gen =dd(lambda: dd(int))\n gentotal= dd(int)\n for (x,y,s,n) in years:\n gen[generation(x,g)][generation(y-x,g)] += 1\n gentotal[generation(x,g)] +=1\n \n for x in gen:\n for y in gen[x]:\n print(x, y, gen[x][y], gentotal[x])\n\n \n\n plt.figure(figsize=(10, 5))\n ax=plt.axes()\n\n\n #df.plot(colormap=gray) \n cumtotal = [0]*len(gen)\n\n for d in range(0,-200, -1):\n #for d in range(min(gen.keys()),max(gen.keys()),-1):\n xv = list(gen.keys())\n yv = [rat(gen[x][d],gentotal[x]) for x in xv]\n plt.bar(xv, yv, bottom=cumtotal,\n tick_label=[x*g for x in xv])\n cumtotal = [x + y for x, y in zip(yv, cumtotal)]\n #colors.pop()\n #print(d, cumtotal)\n plt.xlabel('Year of Work (in generations)')\n plt.ylabel(f'Share of Distance to forebear (in {g} year generations)')\n plt.title(f'Distance back vs Year of Work ({epigraphtotal} epigraphs)')\n plt.savefig(f\"figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}-{g}.png\")\n plt.close()", "def _bar_example_1(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot\")\n ch.set_subtitle(\"Automatically sorts by value counts.\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n )\n ch.show(_OUTPUT_FORMAT)", "def plot_cat(df, cat_columns, hue = \"default_payment_next_month\"):\n fig = plt.figure(figsize = (20,(len(cat_columns)/2+1)*8))\n loc = 1\n for col in cat_columns:\n ax = fig.add_subplot(len(cat_columns)/2+1, 2, loc)\n df_plot = df[[col, hue, \"id\"]].groupby([col, hue]).count()\n df_plot.reset_index(inplace = True)\n sns.barplot(x=col, y= \"id\", hue = hue, data=df_plot, palette = \"GnBu_d\", ax = ax);\n plt.legend(title = \"default payment (1=yes, 0=no)\")\n plt.ylim([0.0001,15000])\n plt.ylabel(\"clients\");\n loc += 1", "def get_proba_plot(data_type):\n buffer = io.BytesIO()\n plt.subplots(figsize = (25,15))\n ax = sns.barplot(x='proba', y='type', data=data_type, palette = \"Blues_r\")\n ax.set_xlabel('Probability')\n plt.yticks(fontsize = 30)\n plt.xticks(fontsize = 30)\n plt.ylabel(\"Type\", fontsize = 38)\n plt.xlabel(\"Probability\", fontsize = 38);\n plt.title(\"Model Results\", fontsize = 50)\n plt.savefig(buffer, format='png')\n# plt.show()\n plt.close()\n buffer.seek(0)\n chart_probability= 
Image.open(buffer).resize((512+256,512))\n return chart_probability", "def graph_cause_count_each(df, label):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(15, 8)\r\n plt.xticks(rotation=45)\r\n ax.set_title(label.capitalize() + \" Police Death Causes\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\")\r\n # plt.show()\r\n # save the graph as an image with the correct cause naming\r\n name = \"2_graph_cause_count_\" + label + \".png\"\r\n fig.savefig(name)", "def bar_plot_with_population_proportion(df, x, y,\n func=np.median,\n show_error_bar=True,\n show_na=True,\n na_label='Null',\n circle_diameter=150,\n split_variable=False,\n colors=None,\n color_palette=None,\n x_label=None,\n y_label=None,\n show_qty=True,\n qty_label='Quantity',\n proportion_label='Percentage',\n proportion_format='.2f',\n show_population_func=False,\n population_format='.0f',\n population_func_legend='Median population value',\n population_legend='Population %',\n up_label='Positive values',\n down_label='Negative values',\n plot_title=None,\n figsize=(16, 7),\n **legend_kwargs):\n data = df.copy()\n\n if show_na:\n data[x] = data[x].fillna(na_label)\n else:\n data = data.dropna(subset=[x])\n\n data[x] = data[x].astype('str')\n data = data.sort_values(by=x)\n\n fig, ax = plt.subplots(figsize=figsize, dpi=120)\n\n if colors is None:\n colors = get_palette()\n\n if color_palette:\n colors = sns.color_palette(color_palette, 3)\n\n if colors is not None:\n if split_variable and len(colors) < 3:\n raise KeyError(f'Expected 3 colors but only {len(colors)} was/were passed.')\n elif len(colors) < 2:\n raise KeyError(f'Expected 2 colors but only {len(colors)} was/were passed.')\n\n categories_names = ['df_up', 'population', 'df_down']\n\n colormap = dict(zip(categories_names, colors))\n\n _plot_bars(data, x, y, func, split_variable, ax, colormap, up_label, down_label, x_label, show_error_bar)\n\n _set_ticks_and_annotation(data, x, y, func, ax, circle_diameter, colormap, proportion_format, show_qty, qty_label,\n proportion_label, show_population_func, population_func_legend, population_format)\n\n _set_titles_and_labels(ax, colormap, plot_title, population_legend, x, y, y_label, x_label, **legend_kwargs)\n\n plt.close()\n\n return fig", "def plot(var):\n # MISSCHIEN KUNNEN WE HIER NOG IETS MEE\n # total_dead = len(train_data[\"Survived\"] == 0)\n # total_survived = len(train_data[\"Survived\"] == 1)\n # died = train_data[train_data[\"Survived\"] == 0][var].value_counts() / total_dead\n # survived = train_data[train_data[\"Survived\"] == 1][var].value_counts() / total_survived\n sns.set()\n sns.set_color_codes(\"pastel\")\n\n # order bars for family size variable\n if var == \"FamSize\":\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=.7, order=[\"alone\", 1, 2, 3, \"4 or more\"]).\\\n tick_params(labelsize=18)\n else:\n sns.barplot(x=var, y=\"Survived\", data=train_data, color=\"b\",\\\n capsize=.1, errwidth=1.1).tick_params(labelsize=18)\n\n # plot style properties\n ax = plt.gca()\n\n for ax in plt.gcf().axes:\n x = ax.get_xlabel()\n y = ax.get_ylabel()\n ax.set_xlabel(x, fontsize=20)\n ax.set_ylabel(y, fontsize=20)\n\n plt.title(\"Ratio of survivors for variable \" + str(var), fontsize=22)\n t = ax.title\n t.set_position([.5, 1.05])\n plt.ylim([0, 1])\n plt.subplots_adjust(bottom=.15, left=.15)\n 
plt.savefig(\"results/survived_\" + str(var) + \".png\", bbox_inches=\"tight\")\n\n plt.show()", "def _bar_example_4(quantity_by_fruit):\n ch = chartify.Chart(x_axis_type=\"categorical\", blank_labels=True)\n ch.set_title(\"Vertical bar plot with labels\")\n ch.set_subtitle(\"Hidden y-axis\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.style.color_palette.reset_palette_order()\n ch.plot.text(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n text_column=\"quantity\",\n color_column=\"fruit\",\n )\n # Adjust the axis range to prevent clipping of the text labels.\n ch.axes.set_yaxis_range(0, 1200)\n ch.axes.hide_yaxis()\n ch.show(_OUTPUT_FORMAT)", "def plot_total_effects( total_effects, ax, truncation_pct=0.95,\n rv='z', qoi=0):\n\n total_effects=total_effects[:,qoi]\n\n width=.95\n locations = np.arange(total_effects.shape[0])\n p = ax.bar(locations-width/2,total_effects,width,align='edge')\n labels = ['$%s_{%d}$' %(rv,ii+1) for ii in range(total_effects.shape[0])]\n ax.set_xticks(locations)\n ax.set_xticklabels(labels,rotation=0)\n return p", "def create_course_bars(hist_df, fig, labels):\n colors = [\n \"#60a7ba\",\n \"#f0912d\",\n \"#357025\",\n \"#ba3622\",\n \"#8f33d6\",\n \"#6a4c4d\",\n \"#cf8af3\",\n ]\n all_numbers = []\n\n for index, _ in enumerate(fig[\"layout\"][\"annotations\"]):\n all_numbers.append(float(fig[\"layout\"][\"annotations\"][index][\"text\"]))\n\n for _, idx in enumerate(hist_df.index.unique()):\n row = all_numbers.index(idx)\n show_legend = row == 0\n traces = []\n\n # Calculate subfigure position in figure\n row = (row + 1) / 2\n col = 1 if row.is_integer() else 0\n row = math.ceil(row) - 1\n\n # Calculate dataframe for plot\n task_subset_df = hist_df.loc[idx]\n task_subset_df = task_subset_df.apply(pd.value_counts).T\n task_subset_df = task_subset_df.div(task_subset_df.sum(axis=1), axis=0)\n\n # Handle case if there are only correct answers\n if task_subset_df.shape != (\n 7,\n 2,\n ): # sometimes task_subset_df is in the wrong shape\n if task_subset_df.shape != (\n 7,\n 1,\n ):\n task_subset_df = task_subset_df.T\n\n if \"correct\" in task_subset_df.columns.values:\n task_subset_df[\"incorrect\"] = 0\n\n # Each bar needs a color and a legend entry and will therefore\n # be plotted individually\n for i, color in enumerate(colors):\n trace = go.Bar(\n x=[task_subset_df.index.values[i]],\n y=[task_subset_df.incorrect[i] * 100],\n name=labels[i],\n marker={\"color\": color},\n showlegend=show_legend,\n )\n traces.append(trace)\n\n # All traces build one subfigure\n for trace in traces:\n fig.append_trace(trace, row=row + 1, col=col + 1)\n\n # Figure styling\n fig.update_layout(\n height=400 * (row + 1),\n legend={\n \"orientation\": \"h\",\n \"xanchor\": \"left\",\n \"yanchor\": \"bottom\",\n \"x\": 0.15,\n \"y\": 1.05,\n },\n )\n fig.update_xaxes(showticklabels=False)\n\n # label the y-axis of every subplot row, not just the last one drawn\n for i in range(0, row + 1):\n  fig.update_yaxes(title_text=\"Students\", row=i + 1, col=1)\n return fig", "def graph_year_cause_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 12)\r\n plt.xticks(rotation=25)\r\n ax.set_title(\"2001 and 2007 Police Death Causes\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"cause_short\", \"count\", data=df, palette=\"bone\", hue='year')\r\n # plt.show()\r\n # save the graph as 
an image\r\n fig.savefig(\"1_graph_top_cause_count.png\")", "def summer_bar_chart(self):\n # Create top n countries data from 1996 to 2014\n df_summer = self.df_summer[self.df_summer['Year'] >= 1996]\n m = list(df_summer['Country'].value_counts()[:self.n_top].index)\n df_top = df_summer[df_summer['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n k = []\n # Create the dataframe in 2016.\n for j in self.df_2016_summer['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n summer_2016 = pd.DataFrame()\n for i in m:\n df_tmp = self.df_2016_summer[self.df_2016_summer['NOC'] == k[i]]\n summer_2016 = pd.concat([summer_2016, df_tmp])\n summer_2016['Country'] = m\n new_summer_2016 = summer_2016.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add the two dataframes and plot\n unstacked_df_top.add(new_summer_2016).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Summer Olympics since 1996')\n fname = './medal_figures_summer/summer_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def graph_cause_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 12)\r\n plt.xticks(rotation=45)\r\n ax.set_title(\"Yearly Vehicle Accident Police Deaths\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\", ci=None)\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"2_graph_cause_count.png\")", "def graph_year_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(15, 8)\r\n plt.xticks(rotation=45)\r\n ax.set_title(\"Yearly Police Deaths\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\")\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"1_graph_year_count.png\")", "def graphy2():\n data = pd.read_csv(\"week2.csv\")\n plot_g = pygal.Bar(fill=True, interpolate='cubic', style=LightSolarizedStyle)\n plot_g.title = \"Top Fans in Week 2\"\n plot_g.x_labels = data.GENDER\n plot_g.y_labels = map(int, range(0, 80, 10))\n plot_g.add(\"Male\", data.COUNT)\n plot_g.add(\"Female\", data.COUNT2)\n plot_g.add(\"Total\", data.COUNT3)\n plot_g.render_to_file(\"plotweek2.svg\")", "def sum_by_university(self,df):\n with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also\n df_univ = df.groupby(['Coll/Univ']).sum()\n \n df_univ = df_univ.sort_values('PB')\n df_top_univ = df_univ[-30:]\n \n #Visual bargraph for top 30 Colleges and number of pro-bowl appearances they produce\n df_univ_PB = df_top_univ['PB']\n univ_plot = df_univ_PB.plot(kind=\"barh\", fontsize=4)\n univ_plot.set_xlabel(\"Pro bowl appearances\")\n univ_plot.set_title(\"PRO BOWL APPEARANCES, BY COLLEGE/UNIVERSITY, 2010-2020\")\n plt.show()\n \n return", "def _bar_example_2(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, x_axis_type=\"categorical\")\n ch.set_title(\"Vertical bar plot - Label sort\")\n ch.set_subtitle(\"Set `categorical_order_by` to sort by labels\")\n ch.plot.bar(\n 
data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n categorical_order_by=\"labels\",\n categorical_order_ascending=True,\n )\n ch.show(_OUTPUT_FORMAT)", "def figure_4(df):\n\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 6))\n f, axes = plt.subplots(4, 1, figsize=(6, 9)) # , sharex=True)\n sns.despine(top=True, bottom=True)\n\n # Add collapsed final treatments\n df['FinalTx_coll'] = df.apply(collapse_final_treatment, axis=1)\n\n OSA_pure_hist = df.loc[df['BaseDx'] == \"Mainly OSA\"].sort_values(\"FinalTx_coll\")\n OSA_predom_hist = df.loc[df['BaseDx'] == \"Combined OSA/CSA\"].sort_values(\"FinalTx_coll\")\n CSA_predom_hist = df.loc[df['BaseDx'] == \"Predominantly CSA\"].sort_values(\"FinalTx_coll\")\n CSA_pure_hist = df.loc[df['BaseDx'] == \"Pure CSA\"].sort_values(\"FinalTx_coll\")\n\n # Create count plot for each #CSA on the left, then a Pie Chart with proportion on the right\n\n hatches = ['', '', '\\\\\\\\\\\\', '...']\n face_color = ['dimgrey','white', 'white', 'white']\n\n # Pure OSA\n\n bar = sns.countplot(y=\"FinalTx_coll\", data=OSA_pure_hist, ax=axes[3])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[3].set(xlabel=\"Number of Patients\", ylabel=\"<10% CSA\")\n\n # Predom OSA\n bar = sns.countplot(y=\"FinalTx_coll\", data=OSA_predom_hist, ax=axes[2])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[2].set(xlabel=\"\", ylabel=\"10-49.9% CSA\")\n\n # Predom CSA\n bar = sns.countplot(y=\"FinalTx_coll\", data=CSA_predom_hist, ax=axes[1])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[1].set(xlabel=\"\", ylabel=\"50-90% CSA\")\n\n # Pure CSA\n bar = sns.countplot(y=\"FinalTx_coll\", data=CSA_pure_hist, ax=axes[0])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[0].set(xlabel=\"\", ylabel=\">90% CSA\")\n\n # Combined X axis for L side\n axes[3].get_shared_x_axes().join(axes[3], axes[2], axes[1], axes[0])\n axes[0].set_xticklabels(\"\")\n axes[1].set_xticklabels(\"\")\n axes[2].set_xticklabels(\"\")\n # Leave bottom aka [3,0] labels in\n\n # Resize all\n axes[0].autoscale()\n axes[1].autoscale()\n axes[2].autoscale()\n axes[3].autoscale()\n\n f.tight_layout(rect=[0, 0, 1, 1]) # .95 to leave space for title\n f.savefig('Figure 4 - final tx by perc csa', dpi=100)\n # plt.show()", "def missing_analysis(df):\n df_isnull = (df.isnull().sum() / len(df))*100\n df_isnull = df_isnull.drop(df_isnull[df_isnull ==0].index).sort_values(ascending = False)\n missing_data = pd.DataFrame({'Percentual Missing': df_isnull})\n missing_data.plot.bar()", "def winter_bar_chart(self):\n # Create the top n countries dataframe from 1994 to 2016\n df_winter = self.df_winter[self.df_winter['Year'] >= 1994]\n m = list(df_winter['Country'].value_counts()[:self.n_top].index)\n df_top = df_winter[df_winter['Country'].isin(m)].groupby(['Country', 'Medal']).size()\n new_index = pd.MultiIndex.from_product([m, ['Gold', 'Silver', 'Bronze']], names=df_top.index.names)\n 
df_top = df_top.reindex(new_index)\n unstacked_df_top = df_top.unstack().reindex(m, columns=['Gold', 'Silver', 'Bronze'])\n # Create the dataframe in 2018\n k = []\n for j in self.df_2018_winter['NOC'].tolist():\n n = j[j.find('(') + 1:j.find(')')]\n k.append((n, j))\n k = dict(k)\n winter_2018 = pd.DataFrame()\n for i in m:\n if i != 'RUS':\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k[i]]\n else:\n df_tmp = self.df_2018_winter[self.df_2018_winter['NOC'] == k['OAR']]\n winter_2018 = pd.concat([winter_2018, df_tmp])\n winter_2018['Country'] = m\n new_winter_2018 = winter_2018.set_index(['Country'])[['Gold', 'Silver', 'Bronze']]\n # Add two dataframes and plot.\n unstacked_df_top.add(new_winter_2018).reindex(m[::-1], columns=['Bronze', 'Silver', 'Gold']).plot(kind='barh')\n plt.title('Medal Result of Winter Olympics since 1994')\n fname = './medal_figures_winter/winter_bar_chart.png'\n plt.savefig(fname=fname, format='png')\n return", "def bar_charts(cluster, genelist, groups=[\"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\", \"FL\", \"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\" ], postfix=''):\n\n limits = cluster.reorder_matrix(groups)\n pp = PdfPages(cluster.exportPath[0:-4] + postfix + '.bar_plots.pdf')\n\n # get kegg pathways and NCBI values for each gene:\n ko_dict = genematch.cbir_to_pathway(genelist.keys()) # ko_dict = {gene:str(pathway)}\n go_monster = genematch.GO_maker()\n ncbi_terms = genematch.cbir_ncbi(genelist)\n\n for gene in genelist:\n # get gene details for later use:\n ignore, kotermdic = genematch.cbir_to_kegg([gene],reversedic=True)\n\n anova = degs_anova(cluster, onegene=gene, groups=groups)\n\n try:\n koterm = kotermdic[gene]\n except KeyError:\n koterm = 'no KO'\n\n genegos = go_monster.findem(gene)\n godesc = \"\".join([ \"%s %s %s\\n\" % (g, genegos[g][1], genegos[g][0]) for g in genegos ])\n\n # calculate mean/SEM...\n if gene in cluster.column_header:\n pos = cluster.column_header.index(gene)\n else:\n continue\n gm = [groups[0]] * (limits[0]) # matrix of group names for Tukey's post hoc\n v = [numpy.average(cluster.data_matrix[:limits[0],pos])] # averages\n se = [numpy.std(cluster.data_matrix[:limits[0],pos])/numpy.sqrt(limits[0]+1)] #SEM\n for i in range(len(groups)-1):\n gm += [groups[i+1]] * (limits[i+1]-limits[i])\n v.append(numpy.average(cluster.data_matrix[limits[i]:limits[i + 1],pos]))\n se.append(numpy.std(cluster.data_matrix[limits[i]:limits[i + 1],pos])/numpy.sqrt(limits[i+1]-limits[i]+1))\n\n # calculate tukey's post-hoc values and plot:\n tfig, taxes = plt.subplots()\n\n try:\n posthoc = pairwise_tukeyhsd(cluster.data_matrix[:,pos],gm)\n except Exception as inst:\n verbalise(\"R\", \"Tukey calculation error - check that you have >1 value for each category.\")\n print inst\n continue\n phimg = posthoc.plot_simultaneous(comparison_name='SP', \\\n ax=taxes, ylabel='Groups', xlabel='Normalised Expression', \\\n labelorder = [\"SP\", \"SL06\", \"SL12\", \"SL24\",\"SL48\", \"SL96\", \\\n \"FL\", \"FP06\", \"FP12\", \"FP24\",\"FP48\", \"FP96\" ])\n\n # plot_simultaneous does not correctly report the y-axis labels. 
So to fix:\n taxes.set_xticks(numpy.arange(13.0)*1) # increase to gain all labels\n plt.tight_layout() # resets axes\n xlabels = taxes.get_xticklabels() # gets values I need\n\n labelist = [xtick.get_text() for xtick in xlabels] # creates an ordered list of labels\n labelist.pop(0) # removes first element (blank label)\n taxes.set_xticks(numpy.arange(12.0)*1) # now create the right number of ticks\n taxes.set_xticklabels(labelist) # reset with new names\n title_string = \"%s %s(ANOVA P-value %.8f)\\n%s\\n KEGG ortholog %s:\\n%s\\n%s\"\n taxes.set_title(title_string % (os.path.basename(cluster.exportPath[:-4]), gene, anova[gene], ncbi_terms[gene], koterm, ko_dict[gene], godesc), fontsize=12 )\n\n plt.tight_layout()\n plt.savefig(pp, format='pdf')\n #plt.show(phimg)\n plt.close()\n # print summary to file:\n tukeys_h = open(cluster.exportPath[:-4] + '.tukeys.txt','a')\n tukeys_h.write('Gene ' + str(gene) + ':\\n')\n tukeys_h.write(str(posthoc) + '\\n\\n')\n tukeys_h.close()\n\n \"\"\"\n # create box plot of expression values:\n ind = numpy.arange(len(groups)) # x-coords for bars\n width = 0.35 # box width\n\n fig, ax = plt.subplots()\n rects1 = ax.bar(ind, v, width, color='r', yerr=se)\n\n # add details:\n ax.set_ylabel('Normalised Expression')\n ax.set_title('Gene Expression for %s (%s):\\n %s\\n%s' % (str(gene), koterm, ko_dict[gene], godesc), fontsize=12 )\n ax.set_xticks(ind+width)\n ax.set_xticklabels(groups)\n\n plt.tight_layout()\n plt.savefig(pp, format='pdf')\n plt.show()\n \"\"\"\n pp.close()", "def generate_barplot(predictions, labels):\n plot = figure(x_range=labels, plot_height=300, plot_width=400)\n plot.vbar(x=labels, top=predictions, width=0.8)\n # plot.xaxis.major_label_orientation = pi / 2.\n # plot.xaxis.axis_label_text_font_size = \"40pt\"\n # plot.yaxis.axis_label_text_font_size = \"40pt\"\n\n return components(plot)", "def multiple_bars(self, df, nrows, ncols, dict):\n fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(6, 9.3))\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.50, wspace=0.05)\n\n bar_width = 0.35\n for ax, (key, dat) in zip(axs.flatten(), df):\n n_groups = len(dat.index)\n index = np.arange(n_groups)\n\n # make barchart for permutation test\n bar1 = ax.bar(index, dat[\"perm\"], bar_width, color='b',\n label='Permutation test')\n # make barchart for t-test\n bar2 = ax.bar(index + bar_width, dat[\"t_test\"], bar_width, color='r',\n label='t-test')\n\n ax.set_ylabel(\"Error\")\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.set_title(f\"Effect size = {key}\")\n ax.set_xlabel(f\"Group Size\")\n ax.legend()\n\n for rect, i in zip(bar1 + bar2, dat[\"sig\"]):\n height = rect.get_height()\n if i:\n ax.text(rect.get_x() + rect.get_width(), height, \"**\", ha='center', va='bottom')\n\n\n fig.suptitle(dict[\"title\"], y=1.0, fontsize = 15)\n fig.tight_layout()\n plt.show()", "def draw_bar_plot():\n # Copy and modify data for monthly bar plot\n \n df_bar = df.copy()\n\n # Draw bar plot\n leglab = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\n labels = [2016, 2017, 2018, 2019]\n months = np.zeros([12, 4])\n\n for i in range(12):\n for j, year in enumerate(labels):\n t = df[df.index.year == year]\n months[i][j] = t[t.index.month == i].value.mean()\n\n x = np.arange(len(labels))\n width = 0.7\n fig, ax = plt.subplots()\n fig.set_figwidth(10)\n fig.set_figheight(8)\n for i, month in enumerate(months):\n ax.bar(x - 
(width * (12 - i) / 12), months[i], width / 12, label=leglab[i])\n\n ax.set_ylabel(\"Average Page Views\")\n ax.set_xlabel(\"Years\")\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n ax.legend(title='Months')\n\n # Save image and return fig (don't change this part)\n fig.savefig('bar_plot.png')\n return fig", "def plot_results(t_val, mood):\r\n N = 8\r\n theta = np.linspace(0.0, 2 * np.pi , N, endpoint=False)\r\n the_stats = [t_val['number_words'], t_val['average_character_length'], \r\n t_val['signs'], t_val['multiple_signs'], t_val['question'],\r\n t_val['exclamation'], t_val['name'], mood] \r\n \r\n width = np.pi / N \r\n\r\n plt.figure()\r\n \r\n handle = plt.subplot(111, polar=True)\r\n handle.set_xticklabels(['Word', 'AvrChar', 'Signs', '2Signs', '?', '!', 'name', 'mood'])\r\n \r\n handle.bar(theta, the_stats, width=width, bottom=1.0)\r\n \r\n plt.show()", "def plot_bv_bar(df, xcolname, ycolname, icol=0):\n # set plot size\n fig, ax = plt.subplots(figsize=(8,6))\n \n # plotting... box\n sns.barplot(ax=ax, data = df\n , x = str(xcolname)\n , y = str(ycolname)\n , color = sns.color_palette()[icol]);\n \n \n # title and labels\n plt.title(xcolname+' Vs '+ycolname, fontsize=20)\n plt.xlabel(xcolname+ ' (units)', fontsize=16)\n plt.ylabel(ycolname+ ' (units)', fontsize=16)\n \n return plt.show()", "def grafMortes(tipPDF):\n\n\t\tdf = pd.read_csv('dadosPaises.csv') #dataFrame holding the data table\n\t\tdf = df.sort_values(by=['Quantidade de Mortes'], ascending=False) #sort by the 'Quantidade de Mortes' column in descending order\n\t\tdf2 = df[:5] # split the first 5 rows into a new dataframe\n\n\t\tfig, ax = plt.subplots()\n\n\t\tindex = np.arange(len(df2['Quantidade de Mortes']))\n\t\tbar_width = 0.45\n\t\tscore_label = np.arange(0,6000,1000)\n\t\tbar = ax.bar(index, df2['Quantidade de Mortes'], bar_width, label='Quantidade de Mortes',color='#cc0000')\n\n\t\tax.set_xticks(index) #spacing between bars\n\t\tax.set_xticklabels(df2['Dados do País']) #bar labels\n\t\tax.set_yticks(score_label) #spacing between y-axis ticks\n\t\tax.set_yticklabels(score_label) #y-axis tick labels\n\t\tax.legend()\n\n\t\t#loop to plot the labels\n\t\tfor i in index:\n\t\t\tax.annotate('{}'.format(bar[i].get_height()), #string to be plotted\n\t\t\t\t\t\txy=(bar[i].get_x(),bar[i].get_height()), #where the label is drawn\n\t\t\t\t\t\txytext=(0,2), #additional label offset\n\t\t\t\t\t\ttextcoords='offset points',\n\t\t\t\t\t\tva='bottom'\n\t\t\t\t\t\t)\n\n\t\tplt.title('Countries with the most deaths')\n\n\t\tif tipPDF:\n\t\t\tnomFig = 'grafico2.pdf'\n\t\telse :\n\t\t\tnomFig = 'grafico2.png'\n\t\t\n\t\tplt.savefig(nomFig)", "def dr_distribution(df_train, df_test, df_tune, research_feature, flag_column, xticks = []):\r\n plt_ = __import__(\"matplotlib.pyplot\")\r\n\r\n dff = df_train.copy()\r\n dff_te = df_test.copy()\r\n dff_tu = df_tune.copy()\r\n\r\n counts = []\r\n counts_te = []\r\n counts_tu = []\r\n\r\n dr = []\r\n dr_te = []\r\n dr_tu = []\r\n\r\n vals = sorted(dff[research_feature].unique())\r\n vals_te = sorted(dff_te[research_feature].unique())\r\n vals_tu = sorted(dff_tu[research_feature].unique())\r\n\r\n for value in vals:\r\n sub_df = dff[dff[research_feature] == value]\r\n counts.append(len(sub_df) / len(dff) * 100)\r\n dr.append(len(sub_df[sub_df[flag_column] == 1]) / len(sub_df))\r\n\r\n for value in vals_te:\r\n sub_df = dff_te[dff_te[research_feature] == value]\r\n counts_te.append(len(sub_df) / len(dff) * 100)\r\n dr_te.append(len(sub_df[sub_df[flag_column] == 
1]) / len(sub_df))\r\n\r\n for value in vals_tu:\r\n sub_df = dff_tu[dff_tu[research_feature] == value]\r\n counts_tu.append(len(sub_df) / len(dff) * 100)\r\n dr_tu.append(len(sub_df[sub_df[flag_column] == 1]) / len(sub_df))\r\n\r\n fig, ax1 = plt_.pyplot.subplots(figsize=(15, 7))\r\n ax2 = ax1.twinx()\r\n\r\n p1 = ax1.bar(np.arange(len(counts)), counts, width=0.35, color=(24 / 254, 192 / 254, 196 / 254))\r\n p2 = ax1.bar(np.arange(len(counts)), counts_te, width=0.35, color=(246 / 254, 115 / 254, 109 / 254), align='edge')\r\n p3 = ax1.bar(np.arange(len(counts)), counts_tu, width=0.35, color=(123 / 254, 197 / 254, 13 / 254), align='edge')\r\n\r\n p1.set_label('Train')\r\n p2.set_label('Test')\r\n p3.set_label('Tune')\r\n\r\n ax2.plot(np.arange(len(dr)), dr, marker='o', color='orange')\r\n ax2.plot(np.arange(len(dr_te)), dr_te, marker='o', color='blue')\r\n ax2.plot(np.arange(len(dr_tu)), dr_tu, marker='o', color='red')\r\n\r\n for i, v in enumerate(dr):\r\n ax2.text(i, v + 0.005 * v, str(round(v * 100, 2)) + '%')\r\n for i, v in enumerate(counts):\r\n ax1.text(i - 0.1, v - 0.5 * v, str(round(v, 2)) + '%', fontsize=12)\r\n # ax2.plot(np.arange(len(datas)-2), [gini[2] for gini in ginies_test_sigmoid], marker = 'o', color='red')\r\n\r\n ax1.set_ylabel('Factor distribution across buckets (in %)', fontsize=15)\r\n ax1.set_xlabel('WOE value of factor %s' % (research_feature), fontsize=15)\r\n ax2.set_ylabel('Default rate', fontsize=15)\r\n\r\n plt_.pyplot.title('Factor distribution across buckets (WOE) and default-rate dynamics', fontsize=15)\r\n if xticks:\r\n plt_.pyplot.xticks(np.arange(len(vals)), xticks, rotation=45, fontsize=20)\r\n else:\r\n plt_.pyplot.xticks(np.arange(len(vals)), vals, rotation=45, fontsize=20)\r\n plt_.pyplot.grid(alpha=0.2)\r\n plt_.pyplot.show()\r\n return", "def bar_time_series(df, title, ylabel, report):\n for col in df:\n fig, ax = plt.subplots(1, 1, figsize=(12, 4))\n plt.gcf().subplots_adjust(bottom=0.25)\n df[col].plot.bar();\n ax.set_xticklabels([v if i % 4 == 0 else '' for i, v in enumerate(df.index)])\n ax.xaxis.set_tick_params(rotation=45, length=0);\n ax.set_xlabel('Date')\n ax.set_ylabel(ylabel)\n full_title = title if df.shape[1] == 1 else '{} {}'.format(col, title)\n report.write_plot(full_title)\n plt.title(full_title)\n plt.show();\n plt.close();", "def grid_plot_gutenberg(proverbs_list, data, counts, begin_at =1800, end_at = 1950, bin_size = 20): \n \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = 4, 4\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2) \n \n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize=14)\n fig.text(0.02, 0.5, 'Frequency among all volumes in Gutenberg', va='center', rotation='vertical', fontsize=14)\n \n ts = data.copy()\n ts_bin = ts.groupby(lambda x: (x//bin_size)*bin_size).sum()\n ts_norm = ts_bin.div(ts_bin['num_books'], axis=0)\n ts_norm = ts_norm.fillna(0)\n ts = ts_norm.truncate(before = begin_at, after = end_at)[proverbs_list]\n\n #loop to create each timeseries plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ts2 = ts[proverbs_list[i]].to_frame()\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n\n ax.plot(ts2.index, ts2[proverbs_list[i]], alpha = 0.5)\n i+=1\n 
\n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def _bar_example_3(quantity_by_fruit):\n ch = chartify.Chart(blank_labels=True, y_axis_type=\"categorical\")\n ch.set_title(\"Horizontal bar plot\")\n ch.set_subtitle(\"Horizontal with color grouping\")\n ch.plot.bar(\n data_frame=quantity_by_fruit,\n categorical_columns=\"fruit\",\n numeric_column=\"quantity\",\n color_column=\"fruit\",\n )\n ch.show(_OUTPUT_FORMAT)", "def bar_plot(df, field_name, graph_title, threshold_value, x_axis_label, y_axis_label):\n\n x = df[field_name].value_counts().sort_values()\n x[x > threshold_value].plot(kind='barh', figsize=(12, 8), title=graph_title, x=x_axis_label, y=y_axis_label)\n return", "def visualize_tgt_by_categorical(df, var, target):\n import seaborn as sns\n import matplotlib.pyplot as plt\n import pandas as pd\n \n plt.figure(figsize=(10,5))\n \n grouped_values = df.groupby(var)[target].mean().sort_values(ascending = False).reset_index()\n\n sns.set(style = 'white')\n sns.barplot(x = var, y = target, data = grouped_values, palette = sns.color_palette(\"RdBu\", n_colors = 7))\n\n return plt.show()", "def plot_class_distribution(labels):\n num_classes = get_num_classes(labels)\n count_map = Counter(labels)\n counts = [count_map[i] for i in range(num_classes)]\n idx = np.arange(num_classes)\n plt.bar(idx, counts, width=0.8, color='b')\n plt.xlabel('Class')\n plt.ylabel('Number of samples')\n plt.title('Class distribution')\n plt.xticks(idx, idx)\n plt.show()", "def grant_outcomes_barchart(dframe):\n # prepare dataframe\n dframe = df.copy()\n dframe.columns = [col.lower().replace(' ','_') for col in dframe.columns]\n dframe = dframe[dframe['organization_name'].notnull()]\n dframe.drop(['thank_you_sent','report_due','report_sent'],axis=1,\n inplace=True)\n dframe.set_index(dframe['date_application_sent'],inplace=True)\n\n grant_stage = []\n [grant_stage.append(status.lower().strip()) for status in dframe.stage]\n dframe['stage'] = grant_stage\n grant_status = [] # merge status to 3 primary categories, make 'awarded' tag\n for status in dframe.stage:\n if status not in ['obligations complete','pledged','posted']:\n grant_status.append(status)\n else:\n grant_status.append('awarded')\n dframe['grant_status'] = grant_status\n\n # create chart\n color_dict = {'awarded':'#adebad','not approved':'#d6746f',\n 'submitted':'#ffffb3'}\n grant_count_trace = []\n for status in dframe.grant_status.unique():\n grant_count_trace.append(go.Bar(\n x = dframe[dframe.grant_status==status].resample('Q')['stage'].count().index,\n y = dframe[dframe.grant_status==status].resample('Q')['stage'].count(),\n name = status,\n marker = {'color':color_dict[status]},\n opacity = .8))\n\n layout = {'barmode':'stack',\n 'hovermode':'closest',\n 'paper_bgcolor':'#303939',\n 'plot_bgcolor':'#303939',\n 'legend':{'font':{'color':'#CCCCCC'}},\n 'yaxis':{'title':'no. 
applications',\n 'tickfont':{'color':'#CCCCCC'},\n 'titlefont':{'color':'#CCCCCC'},\n 'showgrid':False},\n 'xaxis':{'title':'quarter submitted',\n 'titlefont':{'color':'#CCCCCC'},\n 'tickfont': {'color':'#CCCCCC'}},\n 'title':'Grant Application<br>Status Overview',\n 'titlefont':{'color':'#CCCCCC'}}\n\n fig = {'data':grant_count_trace, 'layout':layout}\n return fig", "def visualize_confidence_level(prediction_proba):\n data = (prediction_proba[0]*100).round(2)\n grad_percentage = pd.DataFrame(data = data,columns = ['Percentage'],index = ['Est','Int','Int_Est','Rob','Rob_Est','Rob_Int','Rob_Int_Est'])\n ax = grad_percentage.plot(kind='barh', figsize=(7, 4), color='#0067e7', zorder=10, width=0.8)\n ax.legend().set_visible(False)\n ax.set_xlim(xmin=0, xmax=100)\n \n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.spines['left'].set_visible(True)\n ax.spines['bottom'].set_visible(True)\n\n ax.tick_params(axis=\"both\", which=\"both\", bottom=\"off\", top=\"off\", labelbottom=\"on\", left=\"off\", right=\"off\", labelleft=\"on\")\n \n vals = ax.get_xticks()\n for tick in vals:\n ax.axvline(x=tick, linestyle='dashed', alpha=0.4, color='#eeeeee', zorder=1)\n\n ax.set_xlabel(\"Percentage (%) confidence level\", labelpad=2, weight='bold', size=12)\n ax.set_ylabel(\"Victimization\", labelpad=10, weight='bold', size=12)\n ax.set_title('Prediction confidence level', fontdict=None, loc='center', pad=None, weight='bold')\n\n st.pyplot()\n \n return", "def makeComparsionChart(columns, data):\n fig = plt.figure(figsize=(16, 10))\n gs = gridspec.GridSpec(2, 3, wspace = 0.2, hspace=0.2, right=0.96, left=0.04)\n ax1 = plt.subplot(gs[0, 0:1], label=\"\")\n ax2 = plt.subplot(gs[0, 1:2], label=\"\" )\n ax3 = plt.subplot(gs[0, 2:3], label=\"\" )\n ax4 = plt.subplot(gs[1, 0:1], label=\"\" )\n ax5 = plt.subplot(gs[1, 1:2], label=\"\" )\n ax1.set_title('Before Scaling')\n ax2.set_title('After Standard Scaler')\n ax3.set_title('After Min-Max Scaler')\n ax4.set_title('After Robust Scaler')\n ax5.set_title('After Normalization')\n\n for column in columns:\n sns.kdeplot(data[0][column], ax=ax1, legend=False)\n sns.kdeplot(data[1][column], ax=ax2, legend=False)\n sns.kdeplot(data[2][column], ax=ax3, legend=False)\n sns.kdeplot(data[3][column], ax=ax4, legend=False)\n sns.kdeplot(data[4][column], ax=ax5, legend=False)\n\n plt.show()", "def figure_2(df):\n\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 6))\n f, axes = plt.subplots(4, 1, figsize=(6, 9)) # , sharex=True)\n sns.despine(top=True, bottom=True)\n # f.suptitle(\"Etiology of Central Events, Grouped by %Central Events\")\n\n OSA_pure_hist = replace_etiology_labels(histo_dx_includes(df.loc[df['BaseDx'] == \"Mainly OSA\"], return_df=True).sort_values(\"Dx\"))\n OSA_predom_hist = replace_etiology_labels(histo_dx_includes(df.loc[df['BaseDx'] == \"Combined OSA/CSA\"], return_df=True).sort_values(\"Dx\"))\n CSA_predom_hist = replace_etiology_labels(histo_dx_includes(df.loc[df['BaseDx'] == \"Predominantly CSA\"], return_df=True).sort_values(\"Dx\"))\n CSA_pure_hist = replace_etiology_labels(histo_dx_includes(df.loc[df['BaseDx'] == \"Pure CSA\"], return_df=True).sort_values(\"Dx\"))\n\n # Create count plot for each #CSA on the left, then a Pie Chart with proportion on the right\n\n hatches = ['///', 'xxx', '---', '', '']\n face_color = ['white', 'white', 'white', 'white', 'dimgrey']\n\n # Pure OSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=OSA_pure_hist, ax=axes[3])\n for i, this_bar in 
enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[3].set(xlabel=\"Number of Patients\", ylabel=\"<10% CSA\")\n\n # Predom OSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=OSA_predom_hist, ax=axes[2])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[2].set(xlabel=\"\", ylabel=\"10-49.9% CSA\")\n\n # Predom CSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=CSA_predom_hist, ax=axes[1])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[1].set(xlabel=\"\", ylabel=\"50-90% CSA\")\n\n # Pure CSA\n bar = sns.barplot(x=\"Count\", y=\"Dx\", data=CSA_pure_hist, ax=axes[0])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[0].set(xlabel=\"\", ylabel=\">90% CSA\")\n\n # Combined X axis for L side\n axes[3].get_shared_x_axes().join(axes[3], axes[2], axes[1], axes[0])\n axes[0].set_xticklabels(\"\")\n axes[1].set_xticklabels(\"\")\n axes[2].set_xticklabels(\"\")\n # Leave bottom aka [3,0] labels in\n\n # Resize all\n axes[0].autoscale()\n axes[1].autoscale()\n axes[2].autoscale()\n axes[3].autoscale()\n\n f.tight_layout(rect=[0, 0, 1, 1]) # .95 to leave space for title\n f.savefig('Figure 2 - etio by perc csa', dpi=100)\n # plt.show()", "def exercise_2(self):\n region = self.countries_df[\"Region\"]\n ylabels = [s.rstrip() for s in region.unique().tolist() ]\n print(ylabels)\n\n # Create count plot with region on the y-axis\n ax = sns.countplot(y=region)\n ax.set_yticklabels(ylabels, rotation=0, fontsize=\"9\", va=\"center\")\n\n # Show plot\n plt.show()", "def grafCasosAtivos(tipPDF):\n\n\t\tdf = pd.read_csv('dadosPaises.csv') #dataFrame holding the data table\n\t\tdf = df.sort_values(by=['Casos Ativos'], ascending=False) #sort by the 'Casos Ativos' column in descending order\n\t\tdf2 = df[:5] # split the first 5 rows into a new dataframe\n\n\t\tfig, ax = plt.subplots()\n\n\t\tindex = np.arange(len(df2['Casos Ativos'])) #index defining the label positions\n\t\tbar_width = 0.45 #bar width\n\t\tscore_label = np.arange(0,50000,5000) #array with the values shown on the y-axis\n\t\tbar = ax.bar(index, df2['Casos Ativos'], bar_width, label='Casos Ativos',color='#ff4500')\n\t\t\n\t\t#setting axis labels\n\t\tax.set_xticks(index) #spacing between bars\n\t\tax.set_xticklabels(df2['Dados do País']) #bar labels\n\t\tax.set_yticks(score_label) #spacing between y-axis ticks\n\t\tax.set_yticklabels(score_label) #y-axis tick labels\n\t\tax.legend() #legend\n\n\t\t#loop to plot the labels\n\t\tfor i in index:\n\t\t\tax.annotate('{}'.format(bar[i].get_height()), #string to be plotted\n\t\t\t\t\t\txy=(bar[i].get_x(),bar[i].get_height()), #where the label is drawn\n\t\t\t\t\t\txytext=(0,2), #additional label offset\n\t\t\t\t\t\ttextcoords='offset points',\n\t\t\t\t\t\tva='bottom'\n\t\t\t\t\t\t)\n\n\t\tplt.title('Countries with the most active cases')\n\n\t\t#If true, save as pdf, otherwise png\n\t\tif tipPDF:\n\t\t\tnomFig = 'grafico.pdf'\n\t\telse :\n\t\t\tnomFig = 
'grafico.png'\t\n\tplt.savefig(nomFig) #saves the image (can be saved as pdf by changing the file name)", "def plot_analysis(self, pw, ri):\r\n print(\"\\n\\n*************************\\n\\\r\n Analysing User's liked songs\\n*************************\")\r\n attributes = sum([[i]*4 for i in ri.columns],[])\r\n levels = pw.columns\r\n \r\n work_space = pd.DataFrame({\"attributes\":attributes, \"levels\":levels, \"part_worth\":pw.values[0]})\r\n\r\n print(\"\\nPart worth of levels:\")\r\n f = plt.figure(figsize=(10, 10))\r\n sns.barplot(y=\"levels\", x=\"part_worth\", data = work_space, hue=\"attributes\", \r\n dodge=False)\r\n plt.legend(bbox_to_anchor=(1, 1), loc=2)\r\n plt.show()\r\n \r\n print(\"\\nRelative importance of attributes:\")\r\n relative_imp = pd.DataFrame({\"attributes\":ri.columns, \"relative_importance\":ri.values[0]})\r\n f = plt.figure(figsize=(8, 8))\r\n sns.barplot(x=\"attributes\", y=\"relative_importance\", data = relative_imp, dodge=False)\r\n plt.xticks(rotation=45)\r\n plt.show()", "def plots(corpus_parts, corpus):\n \"\"\"\n given the data obtained by the function percentage_hapaxes(dv_corpus, tokenized_corpus),\n the graphic for the percentage of hapaxes per partition is plotted\n \"\"\"\n h_parts = hapaxes_parts(corpus_parts)\n part_size = [x for x in range(len(h_parts))]\n \n percent_h = percentage_hapaxes(corpus_parts, corpus)\n percent_length = [i for i in range(len(percent_h))] \n \n fig, (ax1, ax2) = plt.subplots(1, 2)\n plt.setp(ax1, xticks=np.arange(0, len(part_size), 1))\n plt.setp(ax2, xticks=np.arange(0, len(percent_length), 1))\n fig.suptitle('Number (left) and percentage (right) of hapaxes in each part')\n ax1.bar(part_size, h_parts)\n ax2.bar(percent_length, percent_h) \n return plt.show()", "def prop_types(houses:pd.DataFrame) -> None:\n sns.set_style('whitegrid')\n indexNames= houses[houses['PRICE'] >= 3000000].index\n houses= houses.drop(indexNames)\n \n ax= sns.catplot(x= 'PROPERTY_TYPE', y= 'PRICE', kind= 'box', data= houses)\n ax.set_xticklabels(rotation=30)\n plt.tight_layout()\n plt.show()\n \n ax= sns.countplot(x= 'PROPERTY_TYPE', data= houses)\n ax.set_xticklabels(ax.get_xticklabels(), rotation= 30, ha=\"right\", fontsize=9)\n plt.show()", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n 
ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def dataframe_to_language_stats(dataframe, n_runs, n_batches, n_gens, possible_form_lengths):\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. collapsing the two different reduplication strategies?)\n proportion_column = np.array(dataframe['proportion'])\n proportion_column_as_results = proportion_column.reshape((n_runs*n_batches, n_gens, n_language_classes))\n return proportion_column_as_results", "def plot_model_rates(class_name, model, ax):\n true_positives, totals = model.range_metrics[class_name]\n prob_rates = model.class_prob_rates[class_name]\n\n bins = np.arange(5)\n\n # color bars based on freq.\n # norm = plt.Normalize(0, max(totals))\n # colors = mpl.cm.Blues(norm(totals))\n\n ax.bar(bins, prob_rates, color=P_BAR_COLOR, edgecolor=BAR_EDGE_COLOR)\n ax.set_ylim(0, 1)\n for axis in ['top', 'bottom', 'left', 'right']:\n ax.spines[axis].set_linewidth(1.5)\n\n index = 0\n\n for xy in zip(np.arange(5), prob_rates):\n # Get class count of current index\n count = str(totals[index])\n loc = list(xy)\n # lower annotation, so its not out of the plot for large bars\n if loc[1] > .9:\n xy = tuple([loc[0], loc[1] - .1])\n y_val = xy[1]\n ax.annotate(count, xy=xy, textcoords='data', ha='center',\n va='bottom', fontsize=8)\n index += 1", "def run_and_plot(self):\n self.raw_processing()\n self.data_averaging_and_cleaning()\n\n print(self.organized_names)\n print(self.organized_film)\n print(self.organized_plank)\n\n height = self.organized_film\n bars = tuple(self.organized_names.copy())\n y_pos = np.arange(len(bars))\n\n plt.bar(y_pos, height)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('CFU/mL count')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()\n\n height2 = self.organized_plank\n\n plt.bar(y_pos, height2)\n plt.xticks(y_pos, bars)\n plt.xlabel('TH% in 100ul water/TH mixture')\n plt.ylabel('Proportion of Biofilm CFUs to Planktonic CFUs')\n plt.title('Experiment 2.5 (Sucrose Concentration) 7 Aug 2018')\n\n plt.show()", "def value_count_bottom(df, cat_features, bottom = 10, save_plot = False, path_dir = None ):\n cat_features = list(set(cat_features))\n cols = cat_features\n if len(cols) != 0:\n for i, col in sorted(list(enumerate(cols)), key=lambda x: x[1]):\n fig, ax = plt.subplots()\n fig.set_size_inches(4.5, 5.5)\n fig.set_size_inches(4, 4)\n ax = df[col].value_counts()[-bottom:].plot(kind='barh')\n plt.title(str(\"Distribution of BOTTOM \"+str(bottom)+ \" \" + col), fontsize=10)\n plt.show(block=False)\n if save_plot == True:\n plt.savefig((plot_dir + \"bottom_\"+str(bottom)+\"_value_count_ordinal.png\"))\n plt.clf()\n else:\n print(\"No categorial features to plot\")", "def plot_results(self):\n #get data\n new_df = self.solution.groupby('supplier').sum()\n new_df['count'] = self.solution.groupby('supplier').supplier.count()\n new_df = new_df.sort_values(by=['count'], ascending = False)\n \n #plotting \n ax = new_df.plot.bar(secondary_y = 'cost', rot=90)\n ax.legend(loc='upper left', bbox_to_anchor=(0., 1.11, 1., .102))\n ax.right_ax.legend(loc='upper right', bbox_to_anchor=(0., 1.11, 1., .102))\n ax.set_ylabel('count')\n ax.set_xlabel('supplier name')\n 
ax.right_ax.set_ylabel('cost')\n plt.tight_layout()", "def plot_avgs(li,arg):\n key = {'Word':1,'Paragraph':2}\n n_groups = len(article_list)\n en = []\n simple = []\n for sub_li in li:\n en.append(sub_li[1][key[arg]])\n simple.append(sub_li[2][key[arg]])\n\n fig, ax = plt.subplots()\n index = np.arange(n_groups)\n bar_width = 0.35\n\n rects1 = plt.bar(index, en, bar_width, alpha = 1, color = 'b', label = 'English')\n rects2 = plt.bar(index + bar_width, simple, bar_width, alpha = 1, color = 'r', label = 'Simple English')\n\n plt.xlabel('Article')\n plt.ylabel('Average Word Length')\n plt.title('Average ' + arg + ' Length of Simple English and English')\n plt.xticks(index + bar_width, article_list)\n plt.legend()\n\n plt.tight_layout()\n plt.show()", "def value_counts_plot(df):\n \n plt.figure(figsize=(15,10))\n \n #get rid of sort_index() to change the graph\n return df.value_counts().sort_index().plot(kind='bar')", "def create_grouped_bar_chart(stats: dict[str, list[int]]):\n\n figure, axes = plot.subplots()\n\n labels = [str(e) for e in CauseOfDeath]\n x = numpy.arange(len(labels))\n\n bar_width = 0.15\n max_value = 0\n\n rects = []\n i = 0\n for label, values in stats.items():\n max_value = max(max_value, max(values))\n rects.append(axes.bar(x + (i * bar_width), values, bar_width, label=label))\n i = i + 1\n\n axes.set_title(\"Deaths arranged by cause and animal type\")\n axes.set_ylabel(\"Amount\")\n axes.set_xticks(x)\n axes.set_xticklabels(labels)\n axes.legend()\n\n for rect in rects:\n attach_text_labels(rect, axes)\n\n figure.tight_layout()\n return figure", "def multiple_bars(self, df, nrows, ncols, dict):\n fig, axs = plt.subplots(nrows=8, ncols=1, figsize=(6, 9.3), sharey=\"row\")\n\n fig.subplots_adjust(left=0.03, right=0.97, hspace=0.3, wspace=0.05)\n\n indexes = df.index.tolist()\n df[\"index\"] = indexes\n df[\"effect_size\"] = df[\"index\"].apply(lambda x: x[0])\n df[\"sd\"] = df[\"index\"].apply(lambda x: x[1])\n df[\"group\"] = df[\"index\"].apply(lambda x: x[2])\n bar_width = 0.35\n # get an index to set the ticks for the x axis\n\n df_new = df.groupby(\"sd\")\n # for key, item in df_new:\n # print(df_new.get_group(key))\n for ax, (sd, dat) in zip(axs, df_new):\n n_groups = len(dat.index)\n index = np.arange(n_groups)\n\n # make barchart for permutation test\n bar1 = ax.bar(index, dat[\"perm\"], bar_width, color='b',\n label='Permutation test')\n # make barchart for t-test\n bar2 = ax.bar(index + bar_width, dat[\"t_test\"], bar_width, color='r',\n label='t-test')\n es = dat[\"effect_size\"].iloc[0]\n\n ax.set_ylabel(\"Error\")\n ax.set_xticks(index + bar_width / 2)\n ax.set_xticklabels(dict[\"xtickslabels\"])\n ax.set_xlabel(f\"Mean error for sd = {sd} per group size\")\n print(dat[\"sig\"])\n print(\"\\n\\n\")\n for rect, i in zip(bar1 + bar2, dat[\"sig\"]):\n height = rect.get_height()\n if i:\n ax.text(rect.get_x() + rect.get_width(), height, \"**\", ha='center', va='bottom')\n\n ax.legend()\n\n fig.suptitle(f\"Effect size = {es}\", y=1.0, fontsize = 15)\n fig.tight_layout()\n plt.show()", "def plot_barplot(\n df: pd.DataFrame,\n feature: str,\n length: int = None,\n cutoff: float = None,\n figsize: tuple = (5, 10),\n ticksize: int = 15,\n) -> pd.DataFrame:\n\n # count each category within the feature\n count = df.groupby(f\"{feature}\")[f\"{feature}\"].count().sort_values(ascending=False)\n # convert it into a dataframe\n df_count = pd.DataFrame(columns=([\"counts\"]))\n df_count.counts = count\n\n # plot barplot\n plt.figure(figsize=figsize)\n x = 
df_count.counts[:length]\n y = df_count.index[:length]\n b = sns.barplot(x=x, y=y)\n b.set_yticklabels(df_count.index[:length], size=ticksize)\n # add a cut off line\n if cutoff:\n plt.axhline(y=cutoff, linestyle=\"--\")\n plt.show()\n return df_count", "def graph_decade_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(15, 10)\r\n ax.set_title(\"Decade Police Deaths\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"decade\", \"count\", data=df, palette=\"winter_d\")\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"1_graph_decade_count.png\")", "def num_of_cat2_per_cat1(df, cat1, cat2, figsize=(12,5), normalize = False, num_label = 1, save_plot = False, path_dir = None ):\n # Group by category #1 and counts the unique values of category #2 for each group\n comp_count = df.groupby(cat1)[cat2].nunique().sort_values(ascending=False)\n if (normalize == True):\n comp_count = comp_count*100.0/(comp_count.sum())\n # Bar plot\n plt.figure(figsize=figsize)\n \n plot = sns.barplot(comp_count.index, comp_count.values, alpha=0.8)\n if (normalize == True):\n plt.ylabel(str('Number of ' + cat2 + ' [%]'), fontsize=12)\n plt.title(str('Percentage of '+ cat2+ ' per '+ cat1))\n else:\n plt.ylabel(str('Number of ' + cat2), fontsize=12)\n plt.title(str('Number of '+ cat2+ ' per '+ cat1))\n plt.xlabel(cat1, fontsize=12)\n plt.xticks(rotation=90)\n for ind, label in enumerate(plot.get_xticklabels()):\n if ind % num_label == 0: # every 15th label is kept\n label.set_visible(True)\n else:\n label.set_visible(False)\n plt.show()\n if save_plot == True:\n plt.savefig((plot_dir + \"count_of\"+str(cat1)+\"per _\"+str(cat2)+\".png\"))\n plt.clf()", "def graph_year_state_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 10)\r\n plt.xticks(rotation=45)\r\n ax.set_title(\"2001 and 2007 State Police Deaths\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"state\", \"count\", data=df, palette=\"bone\", hue='year')\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"1_graph_top_state_count.png\")", "def grant_dollars_barchart(dframe):\n # prepare dataframe\n dframe = df.copy()\n dframe.columns = [col.lower().replace(' ','_') for col in dframe.columns]\n dframe = dframe[dframe['organization_name'].notnull()]\n dframe.drop(['thank_you_sent','report_due','report_sent'],axis=1,\n inplace=True)\n dframe.set_index(dframe['date_application_sent'],inplace=True)\n\n # create chart\n color_dict = {'awarded':'#adebad','not approved':'#d6746f',\n 'submitted':'#ffffb3'}\n grant_stage = []\n [grant_stage.append(status.lower().strip()) for status in dframe.stage]\n dframe['stage'] = grant_stage\n grant_status = [] # merge status to 3 primary categories, make 'awarded' tag\n for status in dframe.stage:\n if status not in ['obligations complete','pledged','posted']:\n grant_status.append(status)\n else:\n grant_status.append('awarded')\n dframe['grant_status'] = grant_status\n\n # create chart\n grant_outcomes_trace = []\n for status in dframe.grant_status.unique():\n # sum 'amount' column totals for awarded grants\n if status == 'awarded':\n grant_outcomes_trace.append((go.Bar(\n x = dframe[dframe.grant_status==status].resample('Q')['amount'].count().index,\n y = dframe[dframe.grant_status==status].resample('Q')['amount'].sum(),\n name = status,\n 
marker = {'color': color_dict[status]},\n opacity = .8)))\n\n else:\n # sum 'requested amount' column totals for submitted and not approved\n grant_outcomes_trace.append((go.Bar(\n x = dframe[dframe.grant_status==status].resample('Q')['requested_amount'].count().index,\n y = dframe[dframe.grant_status==status].resample('Q')['requested_amount'].sum(),\n name = status,\n marker = {'color': color_dict[status]},\n opacity = .8)))\n\n layout = {'barmode':'stack',\n 'hovermode':'closest',\n 'legend': {'font': {'color': '#CCCCCC'}},\n 'paper_bgcolor': '#303939',\n 'plot_bgcolor': '#303939',\n 'yaxis':\n {'title':'US$',\n 'tickfont':{'color':'#CCCCCC'},\n 'titlefont': {'color':'#CCCCCC'},\n 'showgrid':False},\n 'xaxis':{'title':'quarter submitted',\n 'titlefont': {'color':'#CCCCCC'},\n 'tickfont': {'color':'#CCCCCC'}},\n 'title':'Grant Application<br>Outcomes Overview',\n 'titlefont': {'color':'#CCCCCC'}}\n\n fig = {'data':grant_outcomes_trace,'layout':layout}\n return fig", "def plot_type_of_topic(data_frame: pb.DataFrame) -> None:\n plt.interactive(False)\n plt.figure()\n data_frame.plot(kind='bar', x= data_frame['TopicID'])\n plt.show()", "def visualize_label_distributions(data, signnames):\n n_classes = len(signnames)\n n_splits = len(data)\n fig = plt.figure(figsize = (10,10))\n ax = fig.add_subplot(111)\n width = 0.8 / n_splits\n for i, split in enumerate(data.keys()):\n X, y = data[split]\n labels, counts = np.unique(y, return_counts = True)\n fraction = counts / y.shape[0]\n ax.bar(labels + i*width, fraction, width, label = split)\n ax.hlines(1.0/n_classes, *(ax.get_xlim()), label = \"uniform distribution\")\n ax.set_xlabel(\"label\")\n ax.set_ylabel(\"fraction\")\n ax.set_title(\"Label Distributions\")\n ax.legend()\n fig.savefig(os.path.join(img_dir, \"class_distributions.png\"))", "def performanceBarCharts(): \n ##tauopathy HCS pearson\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null YFP Model\", \"Null DAPI Model\"]\n ml_model_perf = pickle.load(open(\"pickles/ml_model_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_perf.pkl\", \"rb\"))\n null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n x = [1, 2, 3]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'gold', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance\",fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n ax.set_xticklabels(xlabels,fontsize=12, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(3))\n plt.savefig(\"matplotlib_figures/tau_performance_pearson_special_HCS_model.png\", dpi=300)\n\n ##tauopathy HCS MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null YFP Model\", \"Null DAPI Model\"]\n ml_model_perf = pickle.load(open(\"pickles/ml_model_mse_perf.pkl\", \"rb\"))\n null_model_perf = pickle.load(open(\"pickles/null_model_mse_perf.pkl\", \"rb\"))\n 
null_dapi_perf = pickle.load(open(\"pickles/single_channel_DAPI_null_model_mse_perf.pkl\", \"rb\"))\n y= np.array([ml_model_perf[0], null_model_perf[0], null_dapi_perf[0]]).round(decimals=2)\n stds = [ml_model_perf[1], null_model_perf[1], null_dapi_perf[1]]\n x = [1, 2, 3]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'gold', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance\",fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n ax.set_xticklabels(xlabels,fontsize=12, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(3))\n plt.savefig(\"matplotlib_figures/tau_performance_mse_special_HCS_model.png\", dpi=300)\n\n ##osteosarcoma 3-fold (raw images) pearson\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ml_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_null_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [0.075, 0.1156] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with Raw Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.02)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_pearson_cross_val.png\", dpi=300)\n\n ##osteosarcoma 3-fold (raw images) MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ml_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_null_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [0.15, .2312] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance with Raw Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.01)\n 
ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_mse.png\", dpi=300)\n\n ##osteosarcoma 3-fold (ablated image training) pearson\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ablated_ml_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_ablated_null_model_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [.1288, .1385] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with\\n95% Ablated Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.0)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/osteosarcoma_performance_pearson_trained_ablation_model.png\", dpi=300)\n\n ##osteosarcoma 3-fold (ablated image training) MSE\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"ML Model\", \"Null Model\"]\n x = [1, 2]\n ys = []\n nulls = []\n for fold in [1,2,3]:\n osteo_ml_perf = pickle.load(open(\"pickles/osteo_ablated_ml_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n osteo_null_perf = pickle.load(open(\"pickles/osteo_ablated_null_model_mse_perf_fold_{}.pkl\".format(fold), \"rb\"))\n ys.append(osteo_ml_perf)\n nulls.append(osteo_null_perf) \n y = np.array([np.mean([result[0] for result in ys]), np.mean([result[0] for result in nulls])]).round(decimals=2)\n stds = [.2576, .2771] ##see https://www.statstodo.com/CombineMeansSDs_Pgm.php\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=['red', 'blue'], zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .16, j +.03),fontsize=16, fontname=\"Times New Roman\")\n plt.title(\"MSE Performance with\\n95% Ablated Hoechst Images\",fontname=\"Times New Roman\", fontsize=20, y=1.0)\n ax.set_ylabel(\"MSE\", fontname=\"Times New Roman\", fontsize=18)\n plt.yticks(fontname=\"Times New Roman\", fontsize=18)\n ax.set_xticklabels(xlabels,fontsize=18, fontname=\"Times New Roman\")\n ax.set_ylim((0,2))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n 
plt.savefig(\"matplotlib_figures/osteosarcoma_performance_MSE_trained_ablation_model.png\", dpi=300)\n\n ##supplemental single channel learning YFP and DAPI performance\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"YFP-tau to AT8-pTau\", \"DAPI to AT8-pTau\"]\n YFP_ml_model = pickle.load(open(\"pickles/single_channel_YFP_ml_model_perf.pkl\", \"rb\"))\n DAPI_ml_model = pickle.load(open(\"pickles/single_channel_DAPI_ml_model_perf.pkl\", \"rb\"))\n y = np.array([YFP_ml_model[0], DAPI_ml_model[0]]).round(decimals=2)\n stds = [YFP_ml_model[1], DAPI_ml_model[1]]\n x = [1, 2]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=\"cornflowerblue\", zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Performance with\\nSingle Channel Input Learning\",fontname=\"Times New Roman\", fontsize=17, y=1.01)\n ax.set_xlabel(\"Model\", fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=14)\n plt.yticks(fontname=\"Times New Roman\", fontsize=14)\n ax.set_xticklabels(xlabels,fontsize=14, fontname=\"Times New Roman\")\n ax.set_ylim((0,1))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/supplemental_single_channel_learning.png\", dpi=300)\n\n ##supplemental single channel learning YFP and DAPI, input similarity to prediction\n plt.cla()\n plt.clf()\n width = .50\n fig, ax = plt.subplots()\n xlabels = [\"null\", \"YFP-tau to AT8-pTau\", \"DAPI to AT8-pTau\"]\n y = np.array([0.94894628, 0.98718720]).round(decimals=2)\n stds = [0.1673864, 0.039042]\n x = [1, 2]\n rects = ax.bar(x, y, width, yerr=stds, capsize=3, error_kw=dict(lw=1, capsize=3, capthick=1), color=\"orange\", zorder=3)\n for i,j in zip(x, y):\n ax.annotate(str(j)[0:4],xy=(i - .20, j +.03),fontsize=12, fontname=\"Times New Roman\")\n plt.title(\"Pearson Similarity Between\\nInput Channel and Predicted Channel\",fontname=\"Times New Roman\", fontsize=17)\n ax.set_xlabel(\"Model\", fontname=\"Times New Roman\", fontsize=14)\n ax.set_ylabel(\"Pearson Correlation Coefficient\", fontname=\"Times New Roman\", fontsize=14)\n plt.yticks(fontname=\"Times New Roman\", fontsize=14)\n ax.set_xticklabels(xlabels,fontsize=14, fontname=\"Times New Roman\")\n ax.set_ylim((0,1.13))\n ax.yaxis.grid(True, linestyle='-', which='major', color='grey', alpha=.25, zorder=0)\n ax.xaxis.set_major_locator(plt.MaxNLocator(2))\n plt.savefig(\"matplotlib_figures/supplemental_single_channel_learning_pearson_similarity_input_and_predicted.png\", dpi=300)", "def plotdFvsLambda2(nb=10):\n x = numpy.arange(len(df_allk))\n if len(x) < nb:\n return\n xs = numpy.array_split(x, len(x)/nb+1)\n mnb = max([len(i) for i in xs])\n fig = pl.figure(figsize = (8,6))\n width = 1./(len(P.methods)+1)\n elw = 30*width\n colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}\n ndx = 1\n for x in xs:\n lines = tuple()\n ax = pl.subplot(len(xs), 1, ndx)\n for name in P.methods:\n y = [df_allk[i][name]/P.beta_report for i in x]\n ye = [ddf_allk[i][name]/P.beta_report for i in x]\n line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, 
error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))\n lines += (line[0],)\n for dir in ['left', 'right', 'top', 'bottom']:\n if dir == 'left':\n ax.yaxis.set_ticks_position(dir)\n else:\n ax.spines[dir].set_color('none')\n pl.yticks(fontsize=10)\n ax.xaxis.set_ticks([])\n for i in x+0.5*width*len(P.methods):\n ax.annotate('$\\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')\n pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))\n ndx += 1\n leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\\mathrm{\\Delta G\\/%s\\/}\\mathit{vs.}\\/\\mathrm{lambda\\/pair}$' % P.units, fancybox=True)\n leg.get_frame().set_alpha(0.5)\n pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')\n pl.close(fig)\n return", "def draw_bar(df=data):\n pt = {\n 1: 'Credit card',\n 2: 'Cash',\n 3: 'No charge',\n 4: 'Dispute',\n 5: 'Unknown',\n 6: 'Voided trip',\n }\n df['payment_type'] = df['payment_type'].replace(pt)\n gr = df.groupby(['payment_type', 'weekday']) \\\n .agg(total_amount=('total_amount', 'sum')) \\\n .reset_index(drop=False)\n return px.bar(gr, x='weekday', y='total_amount', color='payment_type', barmode='group') \\\n .update_layout(\n template='plotly_dark',\n plot_bgcolor='rgba(0, 0, 0, 0)',\n paper_bgcolor='rgba(0, 0, 0, 0)',\n )", "def leitner_proportions(df):\n denom = df.shape[0]\n prop_dict = {}\n\n for i in range(1,6):\n df_i = df[df['comfort_level'] == i]\n numer = df_i.shape[0]\n prop_dict[i] = numer / denom\n\n prop_df = pd.DataFrame.from_dict([prop_dict], orient='columns') \n\n prop_df = prop_df.T.rename(columns={0:'proportion'}) \n \n return prop_df", "def figure_size_resp_bms(df):\n sns.set_style('ticks')\n gs = GridSpec(2, 3)\n fig = plt.figure(figsize=(7, 8))\n axs = [fig.add_subplot(gs[0, 0]), fig.add_subplot(gs[0, 1]), fig.add_subplot(gs[0, 2]),\n fig.add_subplot(gs[1, :])]\n # fig, axs = plt.subplots(2, 2, figsize=(8, 6))\n # axs = axs.reshape(-1)\n\n sns.boxplot('genotype', 'area', hue='treatment', data=df, ax=axs[0], order=('wt', 'ko'), hue_order=('veh', 'bms'))\n axs[0].set_ylim((0, 2000000))\n axs[0].set_ylabel('Responsive area in µm²')\n sns.boxplot('genotype', 'max_df', hue='treatment', data=df, ax=axs[1], order=('wt', 'ko'), hue_order=('veh', 'bms'))\n axs[1].set_ylabel('Average peak response amplitude (%)')\n axs[1].set_ylim((0, 3.5))\n sns.boxplot('genotype', 'fwhm', hue='treatment', data=df, ax=axs[2], order=('wt', 'ko'), hue_order=('veh', 'bms'))\n gp = df.groupby(('genotype', 'treatment'))\n t = np.arange(-3, 5, .1)\n for g in product(('wt', 'ko'), ('veh', 'bms')):\n try:\n avg_df = np.vstack(gp.get_group(g).avg_df.as_matrix())\n mean_df = avg_df.mean(0)\n # mean_df[mean_df > 0.7] = 0\n axs[3].plot(t, mean_df, label=g, linewidth=2)\n except KeyError:\n pass\n axs[3].legend()\n axs[3].set_xlabel(TIME_LABEL)\n axs[3].set_ylabel('Average $\\Delta$ F / F (%)')\n fig.tight_layout()\n fig.savefig('Intrinsic/figure/responses.png')\n fig.savefig('Intrinsic/figure/responses.svg')\n with open('Intrinsic/figure/stats.txt', 'w') as f:\n f.write('Mann-Whitney U-test\\n\\n')\n for g1, g2 in combinations(product(('wt', 'ko'), ('veh', 'bms')), 2):\n f.write(f'+ {g1} vs {g2}:\\n')\n pval = mannwhitneyu(df.area[df.genotype == g1], df.area[df.genotype == g2]).pvalue\n f.write(f'\\tArea comparison {g1} vs {g2}: {pval:.3f}\\n')\n pval = mannwhitneyu(df.max_df[df.genotype == g1], df.max_df[df.genotype == g2]).pvalue\n 
f.write(f'\\tAmplitude comparison {g1} vs {g2}: {pval:.3f}\\n')\n pval = mannwhitneyu(df.fwhm[df.genotype == g1], df.fwhm[df.genotype == g2]).pvalue\n f.write(f'\\tFull width at half maximum comparison {g1} vs {g2}: {pval:.3f}\\n')", "def strategic_plan_barchart(dframe, colors=['#f4aa42','#becca5','#9fa399',\n '#d88668','#43a559','#edf760']):\n # prepare dataframe\n # check if user has changed number of columns in sheet\n if len(dframe.columns) != 11:\n issue = 'User has altered spreadsheet by {} {} columns.'\n if len(dframe.columns) < 11:\n action = 'removing'\n number = 11 - len(dframe.columns)\n print(issue.format(action,number))\n else:\n action = 'adding'\n number = len(dframe.columns) - 11\n print(issue.format(action,number))\n\n dframe.drop(dframe.index[0:6],inplace=True)\n new_cols = ['start_qt','start_yr','goal_id','topic_area','task_name',\n 'task_stage','blank1','start','finish','owner','internal_status']\n dframe.columns = new_cols\n dframe.drop('blank1',axis=1,inplace=True)\n dframe = dframe[dframe.task_stage.notnull()] # filter dataframe for items with a stage\n dframe['status'] = [x.lower().strip() for x in dframe.task_stage]\n dframe['start'] = [pd.to_datetime(date.split()[1]) for date in dframe.start]\n dframe['finish'].fillna(method='ffill',inplace=True)\n\n finish = []\n for date in dframe['finish']:\n if (type(date)) is str:\n finish.append(pd.to_datetime(date.split()[1]))\n else: finish.append(pd.to_datetime(date))\n dframe['finish'] = finish\n dframe['finish_qt'] = ['Q'+str(date.quarter) for date in dframe['finish']]\n YrQt_complete = ['{} Q{}'.format(date.year,date.quarter) for date in dframe['finish']]\n dframe['YrQt_complete'] = YrQt_complete\n\n # create chart\n if len(colors) != dframe['status'].nunique():\n colors = None\n\n trace = []\n clrs = dict(zip(sorted(dframe['status'].unique().tolist()),colors))\n for sts, clr in zip(sorted(dframe['status'].unique()),clrs.values()):\n trace.append(go.Bar(\n x = dframe[(dframe['task_stage']==sts)].groupby('YrQt_complete')['YrQt_complete'].count().index,\n y = dframe[(dframe['task_stage']==sts)].groupby('YrQt_complete')['YrQt_complete'].count(),\n name = sts,\n marker = {'color': clr},\n opacity = .8))\n\n layout = {\n 'barmode':'stack',\n 'legend': {'font':{'color':'#CCCCCC'}},\n 'titlefont': {'color': '#CCCCCC'},\n 'hovermode':'closest',\n 'paper_bgcolor': '#303939',\n 'plot_bgcolor': '#303939',\n 'xaxis':{'title':'Target Completion Quarter',\n 'tickfont': {'color': '#CCCCCC'},\n 'titlefont': {'color': '#CCCCCC'}},\n 'yaxis':{'title':'No. 
of Activities',\n 'tickfont': {'color': '#CCCCCC'},\n 'titlefont': {'color': '#CCCCCC'}},\n 'title':'Strategic Plan Overview'}\n\n fig = {'data':trace,'layout':layout}\n return fig", "def BarOverview(data):\n return dcc.Graph(id=\"BarOverview\", className=\"bar\", figure=dict(\n data=[go.Bar(\n x=data[\"frequencies\"],\n y=data[\"names\"],\n orientation='h',\n marker={\n 'color': '#ff4058'\n },\n )],\n layout=dict(\n title=\"<b>Most common Persons</b>\",\n font=dict(family='Soria, Times New Roman, Times, serif', color='#002C77', size=19),\n margin=dict(l=10, r=20, t=50, b=30),\n plot_bgcolor=\"rgba(0,0,0,0)\",\n paper_bgcolor=\"rgba(0,0,0,0)\",\n xaxis=dict(tick0=0, dtick=max(data[\"frequencies\"])),\n yaxis=dict(ticks='outside',\n showgrid=True,\n showline=False,\n showticklabels=False),\n annotations=[dict(xref='paper', yref='y',\n x=0, y=yd,\n font=dict(\n color=\"#000000\",\n size=19\n ),\n text=str(yd),\n showarrow=False) for xd, yd in zip(data[\"frequencies\"], data[\"names\"])]\n )\n ))", "def graph_max_cause(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=2)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(20, 10)\r\n plt.xticks(rotation=20)\r\n ax.set_title(\"States' Max Police Death Causes >= 150\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"state\", \"max_count\", data=df, palette=\"bone\", hue='max_cause')\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"3_graph_max_cause.png\")", "def create_score_barchart(reviews, filename):\n\n score_counter = [0, 0, 0, 0, 0]\n\n for review in reviews:\n score = int(review['review/score'])\n score_counter[score - 1] += 1\n\n\n\n bar_chart = pygal.Bar()\n bar_chart.add('Scores', score_counter)\n bar_chart.add('Scores (2)', score_counter)\n\n bar_chart.title = \"Amazon review score\"\n bar_chart.x_labels = ['1 Star', '2 Star', '3 Star', '4 Star', '5 Star']\n\n bar_chart.render_to_file(filename)", "def plot_multi_abundance(\n bracken_combined, plot_file, min_percent\n):\n\n nrow, ncol = 1, 2\n\n fig, ax = plt.subplots(\n nrows=nrow, ncols=ncol, figsize=(\n ncol*14, nrow*14\n )\n )\n\n data = pandas.read_csv(bracken_combined, sep='\\t', index_col='name', header=0)\n # Use percentage rather than total reads across samples\n data = data[[c for c in data.columns if 'bracken_frac' in c]]\n data.columns = [d.replace(\".bracken_frac\", \"\") for d in data.columns]\n\n if min_percent > 0:\n keep_idx = []\n for i, row in data.iterrows():\n keep_col = [True for v in row if v >= min_percent]\n if any(keep_col):\n keep_idx.append(row.name)\n data = data[data.index.isin(keep_idx)]\n\n # Separate viruses\n viruses = []\n for name in data.index.tolist():\n if 'virus' in name.lower():\n viruses.append(data[data.index == name])\n viruses = pandas.concat(viruses)\n data = data.drop(viruses.index.tolist())\n\n human = data[data.index == 'Homo sapiens']\n data = data.drop('Homo sapiens')\n print(human)\n\n REMAINING_PATHOGENS = [p for p in PATHOGENS if p in data.index.tolist()]\n pathogens = data[data.index.isin(REMAINING_PATHOGENS)].sort_index()\n data = data.drop(REMAINING_PATHOGENS)\n\n REMAINING_CONTAMINATION = [p for p in CONTAM if p in data.index.tolist()]\n contams = data[data.index.isin(REMAINING_CONTAMINATION)].sort_index()\n data = data.drop(REMAINING_CONTAMINATION)\n\n print(pathogens)\n print(contams)\n\n viruses_collapsed = collapse_taxa(viruses, suffix=\"virus\")\n\n print(viruses_collapsed)\n\n other_collapsed = collapse_taxa(data, genus=True)\n\n 
print(other_collapsed)\n\n combined = []\n for name, df in {\n 'Human': human, 'Pathogens': pathogens, 'Contamination': contams,\n 'Viruses': viruses_collapsed, 'Microbes': other_collapsed\n }.items():\n df['domain'] = [name for _ in range(len(df))]\n combined.append(df)\n combined = pandas.concat(combined)\n\n print(combined)\n\n panel1 = combined[combined['domain'] != 'Microbes']\n panel2 = combined[combined['domain'] == 'Microbes']\n\n panel1.reset_index(level=0, inplace=True)\n panel2.reset_index(level=0, inplace=True)\n panel1.rename(columns={'index': 'taxon'}, inplace=True)\n panel2.rename(columns={'index': 'taxon'}, inplace=True)\n\n print(panel1)\n print(panel2)\n #\n panel1_melt = panel1.melt(id_vars=['taxon', 'domain'], value_name=\"abundance\", var_name=\"sample\")\n panel2_melt = panel2.melt(id_vars=['taxon', 'domain'], value_name=\"abundance\", var_name=\"sample\")\n\n print(panel1_melt)\n print(panel2_melt)\n\n panel1_melt['abundance'] = [None if ab == 0. else ab for ab in panel1_melt['abundance']]\n panel2_melt['abundance'] = [None if ab == 0. else ab for ab in panel2_melt['abundance']]\n p1 = sns.scatterplot(\n data=panel1_melt, x=\"sample\", y=\"taxon\", hue=\"domain\",\n size=\"abundance\", legend=False, sizes=(70, 2000), ax=ax[0]\n )\n\n p2 = sns.scatterplot(\n data=panel2_melt, x=\"sample\", y=\"taxon\", hue=\"domain\", size=\"abundance\", legend=False, sizes=(50, 2000), ax=ax[1]\n )\n\n # plot grid behind markers\n # p1.grid(ls=\"dotted\", zorder=1, linewidth=0.1)\n # p2.grid(ls=\"dotted\", zorder=1, linewidth=0.1)\n # take care of long labels\n fig.autofmt_xdate()\n\n plt.tight_layout()\n p1.set_ylabel(\"\")\n p1.set_ylabel(\"\")\n p2.set_ylabel(\"\")\n p2.set_ylabel(\"\")\n fig.savefig(f'{plot_file}')" ]
[ "0.6668762", "0.6598685", "0.6580668", "0.65461254", "0.6532494", "0.6513617", "0.6504175", "0.6385042", "0.63797927", "0.63521934", "0.6340164", "0.63299674", "0.619069", "0.61589485", "0.6137865", "0.6120342", "0.607238", "0.6047123", "0.60318536", "0.59974545", "0.5966363", "0.5954948", "0.5947267", "0.5937", "0.59294987", "0.59135085", "0.59118944", "0.5906591", "0.5905728", "0.5903932", "0.58906835", "0.58799636", "0.5859692", "0.58223057", "0.57987016", "0.5743401", "0.57350117", "0.57294875", "0.5709284", "0.57056767", "0.57038593", "0.5700378", "0.56986976", "0.56939226", "0.5692002", "0.56670487", "0.56653446", "0.56561697", "0.5648231", "0.5647611", "0.56289274", "0.5624731", "0.56181717", "0.56174904", "0.56142086", "0.56076014", "0.5599916", "0.5579495", "0.5565897", "0.5551494", "0.5548515", "0.55430573", "0.5532257", "0.5528773", "0.5527508", "0.5525863", "0.55200213", "0.5513184", "0.551076", "0.5503893", "0.54933167", "0.54892504", "0.5472072", "0.5467058", "0.54464114", "0.5443874", "0.5440002", "0.5429503", "0.540548", "0.5402615", "0.5392226", "0.5389935", "0.538728", "0.5384979", "0.5384242", "0.5382648", "0.5381988", "0.5380687", "0.53803104", "0.53750455", "0.5370292", "0.5370191", "0.5365587", "0.5364055", "0.5355564", "0.535171", "0.5349368", "0.53384763", "0.53332984", "0.5331935" ]
0.6864728
0
Takes a pandas dataframe which contains the proportions of language classes over generations and plots timecourses
def plot_timecourse_repair_counts(repair_counts_over_gen_df, title, file_path, file_name):
    sns.set_style("darkgrid")
    sns.set_context("talk")

    fig, ax = plt.subplots()

    palette = sns.color_palette("colorblind")

    sns.lineplot(x="generation", y="independent_repair_proportion", data=repair_counts_over_gen_df, palette=palette)
    # sns.lineplot(x="generation", y="proportion", hue="class", data=lang_class_prop_over_gen_df, palette=palette, ci=95, err_style="bars")

    plt.tick_params(axis='both', which='major', labelsize=18)
    plt.tick_params(axis='both', which='minor', labelsize=18)
    plt.ylim(-0.05, 1.05)
    plt.title(title, fontsize=22)
    plt.xlabel('Generation', fontsize=20)
    plt.ylabel('Mean proportion', fontsize=20)
    # handles, labels = ax.get_legend_handles_labels()
    # # labels = ['D', 'H', 'H+Div.', 'C', 'C+Red.-part', 'C+Red.-whole', 'O']
    # # ax.legend(handles=handles[1:], labels=labels[1:])
    # ax.legend(handles=handles, labels=labels)
    plt.tight_layout()
    plt.savefig(file_path + "Timecourse_plot_repairs_" + file_name + ".png")
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_timecourse_language_types(lang_class_prop_over_gen_df, title, file_path, file_name):\n sns.set_style(\"darkgrid\")\n sns.set_context(\"talk\")\n\n fig, ax = plt.subplots()\n\n if len(possible_form_lengths) == 1:\n palette = sns.color_palette([\"black\", \"red\", \"green\", \"grey\"])\n else:\n palette = sns.color_palette([\"black\",\n sns.color_palette(\"colorblind\")[3],\n sns.color_palette(\"colorblind\")[1],\n sns.color_palette(\"colorblind\")[2],\n sns.color_palette(\"colorblind\")[9],\n sns.color_palette(\"colorblind\")[0],\n sns.color_palette(\"colorblind\")[7]])\n\n sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette)\n # sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette, ci=95, err_style=\"bars\")\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n plt.xlabel('Generation', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n handles, labels = ax.get_legend_handles_labels()\n\n labels = ['D', 'H', 'H+Div.', 'C', 'C+Red.-part', 'C+Red.-whole', 'O']\n\n # ax.legend(handles=handles[1:], labels=labels[1:])\n ax.legend(handles=handles, labels=labels)\n plt.tight_layout()\n plt.savefig(file_path + \"Timecourse_plot_lang_types_\" + file_name + \".png\")\n plt.show()", "def plot_word_class_pr_genre(df):\n df['nouns'] = df['nouns'] * 100\n df['verbs'] = df['verbs'] * 100\n df['adverbs'] = df['adverbs'] * 100\n # plotting nouns\n plotting_helper_method('nouns', 'genre', df)\n plt.title('Amount of nouns pr song pr. genre')\n plt.xlabel(\"Amount of nouns in each song\")\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/nouns_pr_genre_plot')\n\n # plotting verbs\n plotting_helper_method('verbs', 'genre', df)\n plt.title('Amount of verbs pr song pr. genre')\n plt.xlabel('Amount of verbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/verbs_pr_genre_plot')\n\n # plotting adverbs\n plotting_helper_method('adverbs', 'genre', df)\n plt.title('Amount of adverbs pr song pr. genre')\n plt.xlabel('Amount of adverbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/adverbs_pr_genre_plot')", "def dataframe_to_language_stats(dataframe, n_runs, n_batches, n_gens, possible_form_lengths):\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. 
collapsing the two different reduplication strategies?)\n proportion_column = np.array(dataframe['proportion'])\n proportion_column_as_results = proportion_column.reshape((n_runs*n_batches, n_gens, n_language_classes))\n return proportion_column_as_results", "def plot_pie_charts_of_word_class_distribution(df):\n genre_dict = {\n 'g':'Rock',\n 'b':'Hip-Hop',\n 'r':'Pop'\n }\n for _, genre in genre_dict.items():\n filtered_df = df[df['genre'] == genre]\n \n # plotting circle diagram for the specific genre\n avg_percentage_nouns = filtered_df['nouns'].mean()\n avg_percentage_verbs = filtered_df['verbs'].mean()\n avg_percentage_adverbs = filtered_df['adverbs'].mean()\n\n total = avg_percentage_nouns + avg_percentage_nouns + avg_percentage_nouns\n nouns = avg_percentage_nouns / total * 100\n verbs = avg_percentage_verbs / total * 100\n adverbs = avg_percentage_adverbs / total * 100\n\n # Pie chart\n labels = ['Nouns', 'Verbs', 'Adverbs']\n sizes = [nouns, verbs, adverbs]\n\n _, ax1 = plt.subplots()\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n # Equal aspect ratio ensures that pie is drawn as a circle\n ax1.axis('equal') \n plt.tight_layout()\n plt.title(f'Circle diagram of the genre \"{genre}\"s average word classes distribution')\n plt.show()\n # plt.savefig(f'src/visualization/feature_plots/{genre}_word_class_distribution')", "def plot_barplot_language_types(lang_class_prop_over_gen_df, title, file_path, file_name, n_runs, n_batches, n_gens, gen_start, lang_class_baselines_all, lang_class_baselines_fully_expressive, possible_form_lengths):\n\n sns.set_style(\"darkgrid\")\n sns.set_context(\"talk\")\n\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. 
collapsing the two different reduplication strategies?)\n\n proportion_column_as_results = dataframe_to_language_stats(lang_class_prop_over_gen_df, n_runs, n_batches, n_gens, possible_form_lengths)\n\n proportion_column_from_start_gen = proportion_column_as_results[:, gen_start:]\n\n proportion_column_from_start_gen = proportion_column_from_start_gen.flatten()\n\n runs_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n for k in range(n_language_classes):\n runs_column_from_start_gen.append(i)\n runs_column_from_start_gen = np.array(runs_column_from_start_gen)\n\n generation_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n for k in range(n_language_classes):\n generation_column_from_start_gen.append(j)\n generation_column_from_start_gen = np.array(generation_column_from_start_gen)\n\n class_column_from_start_gen = []\n for i in range(n_runs*n_batches):\n for j in range(gen_start, n_gens):\n if n_language_classes == 4:\n class_column_from_start_gen.append('degenerate')\n class_column_from_start_gen.append('holistic')\n class_column_from_start_gen.append('compositional')\n class_column_from_start_gen.append('other')\n elif n_language_classes == 7:\n class_column_from_start_gen.append('D')\n class_column_from_start_gen.append('H')\n class_column_from_start_gen.append('H+Div.')\n class_column_from_start_gen.append('C')\n class_column_from_start_gen.append('C+Red.-part')\n class_column_from_start_gen.append('C+Red.-whole')\n class_column_from_start_gen.append('O')\n\n new_data_dict = {'run': runs_column_from_start_gen,\n 'generation': generation_column_from_start_gen,\n 'proportion': proportion_column_from_start_gen,\n 'class': class_column_from_start_gen}\n\n lang_class_prop_over_gen_df_from_starting_gen = pd.DataFrame(new_data_dict)\n\n if len(possible_form_lengths) == 1:\n palette = sns.color_palette([\"black\", \"red\", \"green\", \"grey\"])\n else:\n palette = sns.color_palette([\"black\",\n sns.color_palette(\"colorblind\")[3],\n sns.color_palette(\"colorblind\")[1],\n sns.color_palette(\"colorblind\")[2],\n sns.color_palette(\"colorblind\")[9],\n sns.color_palette(\"colorblind\")[0],\n sns.color_palette(\"colorblind\")[7]])\n\n sns.barplot(x=\"class\", y=\"proportion\", data=lang_class_prop_over_gen_df_from_starting_gen, palette=palette)\n\n # plt.axhline(y=lang_class_baselines_all[0], xmin=0.0, xmax=0.25, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[1], xmin=0.25, xmax=0.5, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[2], xmin=0.5, xmax=0.75, color='k', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_all[3], xmin=0.75, xmax=1.0, color='k', linestyle='--', linewidth=2)\n #\n # if title == 'Mutual Understanding Only' or title == 'Minimal Effort & Mutual Understanding':\n # plt.axhline(y=lang_class_baselines_fully_expressive[0], xmin=0.25, xmax=0.5, color='0.6', linestyle='--', linewidth=2)\n # plt.axhline(y=lang_class_baselines_fully_expressive[1], xmin=0.5, xmax=0.75, color='0.6', linestyle='--', linewidth=2)\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n # plt.xlabel('Language class')\n plt.xlabel('', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n plt.tight_layout()\n\n if holistic_without_partial_meaning is True:\n plt.savefig(file_path + 
\"Barplot_lang_types_\" + file_name + \"_burn_in_\" + str(gen_start) + \".png\")\n else:\n plt.savefig(file_path + \"Barplot_lang_types_\" + file_name + \"_burn_in_\" + str(gen_start) + \"_NEW.png\")\n plt.show()", "def leitner_proportions(df):\n denom = df.shape[0]\n prop_dict = {}\n\n for i in range(1,6):\n df_i = df[df['comfort_level'] == i]\n numer = df_i.shape[0]\n prop_dict[i] = numer / denom\n\n prop_df = pd.DataFrame.from_dict([prop_dict], orient='columns') \n\n prop_df = prop_df.T.rename(columns={0:'proportion'}) \n \n return prop_df", "def getFigBysubClass(df, path, nameClass):\n\ttmp = pd.DataFrame()\n\ttmp = tmp.append(df)\n\tdicoNbTrClass = countTranscript.getFig3Percent(path)\n\tdicoNbTrBt = countTranscript.getFig5Percent(path)\n\tdel tmp['nuclA']\n\tdel tmp['nuclT']\n\tdel tmp['nuclN']\n\tdel tmp['Type']\n\tclassDf = pd.DataFrame()\n\tclassDftmp = tmp[ tmp.Class == nameClass]\n\tgroups = classDftmp.groupby('Biotype')\n\tfor name, group in groups:\n\t\tgroupFilter = group[ group.Location == 'intron' ]\n\t\tgroupFilter = groupFilter.append( group[ group.Location == 'exon' ])\n\t\trow = sumSubTable(groupFilter, name)\n\t\trow['Biotype'] = name\n\t\trow['Class'] = nameClass\n\t\tif name not in dicoNbTrBt['Tot']:\n\t\t\tdicoNbTrBt['Tot'][name] = 0\n\t\tif name not in dicoNbTrBt['Wt']:\n\t\t\tdicoNbTrBt['Wt'][name] = 0\n\t\tif name not in dicoNbTrBt['Shuf']:\n\t\t\tdicoNbTrBt['Shuf'][name] = 0\n\t\trow['nbTr'] = dicoNbTrBt['Tot'][name]\n\t\trow['NbTrpG4Wt'] = dicoNbTrBt['Wt'][name]\n\t\trow['NbTrpG4Shuf'] = dicoNbTrBt['Shuf'][name]\n\t\trow.update(computePercent(row))\n\t\trow = pd.DataFrame(row, index=[len(classDftmp)+1])\n\t\tclassDf = classDf.append(row)\n\trow = {'Class' : nameClass,\n\t\t\t'Biotype' : nameClass,\n\t\t\t'nuclG' : sum(classDftmp.nuclG),\n\t\t\t'nuclC' : sum(classDftmp.nuclC),\n\t\t\t'nbTr' : dicoNbTrClass['Tot'][nameClass],\n\t\t\t'NbpG4rWt' : sum(classDftmp.NbpG4rWt),\n\t\t\t'NbpG4rShuf' : sum(classDftmp.NbpG4rShuf),\n\t\t\t'NbTrpG4Wt' : dicoNbTrClass['Wt'][nameClass],\n\t\t\t'NbTrpG4Shuf' : dicoNbTrClass['Shuf'][nameClass],\n\t\t\t'Tot' : sum(classDftmp.Tot)}\n\trow.update(computePercent(row))\n\trow = pd.DataFrame(row, index=[len(classDf)+1])\n\tclassDf = classDf.append(row)\n\tclassDf = computeDensity(classDf, 'Segment')\n\treturn classDf", "def language_stats_to_dataframe(results, n_runs, n_gens, possible_form_lengths):\n\n if len(possible_form_lengths) == 1:\n n_language_classes = 4\n else:\n n_language_classes = 7 #TODO: or should this be 6 (i.e. 
collapsing the two different reduplication strategies?)\n\n column_proportion = np.array(results)\n\n if n_language_classes == 4 and column_proportion.shape[2] > n_language_classes:\n column_proportion_compositional_summed = np.zeros((n_runs, n_gens, n_language_classes))\n for r in range(len(column_proportion_compositional_summed)):\n for g in range(len(column_proportion_compositional_summed[0])):\n column_proportion_compositional_summed[r][g] = np.array([column_proportion[r][g][0], column_proportion[r][g][1], column_proportion[r][g][2]+column_proportion[r][g][3], column_proportion[r][g][4]])\n column_proportion = column_proportion_compositional_summed.flatten()\n\n else:\n column_proportion = column_proportion.flatten()\n\n column_runs = []\n for i in range(n_runs):\n for j in range(n_gens):\n for k in range(n_language_classes):\n column_runs.append(i)\n column_runs = np.array(column_runs)\n\n column_generation = []\n for i in range(n_runs):\n for j in range(n_gens):\n for k in range(n_language_classes):\n column_generation.append(j)\n column_generation = np.array(column_generation)\n\n column_type = []\n for i in range(n_runs):\n for j in range(n_gens):\n if len(possible_form_lengths) == 1:\n column_type.append('degenerate')\n column_type.append('holistic')\n column_type.append('compositional')\n column_type.append('other')\n else:\n column_type.append('degenerate')\n column_type.append('holistic')\n column_type.append('holistic_diversify_signal')\n column_type.append('compositional')\n column_type.append('compositional_reduplicate_segments')\n column_type.append('compositional_reduplicate_whole_signal')\n column_type.append('other')\n\n data = {'run': column_runs,\n 'generation': column_generation,\n 'proportion': column_proportion,\n 'class': column_type}\n\n lang_class_prop_over_gen_df = pd.DataFrame(data)\n\n return lang_class_prop_over_gen_df", "def grid_plot_google(proverbs_list, data, dim = (4,4), ylog = False): \n\n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0], dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2)\n \n \n res = None\n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize=14)\n fig.text(0.02, 0.5, 'Frequency among all volumes in Google Books', va='center', rotation='vertical', fontsize=14)\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i].lower()),horizontalalignment='left', transform=ax.transAxes)\n\n ts = data[data.proverb ==proverbs_list[i]]\n ts = ts[data.year >= 1800]\n ts.year = pd.to_datetime(ts.year, format = '%Y', errors='coerce')\n ts.index = ts.year\n ts = ts.sort_index()\n ts = ts.reindex(pd.date_range('01/01/1800', '01/01/2019', freq = 'AS'), fill_value=0)\n #get 5-year rolling average\n ts2 = ts.copy()\n ts2 = ts2.rolling(window = 5).mean()\n print(ts)\n\n if res != None:\n ts = ts.resample(res).sum()\n \n if ylog == False:\n pass\n\n elif ylog == True:\n ax.set_yscale('log') \n \n ax.plot(ts.index, ts['vol_norm'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['vol_norm'], alpha = 0.9, color='darkorange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def proportions_visualiser(\n df: pd.core.frame.DataFrame,\n colum_name: str = \"Sensor Glucose (mg/dL)\",\n limits: 
Dict[str, int] = {\"low\": 70, \"high\": 180},\n windows: Dict[str, int] = {\"weekly\": 7, \"monthly\": 30},\n kind: str = \"TIR\",\n) -> NoReturn:\n\n valid_kinds = [\"TIR\", \"TBR\", \"TAR\"]\n\n if \"low\" not in limits.keys() or \"high\" not in limits.keys():\n raise Exception(f\"limits.keys() should be ['low', 'high'] not {limits.keys()}\")\n\n titles = {\n \"TIR\": f\"Time In Range [{limits['low']},{limits['high']})\",\n \"TAR\": f\"Time Above Range >= {limits['high']}\",\n \"TBR\": f\"Time Below Range < {limits['low']}\",\n }\n\n kind = kind.upper()\n if kind not in valid_kinds:\n raise Exception(\n f\"Invalid kind `{kind}`, select one from {valid_kinds} or refer to help({self.__name__})\"\n )\n\n TIR = (\n lambda y: 100\n * y[(y >= limits[\"low\"]) & (y < limits[\"high\"])].count()\n / y.count()\n )\n TBR = lambda y: 100 * y[(y < limits[\"low\"])].count() / y.count()\n TAR = lambda y: 100 * y[(y >= limits[\"high\"])].count() / y.count()\n\n _proportions = df[colum_name].groupby(df.index.date).apply(eval(f\"{kind}\"))\n\n _proportions.plot(**{\"label\": \"daily\"})\n\n for key, value in windows.items():\n _ax = _proportions.rolling(value).mean().plot(**{\"label\": key})\n\n _mean_proportion = _proportions.mean()\n plt.ylabel(\"Percentage\")\n plt.axhline(\n _mean_proportion,\n **{\"label\": f\"mean = {round(_mean_proportion,1)}\", \"c\": \"blue\"},\n )\n plt.legend()\n plt.title(titles[kind])", "def visualize(X: pd.DataFrame, y: pd.DataFrame) -> None:\r\n y[\"Action\"].value_counts().plot.pie(explode=(0.02, 0.04, 0.05, 0.09), title=\"Proportion of classes in dataset\")\r\n plt.savefig(\"Figures/proportions\")\r\n\r\n for i, column in enumerate(X.columns):\r\n fig, ax = plt.subplots(1, 2)\r\n\r\n ax[0].hist(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[0].set_xlabel(column)\r\n ax[0].set_ylabel(\"Frequency\")\r\n\r\n ax[1].boxplot(\r\n (\r\n X[y[\"Action\"] == \"allow\"][column],\r\n X[y[\"Action\"] == \"deny\"][column],\r\n X[y[\"Action\"] == \"drop\"][column],\r\n X[y[\"Action\"] == \"reset-both\"][column],\r\n )\r\n )\r\n ax[1].set_xlabel(\"Action\")\r\n ax[1].set_ylabel(column)\r\n\r\n X[column].hist(by=y[\"Action\"])\r\n\r\n ax[0].legend([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n ax[1].set_xticklabels([\"allow\", \"deny\", \"drop\", \"reset-both\"])\r\n fig.suptitle(\"Distribution of classes among attributes\")\r\n plt.savefig(\"Figures/boxplots\")", "def makeComparsionChart(columns, data):\n fig = plt.figure(figsize=(16, 10))\n gs = gridspec.GridSpec(2, 3, wspace = 0.2, hspace=0.2, right=0.96, left=0.04)\n ax1 = plt.subplot(gs[0, 0:1], label=\"\")\n ax2 = plt.subplot(gs[0, 1:2], label=\"\" )\n ax3 = plt.subplot(gs[0, 2:3], label=\"\" )\n ax4 = plt.subplot(gs[1, 0:1], label=\"\" )\n ax5 = plt.subplot(gs[1, 1:2], label=\"\" )\n ax1.set_title('Before Scaling')\n ax2.set_title('After Standard Scaler')\n ax3.set_title('After Min-Max Scaler')\n ax4.set_title('After Roboust Scaler')\n ax5.set_title('After Normalization')\n\n for column in columns:\n sns.kdeplot(data[0][column], ax=ax1, legend=False)\n sns.kdeplot(data[1][column], ax=ax2, legend=False)\n sns.kdeplot(data[2][column], ax=ax3, legend=False)\n sns.kdeplot(data[3][column], ax=ax4, legend=False)\n sns.kdeplot(data[4][column], ax=ax5, legend=False)\n\n plt.show()", "def samplecost(app, endclasses, fxnmode, samptype='std', title=\"\"):\n associated_scens=[]\n for 
phase in app.phases:\n associated_scens = associated_scens + app.scenids.get((fxnmode, phase), [])\n costs = np.array([endclasses[scen]['cost'] for scen in associated_scens])\n times = np.array([time for phase, timemodes in app.sampletimes.items() if timemodes for time in timemodes if fxnmode in timemodes.get(time)] ) \n rates = np.array(list(app.rates_timeless[fxnmode].values()))\n \n tPlot, axes = plt.subplots(2, 1, sharey=False, gridspec_kw={'height_ratios': [3, 1]})\n phasetimes_start =[times[0] for phase, times in app.phases.items()]\n phasetimes_end =[times[1] for phase, times in app.phases.items()]\n ratetimes =[]\n ratesvect =[]\n phaselocs = []\n for (ind, phasetime) in enumerate(phasetimes_start):\n axes[0].axvline(phasetime, color=\"black\") \n phaselocs= phaselocs +[(phasetimes_end[ind]-phasetimes_start[ind])/2 + phasetimes_start[ind]]\n\n axes[1].axvline(phasetime, color=\"black\") \n ratetimes = ratetimes + [phasetimes_start[ind]] + [phasetimes_end[ind]]\n ratesvect = ratesvect + [rates[ind]] + [rates[ind]]\n #axes[1].text(middletime, 0.5*max(rates), list(app.phases.keys())[ind], ha='center', backgroundcolor=\"white\")\n #rate plots\n axes[1].set_xticks(phaselocs)\n axes[1].set_xticklabels(list(app.phases.keys()))\n \n axes[1].plot(ratetimes, ratesvect)\n axes[1].set_xlim(phasetimes_start[0], phasetimes_end[-1])\n axes[1].set_ylim(0, np.max(ratesvect)*1.2 )\n axes[1].set_ylabel(\"Rate\")\n axes[1].set_xlabel(\"Time (\"+str(app.units)+\")\")\n axes[1].grid()\n #cost plots\n axes[0].set_xlim(phasetimes_start[0], phasetimes_end[-1])\n axes[0].set_ylim(0, 1.2*np.max(costs))\n if samptype=='fullint':\n axes[0].plot(times, costs, label=\"cost\")\n else:\n if samptype=='quadrature' or samptype=='pruned piecewise-linear': \n sizes = 1000*np.array([weight if weight !=1/len(timeweights) else 0.0 for phase, timeweights in app.weights[fxnmode].items() for time, weight in timeweights.items() if time in times])\n axes[0].scatter(times, costs,s=sizes, label=\"cost\", alpha=0.5)\n axes[0].stem(times, costs, label=\"cost\", markerfmt=\",\", use_line_collection=True)\n \n axes[0].set_ylabel(\"Cost\")\n axes[0].grid()\n if title: axes[0].set_title(title)\n elif type(fxnmode[0])==tuple: axes[0].set_title(\"Cost function of \"+str(fxnmode)+\" over time\")\n else: axes[0].set_title(\"Cost function of \"+fxnmode[0]+\": \"+fxnmode[1]+\" over time\")\n #plt.subplot_adjust()\n plt.tight_layout()", "def costovertime(endclasses, app, costtype='expected cost'):\n costovertime = cost_table(endclasses, app)\n plt.plot(list(costovertime.index), costovertime[costtype])\n plt.title('Total '+costtype+' of all faults over time.')\n plt.ylabel(costtype)\n plt.xlabel(\"Time (\"+str(app.units)+\")\")\n plt.grid()", "def grid_plot_nyt(proverbs_list, data, dim = (4,4), res = '1M'):\n \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0], dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.3, hspace = 0.2)\n \n\n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize=14)\n fig.text(0.02, 0.5, 'Frequency among all articles in NYT', va='center', rotation='vertical', fontsize=14)\n \n #get month resolution\n ts = data.copy()\n resamp = ts.resample(res).sum()\n resamp = resamp.div(resamp['total'], axis =0)\n ts = resamp\n \n #get year resolution\n ts2 = data.copy()\n resamp = ts.resample('1Y').sum()\n resamp = 
resamp.div(resamp['total'], axis =0)\n ts2 = resamp\n \n #make each plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n\n print(ts[proverbs_list[i]])\n ax.plot(ts.index, ts[proverbs_list[i]], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2[proverbs_list[i]], alpha = 0.9, color = 'orange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def dashboard(df):\n panamax = (df.loc[:, \"Class\"] == \"Panamax\")\n post_panamax = (df.loc[:, \"Class\"] == \"Post-Panamax\")\n nearshore = (df.loc[:, \"Location\"] == \"Nearshore\")\n offshore = (df.loc[:, \"Location\"] == \"Offshore\")\n inbound = (df.loc[:, \"Course Behavior\"] == \"Inbound\")\n outbound = (df.loc[:, \"Course Behavior\"] == \"Outbound\")\n dat = {\"Proportion<br>of Transits\":[\n str(round(sum(panamax) / len(df) * 100, 2)) + \"%\",\n str(round(sum(post_panamax) / len(df) * 100, 2)) + \"%\", \"100%\"\n ],\n \"Compliance<br>Rate\":[\n str(round(sum(panamax & (df.loc[:, \"VSPD kn\"] <= 10)) /\n sum(panamax) * 100, 2)) + \"%\",\n str(round(sum(post_panamax & (df.loc[:, \"VSPD kn\"] <= 10)) /\n sum(post_panamax) * 100, 2)) + \"%\",\n str(round(sum(df.loc[:, \"VSPD kn\"] <= 10) / len(df) * 100, 2)) + \"%\"\n ],\n \"Mean<br>VSPD\":[\n str(round(df[panamax].loc[:, \"VSPD kn\"].mean(), 2)) + \" kn\",\n str(round(df[post_panamax].loc[:, \"VSPD kn\"].mean(), 2)) + \" kn\",\n str(round(df.loc[:, \"VSPD kn\"].mean(), 2)) + \" kn\"\n ],\n \"Nearshore<br>Median VSPD\":[\n str(round(df[nearshore & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[nearshore & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[nearshore].loc[:, \"VSPD kn\"].median(), 2)) + \" kn\"\n ],\n \"Offshore<br>Median VSPD\":[\n str(round(df[offshore & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[offshore & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[offshore].loc[:, \"VSPD kn\"].median(), 2)) + \" kn\"\n ],\n \"Inbound<br>Median VSPD\":[\n str(round(df[inbound & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[inbound & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[inbound].loc[:, \"VSPD kn\"].median(), 2)) + \" kn\"\n ],\n \"Outbound<br>Median VSPD\":[\n str(round(df[outbound & panamax].loc[:, \"VSPD kn\"].median(), 2)) +\n \" kn\",\n str(round(df[outbound & post_panamax].loc[:,\n (\"VSPD kn\")].median(), 2)) + \" kn\",\n str(round(df[outbound].loc[:, \"VSPD kn\"].median(), 2)) + \" kn\"\n ],\n \"VSPD-WSPD<br>Correlation\":[\n str(round(df[panamax].dropna().loc[:, (\"VSPD kn\", \"WSPD mph\")].corr()\n .iloc[0][1], 2)),\n str(round(df[post_panamax].dropna().loc[:,\n (\"VSPD kn\", \"WSPD mph\")].corr().iloc[0][1], 2)),\n str(round(df.dropna().loc[:,\n (\"VSPD kn\", \"WSPD mph\")].corr().iloc[0][1], 2))\n ]\n }\n index = [\"Panamax\", \"Post-Panamax\", \"Combined\"]\n return pd.DataFrame(dat, index)", "def trip_duration_stats(df):", "def forebears (WFROM,WTO,efrom, eto, g=25):\n \n c.execute(\"\"\"\n SELECT wyear, eyear, count (eyear), wnationality\n FROM clean \n WHERE (eyear IS NOT Null) AND (wyear IS NOT Null)\n AND WYEAR >= ? and WYEAR <= ? \n AND eyear >= ? AND eyear <= ? 
\n GROUP BY wyear, eyear\n ORDER BY wyear, eyear\"\"\", (WFROM, WTO, efrom, eto))\n\n years = c.fetchall()\n epigraphtotal = sum (s for (x,y,s,n) in years)\n #plt.xlim(WFROM, WTO)\n #plt.ylim(100, -1500)\n #colors = list(mcolors.TABLEAU_COLORS.keys()) *20\n #print(colors)\n \n \n gen =dd(lambda: dd(int))\n gentotal= dd(int)\n for (x,y,s,n) in years:\n gen[generation(x,g)][generation(y-x,g)] += 1\n gentotal[generation(x,g)] +=1\n \n for x in gen:\n for y in gen[x]:\n print(x, y, gen[x][y], gentotal[x])\n\n \n\n plt.figure(figsize=(10, 5))\n ax=plt.axes()\n\n\n #df.plot(colormap=gray) \n cumtotal = [0]*len(gen)\n\n for d in range(0,-200, -1):\n #for d in range(min(gen.keys()),max(gen.keys()),-1):\n xv = list(gen.keys())\n yv = [rat(gen[x][d],gentotal[x]) for x in xv]\n plt.bar(xv, yv, bottom=cumtotal,\n tick_label=[x*g for x in xv])\n cumtotal = [x + y for x, y in zip(yv, cumtotal)]\n #colors.pop()\n #print(d, cumtotal)\n plt.xlabel('Year of Work (in generations)')\n plt.ylabel(f'Share of Distance to forebear (in {g} year generations)')\n plt.title(f'Distance back vs Year of Work ({epigraphtotal} epigraphs)')\n plt.savefig(f\"figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}-{g}.png\")\n plt.close()", "def plot_df(data_frame):\n plt.figure(figsize = (10, 5))\n chart = sns.countplot(data_frame['label'], \n palette=\"Set1\"\n )\n plt.show()", "def visualize_timecourses_grid(timecourses_cols, cols_names, out_filename, hgap=20, vgap=20):\n\n\tnframes, ncomponents = timecourses_cols[0].shape\n\tncols = len(timecourses_cols)\n\n\tf, axarr = pyplot.subplots(ncomponents, ncols)\n\n\t# fig = pyplot.figure(figsize=(4, 2))\n\n\tthemin = min([timecourses_cols[col].min() for col in range(ncols)])\n\tthemax = min([timecourses_cols[col].max() for col in range(ncols)])\n\n\tfor k in range(ncomponents):\n\t\tfor col in range(ncols):\n\t\t\taxarr[k, col].plot(timecourses_cols[col][:,k])\n\n\t\t\tif k == 0:\n\t\t\t\taxarr[k, col].set_title(cols_names[col])\n\n\t\t\taxarr[k, col].tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')\n\t\t\taxarr[k, col].set_ylim(themin, themax)\n\t\t\taxarr[k, col].set_xticklabels([])\n\t\t\taxarr[k, col].set_yticklabels([])\n\n\tpyplot.tight_layout()\n\tpyplot.savefig(out_filename)\n\tpyplot.close(f)", "def proportion_with_cardinals(df, PATH):\n \n df_test = df.copy()\n df_test['cardinal'] = df.title.apply(contains_cardinal)\n\n click = df_test[df_test.target == 1]\n non = df_test[df_test.target == 0]\n click = click.groupby(['cardinal']).target.count()\n non = non.groupby(['cardinal']).target.count()\n \n non = non[1]/non[0] * 100\n click = click[1]/click[0] * 100\n # plot the results\n fig, ax = plt.subplots(figsize=(12,6))\n sns.barplot(x=['Normal', \"Clickbait\"], y=[non, click], ax=ax)\n plt.title(\"Percent of Titles Containing Cardinal Numbers\", size = 24)\n plt.xlabel(\"Article Class\", size=24)\n plt.ylabel(\"Percent %\", size = 24)\n plt.ylim(0, 100)\n plt.xticks([0,1], label=[\"Normal\", \"Clickbait\"], size=24)\n if PATH:\n plt.savefig(PATH, bbox_inches=\"tight\", transparent=True)\n \n return ax", "def inst_class_stats(df, col='num_pkts'):\n classes = df.groupby('class_label')\n stat = classes[col].describe()\n return stat", "def graph_course(self):\n group = self.__data[\"filted_general_groupby\"]\n graph = {}\n if self.analysis[\"courses\"] is None:\n self.courses_list()\n\n # inicializa o dicionario que vai guardar o grafico\n for course in self.analysis[\"courses\"].index:\n graph[course] = []\n\n for i in range(18):\n min_v = i * 5\n 
max_v = min_v + 4.99\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n min_v = 95\n max_v = 100\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n self.analysis[\"graph_course\"] = graph", "def plot_scenario_distribution(self):\n x = self.arms\n\n y = self.df.groupby('price').mean().Converted[x]\n y_sex_0 = self.df[self.df.Sex == 0].groupby('price').mean().Converted[x]\n y_sex_1 = self.df[self.df.Sex == 1].groupby('price').mean().Converted[x]\n y_age_0 = self.df[self.df.Under_30 == 0].groupby('price').mean().Converted[x]\n y_age_1 = self.df[self.df.Under_30 == 1].groupby('price').mean().Converted[x]\n\n fig, ax_list = plt.subplots(2,1, figsize=(12, 9))\n\n for ax in ax_list:\n ax.grid(alpha=0.3, linestyle='--')\n\n ax.set_ylim(bottom=0, top=0.6)\n ax.set_xlim(left=50, right=104)\n\n ax.set_xlabel(\"Price\", fontsize=14)\n ax.set_ylabel(\"Conversion Rate\", fontsize=14)\n\n ax.set_xticks(self.arms)\n ax.set_xticklabels(self.arms.astype(np.int64), fontsize=12, alpha=0.7)\n ax.set_yticks(np.linspace(0, 0.7, 8))\n ax.set_yticklabels([str((i * 100).astype(np.int64)) + \"%\" for i in np.linspace(0, 0.7, 8)], fontsize=12, alpha=0.7)\n\n ax.spines['right'].set_alpha(0)\n ax.spines['left'].set_alpha(0.3)\n ax.spines['top'].set_alpha(0)\n ax.spines['bottom'].set_alpha(0.3)\n\n ax_list[0].plot(x, y, label='Global')\n ax_list[0].plot(x, y_sex_0, label='Male', color='moccasin')\n ax_list[0].plot(x, y_sex_1, label='Female', color='darkorange')\n\n ax_list[1].plot(x, y, label='Global')\n ax_list[1].plot(x, y_age_0, label='Under 30', color='red')\n ax_list[1].plot(x, y_age_1, label='Over 30', color='darkred')\n\n ax_list[0].legend()\n ax_list[1].legend()\n\n fig.suptitle(\"Conversion Rate\", fontsize=22)\n\n fig.show()\n\n plt.savefig('chapter5_pricing.png')", "def context_study_stats(frame_path=METRICS_DIR+'/merge.csv'):\n frame = pd.read_csv(frame_path)\n print(frame['LOC_prod'].mean())\n print(frame['LOC_prod'].sum())\n print(frame['LOC_test'].sum())\n print(frame['no_mutations'].sum())\n print(frame.shape[0])\n\n sizes = frame.groupby('project').size()\n prod = frame.groupby('project')['LOC_prod'].sum( )\n test = frame.groupby('project')['LOC_test'].sum()\n mutants = frame.groupby('project')['no_mutations'].sum()\n\n result = pd.DataFrame({'project': list(sizes.index),\n 'size': list(sizes),\n 'prod': list(prod),\n 'test': list(test),\n 'mutants': list(mutants)},\n columns=['project', 'size', 'prod', 'test', 'mutants'])\n print(result.to_latex())", "def makePdf(sources):\n pdf = PdfPages(\"sample_features.pdf\")\n classnames = []\n classname_dict = {}\n x = 2 # number of subplot columns\n y = 3 # number of subplot rows\n for source in sources:\n lc = source.lcs[0]\n\n if lc.classname not in classnames:\n classnames.append(lc.classname)\n classname_dict[lc.classname] = [lc]\n else:\n classname_dict[lc.classname].append(lc)\n\n if len(classname_dict[lc.classname]) < 3:\n\n label = lc.classname + \"; ID: \" + lc.id\n # all_times histogram:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_title(label)\n ax.axis('off')\n\n ax1 = fig.add_subplot(321)\n ax2 = fig.add_subplot(322)\n ax2.axis('off')\n ax3 = fig.add_subplot(323)\n ax4 = fig.add_subplot(324)\n ax4.axis('off')\n ax5 = fig.add_subplot(325)\n ax6 = fig.add_subplot(326)\n ax6.axis('off')\n\n hist, bins, other = ax1.hist(lc.all_times, 50, normed=True)\n ax1.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Histogram (normed) of all $\\Delta$Ts')\n\n ax2.text(0.0, 0.9, (r'$\\bullet$med time to next obs: ' +\n 
str(np.round(lc.cads_med, 4))))\n ax2.text(0.0, 0.75, (r'$\\bullet$avg time to next obs: ' +\n str(np.round(lc.avgt, 4))))\n ax2.text(0.0, 0.6, (r'$\\bullet$std dev of time to next obs: ' +\n str(np.round(lc.cads_std, 4))))\n ax2.text(0.0, 0.45, (r'$\\bullet$med of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_med, 4))))\n ax2.text(0.0, 0.3, (r'$\\bullet$avg of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_avg, 4))))\n ax2.text(0.0, 0.15, (r'$\\bullet$std dev of all $\\Delta$Ts: ' +\n str(np.round(lc.all_times_std, 4))))\n\n hist, bins, other = ax3.hist(lc.cads, 50)\n ax3.text(np.max(bins) * 0.1, np.max(hist) * 0.8,\n r'Hist of time to next obs')\n\n ax6.text(\n 0.0, 0.9, r'$\\bullet$Number of epochs: ' + str(lc.n_epochs))\n ax6.text(0.0, 0.75, (r'$\\bullet$Time b/w first & last obs (days): ' +\n str(np.round(lc.total_time, 2))))\n ax6.text(0.0, 0.6, (r'$\\bullet$Average error in mag: ' +\n str(np.round(lc.avg_err, 4))))\n ax6.text(0.0, 0.45, (r'$\\bullet$Median error in mag: ' +\n str(np.round(lc.med_err, 4))))\n ax6.text(0.0, 0.3, (r'$\\bullet$Std dev of error: ' +\n str(np.round(lc.std_err, 4))))\n ax6.text(0.0, 0.15, '')\n\n ax5.scatter(lc.epochs, lc.mags)\n\n ax4.text(0.0, 0.9, (r'$\\bullet$Avg double to single step ratio: ' +\n str(np.round(lc.avg_double_to_single_step, 3))))\n ax4.text(0.0, 0.75, (r'$\\bullet$Med double to single step: ' +\n str(np.round(lc.med_double_to_single_step, 3))))\n ax4.text(0.0, 0.6, (r'$\\bullet$Std dev of double to single step: ' +\n str(np.round(lc.std_double_to_single_step, 3))))\n ax4.text(\n 0.0, 0.45,\n (r'$\\bullet$1st peak to 2nd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_2, 3))))\n ax4.text(\n 0.0, 0.3,\n (r'$\\bullet$2ndt peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_2_to_3, 3))))\n ax4.text(\n 0.0, 0.15,\n (r'$\\bullet$1st peak to 3rd peak (in all $\\Delta$Ts): ' +\n str(np.round(lc.all_times_nhist_peak_1_to_3, 3))))\n\n pdf.savefig(fig)\n\n pdf.close()\n\n pdf = PdfPages('feature_plots.pdf')\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223)\n ax4 = fig.add_subplot(224)\n\n plt.subplots_adjust(wspace=0.4, hspace=0.4)\n\n classnamenum = 0\n\n colors = ['red', 'yellow', 'green', 'blue', 'gray', 'orange', 'cyan',\n 'magenta']\n for classname, lcs in list(classname_dict.items()):\n classnamenum += 1\n print(classname, len(lcs), 'light curves.')\n attr1 = []\n attr2 = []\n attr3 = []\n attr4 = []\n attr5 = []\n attr6 = []\n attr7 = []\n attr8 = []\n for lc in lcs:\n attr1.append(lc.n_epochs)\n attr2.append(lc.avgt)\n attr3.append(lc.cads_std)\n attr4.append(lc.total_time)\n attr5.append(lc.all_times_hist_peak_val)\n attr6.append(lc.cad_probs[5000])\n attr7.append(lc.all_times_nhist_peak_1_to_3)\n attr8.append(lc.all_times_nhist_peak_val)\n\n ax2.scatter(attr1, attr2, color=colors[classnamenum], label=classname)\n ax1.scatter(attr3, attr4, color=colors[classnamenum], label=classname)\n ax2.set_xlabel('N Epochs')\n ax2.set_ylabel('Avg time to next obs')\n ax1.set_xlabel('Standard dev. 
of time to next obs')\n ax1.set_ylabel('Time b/w first and last obs')\n\n ax3.scatter(attr5, attr6, color=colors[classnamenum], label=classname)\n ax4.scatter(attr7, attr8, color=colors[classnamenum], label=classname)\n ax3.set_xlabel(r'All $\\Delta$T hist peak val')\n ax3.set_ylabel('Prob time to next obs <= 5000 min')\n ax4.set_xlabel(r'$\\Delta$Ts normed hist peak 1 to peak 3')\n ax4.set_ylabel(r'Peak val of all $\\Delta$Ts normed hist')\n\n #ax1.legend(bbox_to_anchor=(1.1, 1.1),prop={'size':6})\n ax2.legend(bbox_to_anchor=(1.1, 1.1), prop={'size': 6})\n #ax3.legend(loc='upper right',prop={'size':6})\n #ax4.legend(loc='upper right',prop={'size':6})\n\n pdf.savefig(fig)\n\n pdf.close()\n return 0", "def plot_associative_learning_progress(ax, df):\n\n num_objects_list = sorted(df.curr_num_objects.unique())\n legend_list = []\n for idx in num_objects_list:\n ax.plot(df[df.curr_num_objects == idx].groupby('objects_iter').rewards.mean())\n legend_list.append(f'ns={idx}')\n ax.set_xlabel('Stimulus iteration')\n ax.set_ylabel('P(correct)')\n ax.set_ylim([0.4, 1])\n ax.legend(legend_list)", "def count_plot_target_class(self):\r\n print(self.dataframe_name)\r\n print(self.data_frame.groupby([self.target_column]).size()) # print the sum of every class\r\n\r\n sns.countplot(data=self.data_frame, x=self.data_frame[self.target_column])\r\n plt.title(self.dataframe_name + ': Display the distribution of ' + self.target_column + ' class')\r\n plt.xlabel('Target Name: ' + self.target_column)\r\n plt.ylabel('Count')\r\n self.save_plot_as_image()\r\n plt.show()", "def grid_plot_twitter(proverbs_list, data,dim = (4,4), ylog = False, rt = False): \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = dim[0],dim[1]\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2)\n \n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize = 14)\n fig.text(0.02, 0.5, 'Frequency among all {}-grams on Twitter'.format(len(proverbs_list[0].split())), va='center', rotation='vertical', fontsize = 14)\n \n #loop to create each timeseries plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n ts = data[data.proverb ==proverbs_list[i]]\n ts.date = pd.to_datetime(ts.date, format = '%Y-%m-%d', errors='coerce')\n ts.index = ts.date\n ts = ts.sort_index()\n print(ts)\n ts2 = ts.copy()[['freq_noRT', 'freq']]\n print(ts2)\n ts2 = ts2.rolling(window=30).mean()\n print(ts2)\n\n \n if ylog == False:\n pass\n\n elif ylog == True:\n ax.set_yscale('log') \n\n if rt == False:\n ax.plot(ts.index, ts['freq_noRT'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['freq_noRT'], alpha = 0.9, color='darkorange') \n \n elif rt ==True:\n ax.plot(ts.index, ts['freq'], alpha = 0.5, color = 'gray')\n ax.plot(ts2.index, ts2['freq'], alpha = 0.9, color='darkorange')\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def prop_types(houses:pd.DataFrame) -> None:\n sns.set_style('whitegrid')\n indexNames= houses[houses['PRICE'] >= 3000000].index\n houses= houses.drop(indexNames)\n \n ax= sns.catplot(x= 'PROPERTY_TYPE', y= 'PRICE', kind= 'box', data= houses)\n ax.set_xticklabels(rotation=30)\n plt.tight_layout()\n plt.show()\n \n ax= 
sns.countplot(x= 'PROPERTY_TYPE', data= houses)\n ax.set_xticklabels(ax.get_xticklabels(), rotation= 30, ha=\"right\", fontsize=9)\n plt.show()", "def plot_genre_and_word_count(df):\n plotting_helper_method('word_count', 'genre', df)\n\n plt.title('Word count pr. genre')\n plt.xlabel('Word Count')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/word_count_plot')", "def grid_plot_gutenberg(proverbs_list, data, counts, begin_at =1800, end_at = 1950, bin_size = 20): \n \n plt.rcParams.update({\n 'font.size': 9,\n 'axes.titlesize': 8,\n 'axes.labelsize': 14,\n 'xtick.labelsize': 7,\n 'ytick.labelsize': 7,\n 'legend.fontsize': 10,\n })\n \n rows, cols = 4, 4\n fig = plt.figure(figsize=(12, 5.75))\n gs = gridspec.GridSpec(ncols=cols, nrows=rows)\n gs.update(wspace = 0.2, hspace = 0.2) \n \n \n i = 0\n \n fig.text(0.5, 0.02,'Year' , ha='center', fontsize=14)\n fig.text(0.02, 0.5, 'Frequency among all volumes in Gutenberg', va='center', rotation='vertical', fontsize=14)\n \n ts = data.copy()\n ts_bin = ts.groupby(lambda x: (x//bin_size)*bin_size).sum()\n ts_norm = ts_bin.div(ts_bin['num_books'], axis=0)\n ts_norm = ts_norm.fillna(0)\n ts = ts_norm.truncate(before = begin_at, after = end_at)[proverbs_list]\n\n #loop to create each timeseries plot in the grid\n for r in np.arange(0, rows, step=1):\n for c in np.arange(cols):\n\n ts2 = ts[proverbs_list[i]].to_frame()\n\n ax = fig.add_subplot(gs[r, c])\n\n ax.text(0.1,0.9,'\\\"{}\\\"'.format(proverbs_list[i]),horizontalalignment='left', transform=ax.transAxes)\n\n ax.plot(ts2.index, ts2[proverbs_list[i]], alpha = 0.5)\n i+=1\n \n plt.subplots_adjust(left=0.08, right=0.95, top=0.95, bottom=0.1)", "def make_plots():\n prep = DataPrep(filepath='/home/ubuntu/ca_bills_project/data/extra/topic_intro_data_05-23-17-08-23.csv')\n prep.prepare()\n k = 100\n trained_model_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_rf_10000trees.pkl\"\n with open(trained_model_file) as p:\n model = pickle.load(p)\n print \"loaded model\"\n features = [u'days_since_start', u'session_type', u'party_ALL_DEM', u'party_ALL_REP',\n u'party_BOTH', 'party_COM', u'urgency_No', u'urgency_Yes',\n u'taxlevy_No',\n u'taxlevy_Yes']\n topic_features = [\"topic_\"+str(x) for x in range(k)]\n features += topic_features\n X_train, y_train = prep.subset(features)\n feature_importance(model, features)\n feature_subset_indices = [73, 13]\n gb_file = \"/home/ubuntu/ca_bills_project/data/extra/intro_model_100_topics_gb.pkl\"\n with open(gb_file) as p:\n gb = pickle.load(p)\n make_partial_dependence(gb, X_train, y_train, features, feature_subset_indices)", "def explore_col(s, e):\n \n fig = plt.figure(figsize=(10, 8))\n\n\n sub1 = fig.add_subplot(221) \n sub1.set_title(s +' histogram') \n sub1.hist(df_tr_lbl[s])\n\n sub2 = fig.add_subplot(222)\n sub2.set_title(s +' boxplot')\n sub2.boxplot(df_tr_lbl[s])\n \n #np.random.seed(12345)\n \n if e > 100 or e <= 0:\n select_engines = list(pd.unique(df_tr_lbl.id))\n else:\n select_engines = np.random.choice(range(1,101), e, replace=False)\n \n sub3 = fig.add_subplot(223)\n sub3.set_title('time series: ' + s +' / cycle')\n sub3.set_xlabel('cycle')\n for i in select_engines:\n df = df_tr_lbl[['cycle', s]][df_tr_lbl.id == i]\n sub3.plot(df['cycle'],df[s])\n \n sub4 = fig.add_subplot(224)\n sub4.set_title(\"scatter: \"+ s + \" / ttf (regr label)\")\n sub4.set_xlabel('ttf')\n sub4.scatter(df_tr_lbl['ttf'],df_tr_lbl[s])\n\n\n plt.tight_layout()\n plt.show()", "def course_plots(request, 
course_code):\n    try:\n        course = (\n            Course.objects.filter(course_filters_pcr, full_code=course_code)\n            .order_by(\"-semester\")[:1]\n            .select_related(\"topic\", \"topic__most_recent\")\n            .get()\n        )\n    except Course.DoesNotExist:\n        raise Http404()\n\n    course = course.topic.most_recent\n\n    current_semester = get_current_semester()\n\n    # Compute set of sections to include in plot data\n    filtered_sections = (\n        Section.objects.filter(\n            extra_metrics_section_filters_pcr(current_semester),\n            course__topic_id=course.topic_id,\n        )\n        .annotate(efficient_semester=F(\"course__semester\"))\n        .distinct()\n    )\n    instructor_ids = request.GET.get(\"instructor_ids\")\n    if instructor_ids:\n        instructor_ids = [int(id) for id in instructor_ids.split(\",\")]\n        filtered_sections = filtered_sections.filter(\n            instructors__id__in=instructor_ids,\n        ).distinct()\n\n    section_map = defaultdict(dict) # a dict mapping semester to section id to section object\n    for section in filtered_sections:\n        section_map[section.efficient_semester][section.id] = section\n\n    (\n        avg_demand_plot,\n        avg_demand_plot_min_semester,\n        recent_demand_plot,\n        recent_demand_plot_semester,\n        avg_percent_open_plot,\n        avg_percent_open_plot_min_semester,\n        recent_percent_open_plot,\n        recent_percent_open_plot_semester,\n    ) = tuple([None] * 8)\n    avg_demand_plot_num_semesters, avg_percent_open_plot_num_semesters = (0, 0)\n    if section_map:\n        status_updates_map = get_status_updates_map(section_map)\n        (\n            avg_demand_plot,\n            avg_demand_plot_min_semester,\n            avg_demand_plot_num_semesters,\n            recent_demand_plot,\n            recent_demand_plot_semester,\n        ) = avg_and_recent_demand_plots(section_map, status_updates_map, bin_size=0.005)\n        (\n            avg_percent_open_plot,\n            avg_percent_open_plot_min_semester,\n            avg_percent_open_plot_num_semesters,\n            recent_percent_open_plot,\n            recent_percent_open_plot_semester,\n        ) = avg_and_recent_percent_open_plots(section_map, status_updates_map)\n\n    current_adp = get_or_create_add_drop_period(current_semester)\n    local_tz = gettz(TIME_ZONE)\n\n    return Response(\n        {\n            \"code\": course_code,\n            \"current_add_drop_period\": {\n                \"start\": current_adp.estimated_start.astimezone(tz=local_tz),\n                \"end\": current_adp.estimated_end.astimezone(tz=local_tz),\n            },\n            \"average_plots\": {\n                \"pca_demand_plot_since_semester\": avg_demand_plot_min_semester,\n                \"pca_demand_plot_num_semesters\": avg_demand_plot_num_semesters,\n                \"pca_demand_plot\": avg_demand_plot,\n                \"percent_open_plot_since_semester\": avg_percent_open_plot_min_semester,\n                \"percent_open_plot_num_semesters\": avg_percent_open_plot_num_semesters,\n                \"percent_open_plot\": avg_percent_open_plot,\n            },\n            \"recent_plots\": {\n                \"pca_demand_plot_since_semester\": recent_demand_plot_semester,\n                \"pca_demand_plot_num_semesters\": 1 if recent_demand_plot is not None else 0,\n                \"pca_demand_plot\": recent_demand_plot,\n                \"percent_open_plot_since_semester\": recent_percent_open_plot_semester,\n                \"percent_open_plot_num_semesters\": 1 if recent_percent_open_plot is not None else 0,\n                \"percent_open_plot\": recent_percent_open_plot,\n            },\n        }\n    )", "def summary_source(classes_fold_score_list, classes_periods, classes): \n    scores = []\n    for idx in range(len(classes_periods)):\n        temp = pd.concat([classes_fold_score_list[idx], classes_periods[idx].source], axis=1)\n        temp = temp.groupby(['source', 'catalog']).size().unstack(fill_value=0).T\n        scores.append(temp)\n    score_df = pd.concat(scores, keys=classes)\n    score_df.index.set_levels([\"Wrong\", \"Right\", \"Multiply\"], \n                               level=1,\n                               inplace=True)\n    score_df = 
(score_df.T.fillna(0))\n dividend = score_df.iloc[:, score_df.columns.get_level_values(1)==\"Right\"].T.droplevel(-1).T\n divisor = score_df.T.groupby(level=0).sum().T.loc[:,[\"RRL\",\"Ceph\",\"LPV\",\"DSCT\",\"EB\"]]\n return dividend.divide(divisor).round(2).mean(axis=1)", "def time_stats(df):", "def visualise_hourly_arrivals_at_each_lab(tests_dataframe):\r\n labs_df = create_dataframe_from_csv('labs.csv')\r\n labs_df = drop_missing_values_in_dataframe(labs_df)\r\n list_of_labs = labs_df['lab_name'].to_list()\r\n for lab_name in list_of_labs:\r\n df = tests_dataframe.loc[tests_dataframe['lab_name'] == lab_name]\r\n df.time_test_arrives_lab = pd.to_datetime(df.time_test_arrives_lab)\r\n df = df.sort_values(by=\"time_test_arrives_lab\")\r\n df = df[['time_test_arrives_lab']]\r\n df = df.reset_index().set_index('time_test_arrives_lab')\r\n df = df.resample('H').count()\r\n df.plot(title = 'hourly arrivals at ' + lab_name)\r\n plt.show()", "def create_course_bars(hist_df, fig, labels):\n colors = [\n \"#60a7ba\",\n \"#f0912d\",\n \"#357025\",\n \"#ba3622\",\n \"#8f33d6\",\n \"#6a4c4d\",\n \"#cf8af3\",\n ]\n all_numbers = []\n\n for index, _ in enumerate(fig[\"layout\"][\"annotations\"]):\n all_numbers.append(float(fig[\"layout\"][\"annotations\"][index][\"text\"]))\n\n for _, idx in enumerate(hist_df.index.unique()):\n row = all_numbers.index(idx)\n show_legend = row == 0\n traces = []\n\n # Calculate subfigure position in figure\n row = (row + 1) / 2\n col = 1 if row.is_integer() else 0\n row = math.ceil(row) - 1\n\n # Calculate dataframe for plot\n task_subset_df = hist_df.loc[idx]\n task_subset_df = task_subset_df.apply(pd.value_counts).T\n task_subset_df = task_subset_df.div(task_subset_df.sum(axis=1), axis=0)\n\n # Handle case if there are only correct answers\n if task_subset_df.shape != (\n 7,\n 2,\n ): # sometimes task_subset_df is in the wrong shape\n if task_subset_df.shape != (\n 7,\n 1,\n ):\n task_subset_df = task_subset_df.T\n\n if \"correct\" in task_subset_df.columns.values:\n task_subset_df[\"incorrect\"] = 0\n\n # Each bar needs a color and a legend entry and will therefore\n # be plotted individually\n for i, color in enumerate(colors):\n trace = go.Bar(\n x=[task_subset_df.index.values[i]],\n y=[task_subset_df.incorrect[i] * 100],\n name=labels[i],\n marker={\"color\": color},\n showlegend=show_legend,\n )\n traces.append(trace)\n\n # All traces build one subfigure\n for trace in traces:\n fig.append_trace(trace, row=row + 1, col=col + 1)\n\n # Figure styling\n fig.update_layout(\n height=400 * (row + 1),\n legend={\n \"orientation\": \"h\",\n \"xanchor\": \"left\",\n \"yanchor\": \"bottom\",\n \"x\": 0.15,\n \"y\": 1.05,\n },\n )\n fig.update_xaxes(showticklabels=False)\n\n # for i in range(0, row + 1):\n fig.update_yaxes(title_text=\"Students\", row=i + 1, col=1)\n return fig", "def draw_num_classes_graphs():\n values = [10, 50, 100, 250, 1000, 4000]\n for num_classes in values:\n print(\"Training model on {} most common classes.\".format(num_classes))\n model = create_pretrained_model(num_classes=num_classes)\n histories = train(model, num_classes, epochs=50)\n run_name = get_run_name(\"{}classes\".format(num_classes))\n save_learning_curves(histories, run_name)\n csv_path = os.path.join(\"plots/\", run_name, \"data.csv\")\n ut.write_csv_dict(histories,\n keys=['loss', 'acc', 'val_loss', 'val_acc'],\n filename=csv_path)", "def preprocess(df):\n \n # drop the following columns - irrelevant now\n DROP_COLUMNS = ['id', 'original_title', 'release_date'\n , 'tmdbId', 
'popularity', 'year']\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n # drop all of the language columns\n DROP_COLUMNS = [col for col in df.columns if col[:3]==\"lan\"]\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n\n # loop through the columns we want to aggregate\n for col_type in [\n \"original_language_\"\n , \"prod_comp_cntry_\"\n , \"prod_comp_names_\"\n , \"writers_\"\n , \"actors_\"\n , \"genres_\"\n , \"director_\"\n ]:\n # create a dictionary of each unique value and its frequency\n val_freq = {}\n for col in df.columns:\n if col.startswith(col_type):\n val_freq[col] = df[col].sum()\n\n # create a dataframe from this dictionary; sort by count\n counts = pd.DataFrame.from_dict(\n val_freq\n , orient='index'\n , columns=['count']\n ).sort_values('count', ascending=False)\n counts['frac'] = counts['count'].apply(lambda x: 100*x / df.shape[0])\n\n # handle special case of production company country\n if col_type == \"prod_comp_cntry_\":\n DROP_COLUMNS = [col for col in counts.index][3:]\n\n # handle special case of directors\n elif col_type == \"director_\":\n DIRECTOR_COLS = [col for col in df.columns\n if col.startswith(\"director_\")\n and col!=\"director_pop\"]\n df['established_director'] = df[DIRECTOR_COLS].max(axis=1)\n DROP_COLUMNS = DIRECTOR_COLS\n\n # handle special case of actors\n elif col_type == \"actors_\":\n ACTORS_COLS = [col for col in df.columns if \"actors\" in col]\n df['num_top_100_actors'] = df[ACTORS_COLS].sum(axis=1)\n DROP_COLUMNS = ACTORS_COLS\n\n # handle all the other cases\n else:\n DROP_COLUMNS = [col for col in counts.query('frac < 2').index]\n\n\n df.drop(DROP_COLUMNS, axis=1, inplace=True)\n \n ##########################################################################\n # adjust the data for inflation\n CPI_tf = df['CPIAUCSL'].max()\n df['budget'] = df[['budget', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n df['revenue'] = df[['revenue', 'CPIAUCSL']].apply(\n cpi_adjust\n , args=(CPI_tf ,)\n , axis=1\n )\n # no longer need CPI data\n df.drop('CPIAUCSL', axis=1, inplace=True)\n \n ########################################################################## \n # add in useful features about the cast and crew \n df['cast_crew_sum_pop'] = (\n df['director_pop']\n + df['avg_actor_pop']\n + df['avg_writer_pop']\n )\n df['cast_crew_product_pop'] = (\n df['director_pop']\n * df['avg_actor_pop']\n * df['avg_writer_pop']\n )\n df['runtime'].replace(to_replace=0, value=df['runtime'].median(), inplace=True)\n df = df.query('10000 <= revenue').copy()\n df = df.query('100000 <= budget').copy()\n df.drop('sum_actor_pop', axis=1, inplace=True)\n df.drop('min_writer_pop', axis=1, inplace=True)\n\n # code to transform columns\n for col in [\n \"budget\", \"director_pop\", \"avg_writer_pop\"\n , \"max_writer_pop\", \"avg_actor_pop\", \"max_actor_pop\"\n , \"min_actor_pop\", 'cast_crew_sum_pop'\n , 'cast_crew_product_pop'\n ]:\n df['log10_'+col] = df[col].apply(lambda x: math.log10(x))\n df.drop(col, axis=1, inplace=True)\n \n return df", "def createGraph(self):\n self.measurements(45,50,10)\n avg = self.readFile(\"avg.pickle\")\n table = []\n for a in avg:\n table.append((a[0], a[1], a[2], a[3], a[4], \"Boolean\"))\n table.append((a[0], a[1], a[2], a[5], a[6], \"Fractional\"))\n table.append((a[0], a[1], a[2], a[7], a[8], \"Hierarchical\"))\n df = pd.DataFrame(table)\n df.columns = [\"nPages\", \"nCentroids\", \"Time\", \"Mean\", \"Std\", \"Type\"]\n print(df)\n sns.set(style = 'darkgrid')\n sns.lmplot(x = \"nCentroids\", y = \"Mean\", col = 
\"Type\", hue=\"Type\", data = df)\n #sns.lmplot(x = \"nPages\", y = \"Mean\", col = \"Type\", hue=\"Type\", data = df)\n #sns.scatterplot(x = \"nCentroids\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n #sns.scatterplot(x = \"nPages\", y = \"Mean\", size = \"Time\", hue = \"Type\", sizes = (20, 200), data = df)\n plt.show()", "def plot_wordsOverTime(df, col, column_line = None, operation = 'count', title = 'Words over time'):\n\n print('\\n*** INTERACTIVE MODE: HOVER OVER THE GRAPH TO SEE COUNTS FOR EACH YEAR***')\n\n # create a subsett of year and number of projects\n counts = df.groupby([col]).agg(operation)\n counts.reset_index(inplace = True)\n\n # create a column data source to plot in bokeh\n source = ColumnDataSource(counts)\n\n # initialize the plot\n p = figure(plot_width = 1000,\n plot_height = 450,\n title = title)\n\n # plot the trend line\n p.line(x=col, y=column_line,\n line_width=6, source=source)\n\n # set parameters\n p.xgrid.grid_line_color = None\n p.ygrid.grid_line_color = None\n p.background_fill_color = \"AliceBlue\"\n p.title.text_font_size = \"16pt\"\n p.title.text_color = 'MidnightBlue'\n p.xaxis.axis_label_text_font_size = '15pt'\n p.yaxis.axis_label_text_font_size = '15pt'\n\n p.yaxis.axis_label = 'Total number of words'\n p.xaxis.major_label_text_font_size = '12pt'\n\n # create annotation\n box = BoxAnnotation(left=2014.5, right=2016.5,\n line_width=1, line_color='black', line_dash='dashed',\n fill_alpha=0.2, fill_color='orange')\n\n # add annotation to plot\n p.add_layout(box)\n\n # add interactive hover tool\n hover = HoverTool()\n hover.tooltips = [(\"Total number of words \", \"@word_count\"), ('year', '@year')]\n\n hover.mode = 'vline'\n p.add_tools(hover)\n\n # export plots\n _=export_png(p, filename = img_path / 'wordsovertime.png')\n output_file(img_path/'wordsovertime.html')\n\n p.output_backend = \"svg\"\n export_svgs(p, filename=img_path/\"wordsovertime.svg\")\n\n #display plot\n show(p)", "def getFig3Data(df, path):\n\ttmp = pd.DataFrame()\n\t# tmp = tmp.append(df)\n\ttmp = tmp.append(df[df.Location == 'exon'])\n\ttmp = tmp.append(df[df.Location == 'intron'])\n\t# print(df[df.Location == 'exon'].NbpG4rWt)\n\t# print(df[df.Location == 'intron'].NbpG4rWt)\n\tdicoNbTr = countTranscript.getFig3Percent(path)\n\tGlobal = pd.DataFrame()\n\tgroups = tmp.groupby('Class')\n\tfor name, group in groups:\n\t\trow = sumSubTable(group, name)\n\t\trow['Class'] = name\n\t\trow = pd.DataFrame(row, index=[len(Global)+1])\n\t\tGlobal = Global.append(row)\n\t# print(sum(Global.NbpG4rWt))\n\trow = {'Class' : 'Global',\n\t\t\t'nuclG' : sum(Global.nuclG),\n\t\t\t'nuclC' : sum(Global.nuclC),\n\t\t\t'NbpG4rWt' : sum(Global.NbpG4rWt),\n\t\t\t'NbpG4rShuf' : sum(Global.NbpG4rShuf),\n\t\t\t'Tot' : sum(Global.Tot)}\n\trow = pd.DataFrame(row, index=[len(Global)+1])\n\tGlobal = Global.append(row)\n\tGlobal['nbTr'] = Global['Class'].map( dicoNbTr['Tot'] )\n\tGlobal['NbTrpG4Wt'] = Global['Class'].map( dicoNbTr['Wt'] )\n\tGlobal['NbTrpG4Shuf'] = Global['Class'].map( dicoNbTr['Shuf'] )\n\tGlobal['PercentWt'] = Global['NbTrpG4Wt'] / Global['nbTr'] * 100\n\tGlobal['PercentShuf'] = Global['NbTrpG4Shuf'] / Global['nbTr'] * 100\n\tGlobal = computeDensity(Global, 'Segment')\n\treturn Global", "def GetGraphicAverages(diagnostic_cases, diagnostic, weeks,year, n_years):\n t = 1.96\n\n current_year = Year.objects.get(year=year)\n weeks_current_year = weeks.filter(year=current_year)\n year_ob = Year.objects.filter(year__lt=year)\n weeks = 
weeks.filter(year__in=year_ob)\n\n    popu = 0\n\n    # cases per diagnostic\n    diagnostic_cases_w = diagnostic_cases\n\n    # arithmetic average of the per-week cases over the n_years years\n    averages = [0] * 52\n\n    standard_deviations = [0] * 52\n\n    # cases per week of the different years\n    cases_per_weeks = [0] * 52\n\n    for i in range(len(averages)):\n\n        f = [0]*(n_years)\n\n        year = 0\n\n        y_idx = 0\n        for w in range(len(weeks)):\n            if weeks[w].week == i+1:\n\n                if year != weeks[w].year:  # this never happens\n                    year = weeks[w].year\n                    cases = 0\n\n                for p in diagnostic_cases_w:\n\n                    if p.week == weeks[w]:\n\n                        cases += p.cases\n\n                f[y_idx] = cases\n                y_idx += 1\n\n        averages[i] = np.average(f)\n\n        standard_deviations[i] = np.std(f)\n\n        cases = 0\n        for week in weeks_current_year:\n            if week.week == i+1:\n                dia = diagnostic_cases.filter(week=week)\n\n                for d in dia:\n\n                    cases += d.cases\n\n        cases_per_weeks[i] = cases\n\n\n    # array of dots for drawing the chart of averages\n    dots_graphic_averages = []\n    # array of dots for drawing the chart of cumulatives\n    dots_graphic_cumulative = []\n\n\n    average_cumulative = 0\n    top_rank_cumulative = 0\n    cases_acumulative = 0\n    lower_rank_cumulative = 0\n\n    for i in range(len(standard_deviations)):\n        lower_rank = 0\n        top_rank = 0\n\n        if n_years != 0:\n            lower_rank = averages[i] - (t * standard_deviations[i] / math.sqrt(n_years))\n            top_rank = averages[i] + (t * standard_deviations[i] / math.sqrt(n_years))\n            if lower_rank < 0:\n                lower_rank = 0\n\n        # Cumulative dots\n        cases_acumulative += cases_per_weeks[i]\n        average_cumulative += averages[i]\n        if lower_rank >= 0:\n            lower_rank_cumulative += lower_rank\n            top_rank_cumulative += top_rank\n\n        dots_average = DotsGraphicAverage(averages[i], i+1, lower_rank, top_rank, cases_per_weeks[i])\n        dots_cumulative = DotsGraphicAverage(average_cumulative, i+1, lower_rank_cumulative, top_rank_cumulative, cases_acumulative)\n        dots_graphic_averages.append(dots_average)\n        dots_graphic_cumulative.append(dots_cumulative)\n\n\n    return dots_graphic_averages, dots_graphic_cumulative", "def k_means_montage(self, df, class_col):\n\n        n_classes = df[class_col].nunique()\n        for cl in sorted(df[class_col].unique()):\n            montage_df = df[df[class_col] == cl].path\n            imgs = [np.array(Image.open(img)) for img in montage_df]\n            imgs = np.stack(imgs)\n            plt.figure(figsize=(12, 15))\n            plt.imshow(montage(imgs, multichannel=True).astype(np.uint8))\n            plt.title(f\"Montage for Class{cl}\")", "def meetup_groups_dynamic(growth_df):\n\n    def convert_to_percent(row):\n        total_groups = row.sum()\n        return row.apply(lambda x: x * 100 / total_groups)\n\n    return growth_df.apply(convert_to_percent, axis=1)", "def trip_duration_stats(df):\r\n\r\n    print('\\nCalculating Trip Duration...\\n')\r\n    start_time = time.time()\r\n    # display total travel time\r\n    print(df.groupby(['month'])['Trip Duration'].sum())\r\n    print(df.groupby(['day_of_week'])['Trip Duration'].sum())\r\n\r\n    # display mean travel time\r\n    print(df.groupby(['month'])['Trip Duration'].mean())\r\n    print(df.groupby(['day_of_week'])['Trip Duration'].mean())\r\n\r\n\r\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n    print('-'*40)
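\r\n\r\n    # Usage sketch (the argument is illustrative; df is assumed to carry\r\n    # 'month', 'day_of_week' and 'Trip Duration' columns):\r\n    # trip_duration_stats(filtered_bikeshare_df)", "def plot_genre_and_avg_word_len(df):\n    plotting_helper_method('avg_word_len_nm', 'genre', df)\n    plt.xlim(0, 1)\n\n    plt.title('Normalized Average Word Length pr. 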
genre')\n plt.xlabel('Normalized Average Word Length')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/average_word_length')", "def plot_genre_and_normalized_word_count(df):\n plotting_helper_method('word_count_nm', 'genre', df)\n plt.title('Normalized Word count pr. genre')\n plt.xlabel('Normalized Word Count')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/normalized_word_count_plot')", "def plot_timecourses(timecourses, output_file):\n\tpyplot.plot(timecourses)\n\tpyplot.savefig(output_file)", "def grouping_cols(df, cat_percentage = 0.05, checking_itr = 10):", "def plays(df):\n tp = (\n df.query('play_type in @OFFENSE_PLAY_TYPES')\n .pivot_table(index=['game_id', 'posteam'], \n columns=['play_type'], \n values=['play_id'], \n aggfunc='count',\n fill_value=0)\n .pipe(lambda x: x.set_axis([f'{b}_plays' for a, b in x.columns], axis=1, inplace=False))\n .reset_index()\n ) \n tp['tot_plays'] = tp.loc[:, [c for c in tp.columns if '_plays' in c]].sum(axis=1)\n tp['run_pct'] = tp['run_plays'] / (tp['run_plays'] + tp['pass_plays'])\n tp['pass_pct'] = tp['pass_plays'] / (tp['run_plays'] + tp['pass_plays'])\n return tp.join(time_of_possession(df), on=['game_id', 'posteam'], how='left')", "def plot_ttc(data_frame):\n figtt, axtt = plot_var(\n data_frame=data_frame,\n x_var=\"flow\",\n y_var=\"timetC\",\n label_var=\"mpr\",\n pivot=\"distance\",\n x_label=\"Flow [veh/m]\",\n y_label=\"Time To Collision [s]\",\n t_label=\"Distance [m]: \",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n )\n return figtt, axtt", "def profil_consommation(params):\n N = 50; min_val = 120; max_val = 200;\n df = pd.DataFrame( data = np.random.rand(N, 3), columns = [\"a_i\",\"a_j\",\"a_k\"] );\n# df = pd.DataFrame( data = np.random.uniform(low=min_val, high=max_val, size=(N, 3)),\\\n# columns = [\"a_i\",\"a_j\",\"a_k\"] );\n df[\"a_j\"] = 0.3 + df[\"a_i\"] *( 1 + np.random.random(N,)*df[\"a_i\"]);\n df[\"a_k\"] = df[\"a_k\"] ; \n df[\"a_i\"] = df[\"a_i\"] + 0.6;\n df[\"a_j\"] = df[\"a_j\"] + 0.6;\n \n fig, ax = plt.subplots()\n df.plot( ax = ax);\n ax.set_yticklabels(['{:3.1f}'.format(x*100) \\\n for x in ax.get_yticks()]);\n ax.legend( df.columns, loc='upper center', bbox_to_anchor=(0.5, 1.00),\\\n ncol=3, fancybox=True, shadow=True); \n plt.savefig(params[\"path_save\"]+\\\n \"profilDeConsommationSeriesTemporelles.eps\",\\\n dpi= 190)", "def plotProgression(self, df_X, repl_strs, time_strs,\n title=\"\", ax=None, label_fontsize=16, is_plot=True, figsize=(10, 10)):\n def deDup(lst):\n seen = set()\n seen_add = seen.add\n return [x for x in lst if not (x in seen or seen_add(x))]\n # \n if ax is None:\n _, ax = plt.subplots(1, figsize=figsize)\n if self._class_names is None:\n class_dct = {k: str(k) for k in self._ser_y.values}\n class_names = list(class_dct.values())\n else:\n class_names = list(self._class_names)\n class_names.insert(0, \"\")\n df_prediction = self.predict(df_X)\n repl_dct = {}\n for repl_str in repl_strs:\n indices = list(df_prediction.index)\n bools = [repl_str in i for i in indices]\n if any(bools):\n indices = [i for i in df_prediction.index if repl_str in i]\n y_vals = np.repeat(np.nan, len(time_strs))\n # Select the dominate class from the prediction\n for idx in indices:\n time_str = _selStrFromList(idx, time_strs)\n pos = time_strs.index(time_str)\n row = df_prediction.loc[idx, :]\n val = row.max()\n # Account for the blank row in the 
plot\n y_val = 1 + [c for c in row.index if row[c] == val][0]\n y_vals[pos] = y_val\n repl_dct[repl_str] = y_vals\n # Construct plot, starting with longest first\n for y_vals in repl_dct.values():\n ax.plot(time_strs, y_vals, marker=\"o\")\n ax.set_ylim(0, len(class_names))\n yticks = ax.get_yticklabels()[0]\n labels = list(class_names)\n ax.set_xticklabels(time_strs, rotation=90, fontsize=label_fontsize)\n ax.set_yticklabels(labels, fontsize=label_fontsize)\n ax.legend(repl_strs, bbox_to_anchor=(1.0, 1), loc=\"upper right\")\n fontsize = label_fontsize + 2\n plt.title(title, fontsize=fontsize)\n if is_plot:\n plt.show()", "def plotclass_pdf(pp, s, t=None):\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n\n try:\n X4, Y4 = zip(*s.mean_cln_4)\n X6, Y6 = zip(*s.mean_cln_6)\n ax1.plot(X4, Y4, 'bo', color=\"blue\", alpha=0.4, label=\"IPv4\")\n ax1.plot(X6, Y6, 'bo', color=\"red\", alpha=0.4, label=\"IPv6\")\n except Exception as e:\n print(\"Plotting failed for host {} with error {}\".format(s.domain, e))\n return\n\n try:\n ax1.plot(s.xs4, s.spl_arr4, linewidth=4, color=\"blue\", alpha=0.4)\n ax1.plot(s.xs6, s.spl_arr6, linewidth=4, color=\"red\", alpha=0.4)\n except Exception as e:\n print(\"Not plotting host {} due to exception {}\".format(s.domain, e))\n return\n\n plt.legend(loc='lower right')\n plt.title('Host: {} ({} / {})\\n Decision: {}'.format(\n s.domain, s.ip4, s.ip6, s.dec), fontsize=10)\n plt.xlabel('measurement time (h)')\n plt.ylabel('observed offset (msec)')\n ticks = ax1.get_xticks() / 3600\n ticks = [round(t, 1) for t in ticks]\n ax1.set_xticklabels(ticks)\n # saving all in PDF\n pp.savefig(fig)\n tikz_save(\"{}.{}-{}.tex\".format(t, s.domain, hash((s.ip4, s.ip6))))\n plt.close(fig)", "def boxplots_of_classes(self, title:str, y_axis: str=\"mean activity over all neurons\", second_path: str=r'D:\\Dataframes\\double_skip_mean', show:bool=True, dest_path:str=None, show_outliers: bool=False):\n data = []\n counter = 0\n for pop in self.populations:\n df = pd.read_csv(self.path + '\\\\{}.csv'.format(pop))\n trials = df['label'].tolist()\n values = df['Component 1'].tolist()\n response = df['response'].tolist()\n \n for i in range(len(response)):\n # Removing day 4 trials\n if eval(trials[i])[0] != 4:\n data.append([response[i], values[i], \"Transition over 1 day\"])\n\n df = pd.DataFrame(data, columns = ['Labels', y_axis, \"Transition\"])\n\n self.__box_plot(df, \"Labels\", y_axis, \"Transition\", title, show=show, dest_path=dest_path, showfliers=show_outliers, order = [\"0->0\", \"0->1\", \"1->0\", \"1->1\"])", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def visualize(houses:pd.DataFrame) -> None:\n #price_distribution(houses)\n #prop_types(houses)\n #zip_code(houses)\n #year_built(houses)\n #bed_bath(houses)\n return", "def plot(self, context=None):\n\n response = requests.get(self.url).content\n table = pd.read_html(response, attrs={\"id\": \"main_table_countries_today\"})\n df = table[0].fillna(0)\n # df.drop(df.index[0], inplace=True) # World\n df.drop([\"ActiveCases\", 'Serious,Critical', 'Serious,Critical', 'Deaths/1M pop', 'Tests/ 1M pop'], axis=1, inplace=True)\n df.drop(df.columns[6], axis=1, inplace=True)\n\n if len(context) > 3:\n context = context.lower().capitalize()\n df = df.loc[df[\"Country,Other\"] == context]\n if 4 > len(context) > 1:\n context = context.upper()\n df = df.loc[df[\"Country,Other\"] == context]\n if 
len(context) <= 1:\n df = df[1:]\n\n C_Names = df[\"Country,Other\"].head(n=10).values.tolist()\n T_Cases = df[\"TotalCases\"].head(n=10).values.tolist()\n # N_Cases = df[\"NewCases\"].head(n=10).values.tolist() # not plotted\n T_Deaths = df[\"TotalDeaths\"].head(n=10).values.tolist()\n # N_Deaths = df[\"NewDeaths\"].head(n=10).values.tolist() # not plotted\n T_Recovered = df[\"TotalRecovered\"].head(n=10).values.tolist()\n T_Tests = df[\"TotalTests\"].head(n=10).values.tolist()\n\n x = np.arange(len(C_Names))\n width = 0.20\n\n fig, ax = plt.subplots()\n\n ax.bar(x - 0.30, T_Cases, width, label='TotalCases', color=\"Blue\")\n ax.bar(x - 0.10, T_Deaths, width, label='TotalDeaths', color=\"Red\")\n ax.bar(x + 0.10, T_Tests, width, label='TotalTests', color=\"Green\")\n ax.bar(x + 0.30, T_Recovered, width, label='TotalRecovered', color=\"Orange\")\n\n if len(context) > 1:\n ax.set_title(\"{}'s Situation\".format(context))\n else:\n ax.set_title(\"World's Top10 Situation\")\n\n ax.set_xticks(x)\n ax.set_xticklabels(C_Names)\n ax.legend()\n plt.ticklabel_format(style='plain', axis=\"y\")\n fig.set_size_inches(18.5, 10.5)\n fig.tight_layout()\n plt.grid()\n\n if len(context) > 1:\n font1 = {'family': 'serif',\n 'color': 'blue',\n 'weight': 'bold',\n 'size': 20}\n font2 = {'family': 'serif',\n 'color': 'red',\n 'weight': 'normal',\n 'size': 20}\n font3 = {'family': 'serif',\n 'color': 'green',\n 'weight': 'normal',\n 'size': 20}\n font4 = {'family': 'serif',\n 'color': 'orange',\n 'weight': 'normal',\n 'size': 20}\n\n # bbox=dict(facecolor='black', alpha=0.5)\n plt.text(0.863, 0.67, \"Total Cases:\\n{:,}\".format(int(T_Cases[0])), fontdict=font1, transform=ax.transAxes)\n plt.text(0.863, 0.57, \"Total Deaths:\\n{:,}\".format(int(T_Deaths[0])), fontdict=font2, transform=ax.transAxes)\n plt.text(0.863, 0.47, \"Total Tests:\\n{:,}\".format(int(T_Tests[0])), fontdict=font3, transform=ax.transAxes)\n plt.text(0.863, 0.37, \"Total Recovered:\\n{:,}\".format(int(T_Recovered[0])), fontdict=font4, transform=ax.transAxes)\n\n # plt.savefig('corona.png') # Uncomment it to save the figure\n plt.show()", "def visualise_number_of_tests_simultaneously_processed_at_each_lab(tests_dataframe):\r\n labs_df = create_dataframe_from_csv('labs.csv')\r\n labs_df = drop_missing_values_in_dataframe(labs_df)\r\n list_of_labs = labs_df['lab_name'].to_list()\r\n for index in range(len(list_of_labs)):\r\n df = tests_dataframe.loc[tests_dataframe['lab_name'] == list_of_labs[index]]\r\n df = merge_arrival_and_completion_time(df)\r\n df.plot.line(x = 'time', y = 'server_size', rot=70, title=\"Visualise the number of tests being simultaneously processed at \" + list_of_labs[index])\r\n plt.show()", "def eda_plot():\n\n df1 = pd.read_csv('eda_malware.csv')\n df2 = pd.read_csv('eda_random.csv')\n df3 = pd.read_csv('eda_popular.csv')\n\n df = pd.concat([df1, df2, df3], ignore_index=True)\n df['label'].replace([0,1],['Benign','Malware'],inplace=True)\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB', '#97C1A9']\n # b vs. m: node types counts\n f1 = pd.crosstab(df['label'], df['node_types_counts'])\n\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 5768], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. 
Malicious: Number of Node Types')\n fig.savefig('bv_node_types.png')\n\n # for a better look, limit type 5 malware to 2k counts only\n f1 = pd.DataFrame({\"3 Types\": [1, 4], \"4 Types\": [1, 407], \"5 Types\": [245, 2000], \"6 Types\": [39, 1113], \"7 Types\": [83, 487], \"8 Types\": [154, 368], \"9 Types\": [103, 286]}).rename(index={0:'Benign', 1:'Malware'})\n f1.plot(kind='bar', color=colors)\n fig = plt.gcf()\n plt.legend(loc='upper left')\n plt.title('Benign vs. Malicious: Number of Node Types')\n fig.savefig('bv_node_types1.png')\n\n # node types\n # for malware: extract node types info for node types counts > 5, and sum up each types counts\n node_types = df[(df['label'] == 'Malware') & (df['node_types_counts'] >= 5)]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [215060, 2823059, 3135725, 5641356, 10679709, 16547701]\n labels = ['Others', 'static,Node', 'public,static,Node', 'Node', 'external,Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Malware: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_m.png')\n\n # for benign: extract node types info for node types counts, and sum up each types counts\n node_types = df[(df['label'] == 'Benign')]['node_types'] #series\n lst = [ast.literal_eval(s) for s in node_types]\n\n c = Counter()\n for d in lst:\n c.update(d)\n\n df_nt = pd.DataFrame(dict(c).items(), columns=['node_types', 'counts'])\n df_nt = df_nt.sort_values(by=['counts'])\n\n sizes = [77967, 2892033, 2964924, 5287258, 6478196, 20364339]\n labels = ['Others', 'staticNode', 'public,staticNode', 'external,Node', 'Node', 'public,Node']\n\n colors = ['#EAB6AB','#D9E6F3','#CBAACB','#CCE2CB', '#FFAEA5', '#A2E1DB']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Benign: Top Node Types and Its Counts', y=1.05)\n\n plt.show()\n fig1.savefig('counts_pie_b.png')\n\n # benign vs malware: counts\n sizes = [8435, 802]\n labels = ['Benign', 'Malware']\n\n colors = ['#EAB6AB','#D9E6F3']\n\n fig1, ax1 = plt.subplots(figsize=(7, 7))\n ax1.pie(sizes, labels=labels, autopct='%1.1f%%',\n shadow=False, startangle=90, colors=colors)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n plt.title('Number of Benign vs. Malware', y=1.05)\n\n plt.show()\n fig1.savefig('bm_counts.png')\n\n # number of edges vs number of nodes\n groups = df.groupby('label')\n colors = ['#FFAEA5', '#A2E1DB']\n\n # Plot\n fig, ax = plt.subplots()\n ax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n for name, group in groups:\n if name == 'Benign':\n c = colors[0]\n else:\n c = colors[1]\n ax.plot(group.number_edges, group.number_nodes, marker='o', linestyle='', ms=4, label=name, c=c)\n ax.legend()\n ax.set_xlabel('Number of Edges')\n ax.set_ylabel('Number of Nodes')\n ax.set_title('Benign & Malware: Number of Edges vs. 
Number of Nodes', y=1.05)\n\n plt.show()\n fig.savefig('bm_edges_nodes.png')", "def compare_plot_instances(data_causal):\n col_names = data_causal.columns.values # get the columns' names\n dimension = 2 # TODO: figure out better way to organize plots by location\n\n fig = plt.figure()\n i = 1\n for cond in col_names:\n ax = fig.add_subplot(len(col_names)/dimension, dimension, i)\n df_compare = data_causal.groupby(cond)[cond].count() # displays num instances assigned to each condition\n ax = df_compare.plot(kind='bar', title=cond)\n ax.set_xlabel(cond)\n ax.set_ylabel(\"count instances\")\n i += 1\n fig.tight_layout()\n plt.show()", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n #adding histogram of times\n plt.hist(df['Start Time'].dt.hour, bins='auto', edgecolor='black')\n plt.title('Histogram of Travel Frequency by Hour')\n plt.xlabel('Hour of the Day')\n plt.ylabel('Count of Trips')\n plt.axis('tight')\n plt.grid()\n plt.show()\n \n # display the most common month\n popular_month = df['month'].mode()[0]\n print('Most Popular Month: \\n',cal.month_name[popular_month])\n\n # display the most common day of week\n popular_day = df['day'].mode()[0]\n print('Most Popular Day: \\n',popular_day )\n \n # display the most common start hour\n popular_hour = df['hour'].mode()[0]\n print('Most Popular Hour: \\n',popular_hour )\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def exercise_1(self):\n gdp = self.gdp\n phones = self.phones \n percent_literate = self.percent_literate\n # print(len(gdp), len(phones),len(percent_literate))\n print(type(self.percent_literate[1]))\n print((percent_literate[1]))\n\n # Create scatter plot with GDP on the x-axis and number of phones on the y-axis\n sns.scatterplot(x = gdp, y = phones)\n plt.show()\n\n # Change this scatter plot to have percent literate on the y-axis\n # sns.scatterplot(x=gdp, y=percent_literate) \n # plt.show()", "def run_pca(df, cols=None): \n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from sklearn.preprocessing import StandardScaler\n from sklearn.decomposition import PCA\n import mpld3\n\n # Define and markers to use for different categories\n groups_dict = {(u'D', 0):('Germany, unregulated', 'g', 'o'),\n (u'N', 0):('Norway, unregulated', 'b', 'o'),\n (u'D', 1):('Germany, regulated', 'g', '^'),\n (u'N', 1):('Norway, regulated', 'b', '^')}\n \n # Extract cols of interest\n cats = df[['country', 'regulated']]\n\n if cols:\n df = df[cols].astype(float)\n\n # Standardise the feature data\n feat_std = StandardScaler().fit_transform(df)\n\n # Setup PCA. 
Initially, choose to keep ALL components\n    pca = PCA()\n\n    # Fit model\n    pca.fit(feat_std)\n\n    # Get explained variances (in %)\n    var_exp = 100*pca.explained_variance_ratio_\n    cum_exp = np.cumsum(var_exp)\n\n    # Get eigenvalues\n    cov_mat = np.cov(feat_std.T)\n    eig_vals, eig_vecs = np.linalg.eig(cov_mat)\n\n    # Get number of EVs > 1 (Kaiser-Guttman criterion)\n    # and print summary\n    n_kgc = (eig_vals > 1).sum()\n    print('Variance explained by first %s PCs (%%):\\n' % n_kgc)\n    print(var_exp[:n_kgc])\n    print('\\nTotal: %.2f%%' % var_exp[:n_kgc].sum())\n\n    # Plot\n    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(16, 6))\n\n    # Explained variance\n    axes[0].bar(range(1, len(var_exp)+1), var_exp, \n                align='center', label='Individual components')\n    axes[0].plot(range(1, len(cum_exp)+1), cum_exp, \n                 'r-o', label='Cumulative')\n    axes[0].set_xlabel('Principal component')\n    axes[0].set_ylabel('Variance explained (%)')\n    axes[0].legend(loc='center right')\n\n    # Eigenvalues\n    axes[1].plot(range(1, len(eig_vals)+1), np.sort(eig_vals)[::-1], \n                 'r-o', label='Eigenvalues')\n    axes[1].axhline(1, c='k', ls='-', label='Kaiser-Guttman threshold')\n    axes[1].set_xlabel('Principal component')\n    axes[1].set_ylabel('Eigenvalue')\n    axes[1].legend(loc='upper right')\n\n    # PC loadings\n    loads = pd.DataFrame(data=pca.components_, \n                         columns=df.columns,\n                         index=range(1, pca.components_.shape[0]+1)).T\n\n    # Project into 2 and 3 components\n    fig = plt.figure(figsize=(16, 6))\n\n    # Plot 2 components\n    ax = fig.add_subplot(1, 2, 1)\n\n    # Refit the PCA, this time specifying 2 components\n    # and transforming the result\n    feat_reduced = PCA(n_components=2).fit_transform(feat_std)\n\n    # Build df\n    data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n                         'PC2':feat_reduced[:, 1],\n                         'country':cats['country'],\n                         'regulated':cats['regulated']})\n\n    groups = data.groupby(['country', 'regulated'])\n\n    # Plot\n    for name, group in groups:\n        ax.scatter(group['PC1'], group['PC2'], s=60,\n                   label=groups_dict[name][0],\n                   c=groups_dict[name][1],\n                   marker=groups_dict[name][2])\n\n    ax.set_xlabel('First principal component')\n    ax.set_ylabel('Second principal component')\n    ax.set_title('First two PCA directions')\n\n    # Plot 3 components\n    ax = fig.add_subplot(1, 2, 2, projection='3d', \n                         elev=-150, azim=135)\n\n    # Refit the PCA, this time specifying 3 components\n    # and transforming the result\n    feat_reduced = PCA(n_components=3).fit_transform(feat_std)\n\n    # Build df with colours\n    data = pd.DataFrame({'PC1':feat_reduced[:, 0],\n                         'PC2':feat_reduced[:, 1],\n                         'PC3':feat_reduced[:, 2],\n                         'country':cats['country'],\n                         'regulated':cats['regulated']})\n\n    groups = data.groupby(['country', 'regulated'])\n\n    # Plot\n    for name, group in groups:\n        ax.scatter(group['PC1'], group['PC2'], group['PC3'],\n                   label=groups_dict[name][0],\n                   c=groups_dict[name][1],\n                   marker=groups_dict[name][2],\n                   s=60)\n\n    ax.set_title('First three PCA directions')\n    ax.set_xlabel('First principal component')\n    ax.set_ylabel('Second principal component')\n    ax.set_zlabel('Third principal component')\n    ax.legend(bbox_to_anchor=(0.15, -0.1), frameon=True)\n    plt.show()
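\n\n    # Usage sketch (column names are illustrative, not from the source data):\n    #   loads = run_pca(chem_df, cols=['pH', 'TOC', 'Ca'])\n    #   loads.round(2)  # inspect the PC loadings per variable\n\n    return loads", "def train_age_count():\n\n    import numpy as np\n    import pandas as pd\n    import matplotlib.pyplot as plt\n    import itertools\n\n    train = pd.read_csv('./data/train.csv')\n    color_labels = pd.read_csv('./data/color_labels.csv')\n    state_labels = pd.read_csv('./data/state_labels.csv')\n\n    # Convert age from months to years\n    train.loc[train['Age'] > -1, 'Age'] = (train['Age']//12)\n\n    # Divide by dog 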
(Type = 1) and cat (Type = 2)\n dog_df = train.loc[train['Type'] == 1, ['State','Type', 'Age', 'AdoptionSpeed']]\n cat_df = train.loc[train['Type'] == 2, ['State','Type', 'Age', 'AdoptionSpeed']]\n \n dog_max_age = max(dog_df.loc[:, 'Age'])\n dog_min_age = min(dog_df.loc[:, 'Age'])\n \n cat_max_age = max(cat_df.loc[:, 'Age'])\n cat_min_age = min(cat_df.loc[:, 'Age'])\n \n dog_age_labels = []\n dog_count = []\n \n cat_age_labels = []\n cat_count = []\n \n # Find dog count for each age\n for i in range(dog_min_age, dog_max_age + 1) :\n count = (dog_df.Age == i).sum()\n if(count > 0) :\n dog_count.append(count)\n dog_age_labels.append(i)\n\n # Find cat count for each age\n for i in range(cat_min_age, cat_max_age + 1) :\n count = (cat_df.Age == i).sum()\n if(count > 0) :\n cat_count.append(count)\n cat_age_labels.append(i)\n \n # Plot pie charts\n plt.figure()\n plt.pie(dog_count, labels = dog_age_labels, startangle=90, autopct='%.1f%%')\n plt.title('Count of Dogs at Different Ages')\n plt.show()\n \n plt.figure()\n plt.pie(cat_count, labels = cat_age_labels, startangle=90, autopct='%.1f%%')\n plt.title('Count of Cats at Different Ages')\n plt.show()\n \n \n \n # Plot bar graphs\n \n plt.figure(num = None, figsize=(6,4),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(dog_age_labels))\n \n index = index[0:13]\n dog_age_labels = dog_age_labels[0:13]\n dog_count = dog_count[0:13]\n \n plt.bar(index, dog_count)\n plt.xlabel('Age in Years')\n plt.xticks(index, dog_age_labels)\n plt.ylabel('Count')\n plt.title('Count of Dogs at Different Ages')\n plt.savefig('dogAgeCount.png', bbox_inches='tight')\n \n plt.figure(num = None, figsize=(6,4),dpi=80, facecolor = 'w', edgecolor='k')\n index = np.arange(len(cat_age_labels))\n \n index = index[0:11]\n cat_age_labels = cat_age_labels[0:11]\n cat_count = cat_count[0:11]\n \n plt.bar(index, cat_count)\n plt.xlabel('Age in Years')\n plt.xticks(index, cat_age_labels)\n plt.ylabel('Count')\n plt.title('Count of Cats at Different Ages')\n plt.savefig('catAgeCount.png', bbox_inches='tight')\n plt.show()", "def calc_class_ratio(da):\n\n # list all class codes in dataset\n list_classes = (np.unique(da, return_counts=False)).tolist()\n\n # create empty dataframe & dictionary\n ratio_table = pd.DataFrame(data=None, columns=list_classes)\n date_line = {}\n\n # count all pixels, should be consistent\n total_pix = int(np.sum(da.isel(time=1)))\n\n # iterate through each year in dataset\n for i in range(0, len(da.time)):\n date = str(da.time[i].data)[0:10]\n\n # for each year iterate though each present class number\n # and count pixels\n for n in list_classes:\n number_of_pixles = int(np.sum(da.isel(time=i) == n))\n percentage = number_of_pixles / total_pix * 100\n date_line[n] = percentage\n\n # add each year's counts to dataframe\n ratio_table.loc[date] = date_line\n\n return ratio_table", "def generate_pca(X, y, cols, n_components, **kwargs):\n\n pca = PCA(n_components, **kwargs)\n pca_result = pca.fit_transform(X)\n pca_df = pd.DataFrame(pca_result, columns=cols, index=X.index)\n pca_df['label'] = y\n pca_plot = ggplot(pca_df, aes(x=\"PCA-1\", y=\"PCA-2\", color='label') ) + geom_point(size=100,alpha=0.8) + ggtitle(\"First and Second Principal Components colored by class\")\n return pca_plot", "def time_stats(df):\n\n print('\\nDisplaying the statistics on the most frequent times of travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['Month'].mode()[0]\n print('The month with the most travels for the 
selected filters is: ' +\n str(months[most_common_month-1]).title() + '.')\n\n # display the most common day of week\n most_common_day = df['day_of_week'].mode()[0]\n print('The most common day of the week for the selected filters is: ' +\n str(most_common_day) + '.')\n\n # display the most common start hour\n most_common_hour = df['Start Hour'].mode()[0]\n print('The most common start hour is for the selected filters is: ' +\n str(most_common_hour) + '.')\n\n print(\"\\nWe took {} seconds to complete this.\".format((time.time() - start_time)))\n print('-'*40)", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # Display the most common month\n month_mode = NUM_TO_MONTH[df['start_month'].mode()[0]]\n print(\"The most common month has been \" + month_mode + \".\")\n\n # Display the most common day of week\n weekday_mode = NUM_TO_WEEKDAY[df['day_of_week'].mode()[0]]\n print(\"The most common day of the week has been \" + weekday_mode + \".\")\n\n # Display the most common start hour\n start_hour_mode = [df['start_hour'].mode()[0]][0]\n print(\"The most common hour has been at \" + str(start_hour_mode) + \" hours.\")\n\n # Display the most common day\n sd_df = df[['start_year', 'start_month','start_day']]\n sd_df = sd_df.groupby(['start_day', 'start_month', 'start_year']).size().reset_index(name='counts')\n sd_df = sd_df.sort_values(by = ['counts','start_year','start_month','start_day'], ascending = [False, False, False, False ])\n\n cmonth, cday, cyear, ncounts = sd_df['start_month'].values[0], sd_df['start_day'].values[0], sd_df['start_year'].values[0], sd_df['counts'].values[0]\n print(\"The most common day has been on \" + str(cday) + \" \" + NUM_TO_MONTH[cmonth] + \" \" + str(cyear) + \", having a total of \" + str(ncounts)+ \" bike rents.\")\n\n #Time controlling\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)", "def plotting(dataframe, prod_num):\n fig, axs = plt.subplots(2, sharex=True)\n axs[0].plot(dataframe['STU'])\n axs[1].plot(dataframe['STU'].diff().dropna())\n axs[0].set_title(\"Time Series of Product\" + f\"_{prod_num}\")\n axs[1].set_title(\"Differenced Time Series of Product\" + f\"_{prod_num}\")\n plt.savefig(\"Time Series of Product\" + f\"_{prod_num}\" + \".pdf\")", "def class_proportions(histogram, years, columns, year_header=None):\r\n\r\n wh = None\r\n\r\n if histogram is not None:\r\n histogram_mask = histogram[year_header].isin(years)\r\n wh = (histogram[histogram_mask].loc[:, columns].sum() /\r\n histogram[histogram_mask].loc[:, columns].sum().sum()).values\r\n return wh", "def plot_work_trajectories(pdf, work, title=\"\"):\n plt.figure(figsize=(12, 8))\n\n nrows = 2\n ncols = 6\n workcols = 2\n for (row, direction) in enumerate(['delete', 'insert']):\n #\n # Plot work vs step\n #\n\n col = 0\n plt.subplot2grid((nrows,ncols), (row, col), colspan=(ncols-workcols))\n\n # Plot average work distribution in think solid line\n plt.plot(work[direction].mean(0), 'k-', linewidth=1.0, alpha=1.0)\n # Plot bundle of work trajectories in transparent lines\n plt.plot(work[direction].T, 'k-', linewidth=0.5, alpha=0.3)\n # Adjust axes to eliminate large-magnitude outliers (keep 98% of data in-range)\n workvals = np.ravel(np.abs(work[direction]))\n worklim = np.percentile(workvals, 98)\n nsteps = work[direction].shape[1]\n plt.axis([0, nsteps, -worklim, +worklim])\n # Label plot\n if row == 1: plt.xlabel('steps')\n plt.ylabel('work / kT')\n plt.title(\"%s NCMC in environment '%s' 
: %s\" % (title, envname, direction))\n plt.legend(['average work', 'NCMC attempts'])\n\n #\n # Plot work histogram\n #\n\n col = ncols - workcols\n plt.subplot2grid((nrows,ncols), (row, col), colspan=workcols)\n\n # Plot average work distribution in think solid line\n #nbins = 40\n workvals = work[direction][:-1,-1]\n #plt.hist(workvals, nbins)\n if workvals.std() != 0.0:\n sns.distplot(workvals, rug=True)\n else:\n print('workvals has stddev of zero')\n print(workvals)\n # Adjust axes to eliminate large-magnitude outliers (keep 98% of data in-range)\n #worklim = np.percentile(workvals, 98)\n #oldaxis = plt.axis()\n #plt.axis([-worklim, +worklim, 0, oldaxis[3]])\n # Label plot\n if row == 1: plt.xlabel('work / kT')\n plt.title(\"total %s work\" % direction)\n\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()", "def compute_features(input: str, output: str):\n co.nb.matplotlib_inline()\n\n df = pd.read_csv(input)\n\n # Show proportion of customers exited vs retained\n labels = 'Exited', 'Retained'\n sizes = [df.Exited[df['Exited'] == 1].count(), df.Exited[df['Exited'] == 0].count()]\n explode = (0, 0.1)\n fig1, ax1 = plt.subplots(figsize=(5, 4))\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, startangle=90)\n ax1.axis('equal')\n plt.title(\"Proportion of customers churned vs retained\", size=10)\n plt.show()\n\n # Drop meaningless index columns, as well as surname which would likely be\n # profiling.\n df.drop([\"RowNumber\", \"CustomerId\", \"Surname\"], axis=1, inplace=True)\n\n # Normalize balance by salary, and tenure and credit score by age.\n df[\"BalanceSalaryRatio\"] = df.Balance / df.EstimatedSalary\n df[\"TenureByAge\"] = df.Tenure / df.Age\n df[\"CreditScoreGivenAge\"] = df.CreditScore / df.Age\n\n # Arrange columns by data type for easier manipulation\n continuous_vars = ['CreditScore', 'Age', 'Tenure', 'Balance', 'NumOfProducts', 'EstimatedSalary',\n 'BalanceSalaryRatio',\n 'TenureByAge', 'CreditScoreGivenAge']\n cat_vars = ['HasCrCard', 'IsActiveMember', 'Geography', 'Gender']\n df = df[['Exited'] + continuous_vars + cat_vars]\n\n # For the one hot variables, we change 0 to -1 so that the models can capture\n # a negative relation where the attribute is inapplicable instead of 0\n df.loc[df.HasCrCard == 0, 'HasCrCard'] = -1\n df.loc[df.IsActiveMember == 0, 'IsActiveMember'] = -1\n\n # One hot encode the categorical variables\n lst = ['Geography', 'Gender']\n remove = list()\n for i in lst:\n if df[i].dtype == np.str or df[i].dtype == np.object:\n for j in df[i].unique():\n df[i + '_' + j] = np.where(df[i] == j, 1, -1)\n remove.append(i)\n df = df.drop(remove, axis=1)\n\n # Scale continuous variables to go from 0 to 1.\n min_vec = df[continuous_vars].min().copy()\n max_vec = df[continuous_vars].max().copy()\n df[continuous_vars] = (df[continuous_vars] - min_vec) / (max_vec - min_vec)\n\n # Print results\n _df_pretty(df.head().transpose().round(2))\n\n os.makedirs(os.path.dirname(output), exist_ok=True)\n df.to_csv(output)", "def generate_statistics_plots(graph_name, graph_steps):\n df_final_situation = pd.DataFrame(columns=[\"type\", \"value\"])\n df_step = pd.DataFrame(columns=[\"type\", \"step\", \"value\"])\n df_exposed = pd.DataFrame(columns=[\"step\", \"type\", \"value\"])\n\n st.markdown(\"\")\n\n for i in range(graph_steps):\n # read graph and print stats\n graph_result_path = \"./data/output/\"\n G = nx.read_gexf(f\"{graph_result_path}G_{graph_name}_step{i}.gexf\")\n print_stats(G, i, graph_name)\n\n # LINE 
CHART (append informations into dataframe)\n df_step = df_step.append(\n {\"type\": \"not_exposed\", \"step\": i, \"value\": cn.count_not_exposed(G)},\n ignore_index=True,\n )\n df_step = df_step.append(\n {\"type\": \"exposed\", \"step\": i, \"value\": cn.count_exposed(G)},\n ignore_index=True,\n )\n df_step = df_step.append(\n {\"type\": \"infected\", \"step\": i, \"value\": cn.count_infected(G)},\n ignore_index=True,\n )\n\n line_chart = px.line(\n df_step,\n x=\"step\",\n y=\"value\",\n color=\"type\",\n title=f\"Infection overall: {graph_name} step: {i}\",\n )\n\n # BAR CHART (append informations into dataframe)\n df_exposed = df_exposed.append(\n {\n \"step\": i,\n \"type\": \"opinion_leader\",\n \"value\": cn.count_exposed_opinion_leader(G),\n },\n ignore_index=True,\n )\n df_exposed = df_exposed.append(\n {\"step\": i, \"type\": \"bot\", \"value\": cn.count_exposed_bot(G)},\n ignore_index=True,\n )\n df_exposed = df_exposed.append(\n {\"step\": i, \"type\": \"user\", \"value\": cn.count_exposed_user(G)},\n ignore_index=True,\n )\n bar_chart = px.bar(\n df_exposed,\n x=\"step\",\n y=\"value\",\n color=\"type\",\n title=f\"Type of agents exposed: {graph_name} step: {i}\",\n )\n\n # PIE CHART (append informations into dataframe)\n if i == 4:\n df_final_situation = df_final_situation.append(\n {\"type\": \"not_exposed\", \"value\": cn.count_not_exposed(G)},\n ignore_index=True,\n )\n df_final_situation = df_final_situation.append(\n {\"type\": \"exposed\", \"value\": cn.count_exposed(G)},\n ignore_index=True,\n )\n df_final_situation = df_final_situation.append(\n {\"type\": \"infected\", \"value\": cn.count_infected(G)},\n ignore_index=True,\n )\n\n #### CREATE THE PLOTS\n ##Uncomment plot(..) to save the plots to disk in html format\n\n plot_folder = \"./data/plots/\"\n\n # Plotly Line Plot\n # plot(line_chart, filename=f\"{plot_folder}steps_{graph_name}.html\")\n st.plotly_chart(line_chart, use_container_width=True)\n\n # Plotly bar plot\n # plot(bar_chart, filename=f\"{plot_folder}exposed_type_{graph_name}.html\")\n st.plotly_chart(bar_chart, use_container_width=True)\n\n # Plotly final pie chart\n final_pie_chart = px.pie(\n df_final_situation, values=\"value\", names=\"type\", title=f\"Final situation plot of: {graph_name}\"\n )\n # plot(final_pie_chart, filename=f\"{plot_folder}final_situation.html\")\n st.plotly_chart(final_pie_chart, use_container_width=True)\n\n print(\"\\nStatistics calculated succesfully\")\n\n return True", "def degree_performance_visualization(degrees, scores):\n plt.plot(scores, marker=\".\", color='b');\n plt.xticks(range(len(scores)))\n ax = plt.gca()\n ax.set_xticklabels(degrees)\n plt.xlabel(\"Degrees\")\n plt.ylabel(\"Score\")\n plt.title(\"Performance over degrees\")\n plt.grid(True)\n plt.savefig(\"degree_performances_test\")", "def showEntireDataset(wl_listG, wl_listV, tsvd_graphlet_vectors, kpca_graphlet_gram, tsvd_shortestpath_vectors,\n kpca_shortestpath_gram, classes):\n for i in range(1, 8):\n if (i == 6):\n data_tsvd = tsvd_graphlet_vectors\n data_kpca = kpca_graphlet_gram\n elif (i == 7):\n data_tsvd = tsvd_shortestpath_vectors\n data_kpca = kpca_shortestpath_gram\n else:\n data_tsvd = wl_listV[i - 1]\n data_kpca = wl_listG[i - 1]\n fig = plt.figure(figsize=(15, 15))\n if (i == 6):\n fig.suptitle('Graphlet', fontsize=25)\n elif (i == 7):\n fig.suptitle('Shortest Path', fontsize=25)\n else:\n fig.suptitle(f'Weisfeiler-Lehman {i}', fontsize=25)\n ax1 = fig.add_subplot(221)\n ax2 = fig.add_subplot(222)\n ax3 = fig.add_subplot(223, 
projection='3d')\n ax4 = fig.add_subplot(224, projection='3d')\n ax1.title.set_text('2D TruncatedSVD')\n ax2.title.set_text('2D KernelPCA')\n ax3.title.set_text('3D TruncatedSVD')\n ax4.title.set_text('3D KernelPCA')\n ax1.scatter(data_tsvd[:, 0], data_tsvd[:, 1], c=classes)\n ax2.scatter(data_kpca[:, 0], data_kpca[:, 1], c=classes)\n ax3.scatter3D(data_tsvd[:, 0], data_tsvd[:, 1], data_tsvd[:, 2], c=classes)\n ax4.scatter3D(data_kpca[:, 0], data_kpca[:, 1], data_kpca[:, 2], c=classes)\n plt.show()\n print(\"________________________________________________________________________________________\")\n print()", "def sample_plots_by_scn(self, df, num_graphs, num_per_row, fig_width=16, hspace=0.6):\n\t\tnum_rows = int(np.ceil(num_graphs / num_per_row))\n\t\tfig, axes = plt.subplots(nrows=num_rows, ncols=num_per_row, figsize=(fig_width, num_rows * fig_width / 4))\n\t\tfig.subplots_adjust(hspace=hspace)\n\t\tplt.xticks(rotation=45)\n\t\tfor i, scn in enumerate(df.SystemCodeNumber.unique()[:num_graphs]):\n\t\t\ttemp_df = df[df.SystemCodeNumber == scn]\n\t\t\tax = axes[i // num_per_row, i % num_per_row]\n\t\t\tax.plot(temp_df.LastUpdated, temp_df.PercentOccupied)\n\t\t\tax.set_title('Parking Area: {}'.format(scn))\n\t\t\tax.set_xlabel('Date')\n\t\t\tax.set_ylabel('Percent Occupied')\n\t\t\tax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=1))\n\n\t\tfor ax in fig.axes:\n\t\t\tplt.sca(ax)\n\t\t\tplt.xticks(rotation=45)\n\t\tplt.show()", "def plot_ttcdprc(data_frame):\n figtt, axtt = plot_var(\n data_frame=data_frame,\n x_var=\"distance\",\n y_var=\"timeTC %\",\n label_var=\"mpr\",\n pivot=\"flow\",\n x_label=\"Distance [m]\",\n y_label=r\"Change in Time to Collision [\\%]\",\n t_label=\"Flow [veh/h]: \",\n legends=[r\"0 \\%\", r\"10 \\%\", r\"20 \\%\", r\"30 \\%\", r\"40 \\%\"],\n fnt_size={\"fontsize\": 16},\n )\n return figtt, axtt", "def time_stats(df):\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n #display the most common month\n print('\\nCalculating The Most Common Month to Travel...\\n')\n common_month = df['month'].mode()[0]\n print('Most Common Month : {} Counts {}'.format(MONTHS[common_month-1].title(),df['month'].value_counts()[common_month]))\n\n #display the most common day of week\n print('\\nCalculating The Most Common Day to Travel...\\n')\n common_day = df['day_of_week'].mode()[0]\n print('Most Common Day : {} Counts {}'.format(common_day,df['day_of_week'].value_counts()[common_day]))\n \n #display the most common start hour\n print('\\nCalculating The Most Common Start Hour to Travel...\\n')\n df['hour'] = df['Start Time'].dt.hour\n common_hour = df['hour'].mode()[0]\n print('Most Common Hour : {} Counts {}'.format(common_hour,df['hour'].value_counts()[common_hour]))\n \n print(\"\\nThis took %s seconds.\\n\" % (time.time() - start_time))\n print('******************************')\n print('-'*40)", "def plot_pca(df, rows, batch: bool =False):\n\n markers, colors = construct_point(df)\n cdict = dict(zip(rows.keys(), colors))\n marker = dict(zip(rows.keys(), markers))\n fig, ax = plt.subplots(figsize=(10, 8))\n if batch:\n # consider changing batch to 'group' for generic grouping\n labels = ['Batch {}'.format(i+1) for i in range(len(colors))]\n else:\n labels = df.index.to_list()\n add_plot(df=df, ax=ax, cdict=cdict, marker=marker, labels=labels, rows=rows)\n plt.xlabel(\"Principal Component 1\", fontsize=15)\n plt.ylabel(\"Principal Component 2\", fontsize=15)\n\n for i in df.index.to_list():\n ax.annotate(i, (df.loc[i, 
['Principal Component 1']]+.05, df.loc[i, ['Principal Component 2']]+.05))\n # ax.legend(loc='upper right', bbox_to_anchor=(1.04, 1))\n plt.tight_layout()\n ax.legend().remove()\n return fig, ax", "def pie_plot(data,ara,rd_f,cla_arr=string.ascii_lowercase):\n \n data=pd.Series(data)\n dataclass=pd.Series(value_to_class_label(ara,data,cla_arr))\n \n parti=data.groupby(dataclass).agg(rd_f)\n \n labels=parti.index\n parts = parti.tolist()\n colors = ['gold', 'yellowgreen', 'lightcoral', 'lightskyblue','peru',\n 'teal','cornflowerblue','crimson','cadetblue','beige']\n\n plt.pie(parts, labels=labels, colors=colors,\n autopct='%1.1f%%', shadow=True, startangle=140) \n plt.axis('equal')", "def visualize_classes(df, label_names, err_param_name, reduced_data_column, labels_column, cmap, title, max_n_cols=4):\n df = df[sorted(df.columns)].groupby(err_param_name).first().reset_index()\n labels = df[labels_column][0]\n\n n_rows, n_cols = get_n_rows_cols(df.shape[0], max_n_cols)\n fig, axs = plt.subplots(n_rows, n_cols, figsize=(n_cols * 3, n_rows * 3), squeeze=False, constrained_layout=True)\n for i, ax in enumerate(axs.ravel()):\n if i >= df.shape[0]:\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n continue\n reduced_data = df[reduced_data_column][i]\n sc = ax.scatter(*reduced_data.T, c=labels, cmap=cmap, marker=\".\", s=40)\n x_min, x_max, y_min, y_max = get_lims(reduced_data)\n ax.set_xlim(x_min, x_max)\n ax.set_ylim(y_min, y_max)\n err_param_val = round(df[err_param_name][i], 3)\n ax.set_title(err_param_name + \"=\" + str(err_param_val))\n ax.set_xticks([])\n ax.set_yticks([])\n fig.suptitle(title)\n n_unique = np.unique(labels).shape[0]\n cbar = fig.colorbar(sc, ax=axs, boundaries=np.arange(n_unique + 1) - 0.5, ticks=np.arange(n_unique),\n use_gridspec=True, aspect=50)\n if label_names:\n cbar.ax.yaxis.set_ticklabels(label_names)\n\n path_to_plot = generate_unique_path(\"out\", \"png\")\n fig.savefig(path_to_plot)", "def report(\n df: pd.DataFrame,\n id_col: str = \"Compound_Id\",\n columns: List[str] = [\"Compound_Id\", \"Smiles\"],\n title: str = \"Cluster Report\",\n intro: str = \"Large clusters first, similar clusters together.\",\n):\n\n def add_cluster(cl_no, sim_to=None):\n if sim_to is None:\n sim_to = \"\"\n html.append(\"<hr>\")\n else:\n sim_to = f\"(similar to {sim_to})\"\n mf_cl = mf.MolFrame(df.query(\"Cluster_No == @cl_no\")[columns])\n mf_cl = mf_cl.add_mols()\n html.append(\n f\"<br><h2>Cluster {cl_no} ({len(mf_cl.data)} Members)&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{sim_to}</h2><br>\"\n )\n grid = mfv.html_grid(mf_cl.data, id_col=\"Compound_Id\")\n html.append(grid)\n\n if id_col not in columns:\n columns = [id_col] + columns\n if \"Smiles\" not in columns:\n columns.append(\"Smiles\")\n df_repr = df.query(\"IsRepr == 'Yes'\").reset_index().drop(\"index\", axis=1)\n chem_sim = {}\n for idx, rec0 in df_repr.iterrows():\n for _, rec1 in df_repr.iloc[idx + 1 :].iterrows():\n cl0 = rec0[\"Cluster_No\"]\n cl1 = rec1[\"Cluster_No\"]\n sim = mf.chem_sim(rec0[\"Smiles\"], rec1[\"Smiles\"])\n chem_sim[(cl0, cl1)] = sim\n chem_sim[(cl1, cl0)] = sim\n\n cl_sizes = (\n df[[\"Cluster_No\", \"Compound_Id\"]]\n .groupby(by=\"Cluster_No\")\n .count()\n .reset_index()\n .rename(columns={\"Compound_Id\": \"Size\"})\n )\n cl_sizes = cl_sizes.sort_values(\"Size\", ascending=False)\n cl_order = {x: True for x in cl_sizes[\"Cluster_No\"].values}\n\n html = [f\"<h1>{title}</h1><br>{intro}<br><br>\"]\n while len(cl_order) > 0:\n cl_no = list(cl_order.keys())[0]\n 
add_cluster(cl_no)\n cl_order.pop(cl_no)\n to_remove = []\n for sim_cl in cl_order:\n if chem_sim[(cl_no, sim_cl)] > 0.45:\n add_cluster(sim_cl, cl_no)\n to_remove.append(sim_cl)\n for x in to_remove:\n cl_order.pop(x)\n\n mfht.write(mfht.page(\"\\n\".join(html)), \"Clusters.html\")", "def figure_3(df):\n\n sns.set(style=\"white\", palette=sns.color_palette(\"cubehelix\", 6)) #still 6 if osa csa\n f, axes = plt.subplots(5, 1, figsize=(6, 9)) # 6, 2 if OSA CSA\n sns.despine(top=True, bottom=True)\n # f.suptitle(\"Outcome, Grouped by Contributing Etiology\")\n\n # contains used instead of equal to include patients with multiple etio (e.g. cardiac+medication count to both)\n neurologic_df = df.loc[df['PostDx'].str.contains(\"Neurologic\")].sort_values(by='Outcome')\n cardiac_df = df.loc[df['PostDx'].str.contains(\"Cardiac\")].sort_values(by='Outcome')\n medication_df = df.loc[df['PostDx'].str.contains(\"Medication\")].sort_values(by='Outcome')\n tecsa_df = df.loc[df['PostDx'].str.contains(\"TECSA\")].sort_values(by='Outcome')\n # osacsa_df = df.loc[df['PostDx'].str.contains(\"OSA-CSA\")].sort_values(by='Outcome')\n primary_df = df.loc[df['PostDx'].str.contains(\"Primary\")].sort_values(by='Outcome')\n\n # collapse possible outcomes\n neurologic_df['col_outcome'] = neurologic_df.apply(collapse_initial_outcome, axis=1)\n cardiac_df['col_outcome'] = cardiac_df.apply(collapse_initial_outcome, axis=1)\n medication_df['col_outcome'] = medication_df.apply(collapse_initial_outcome, axis=1)\n tecsa_df['col_outcome'] = tecsa_df.apply(collapse_initial_outcome, axis=1)\n # osacsa_df['col_outcome'] = osacsa_df.apply(collapse_initial_outcome, axis=1)\n primary_df['col_outcome'] = primary_df.apply(collapse_initial_outcome, axis=1)\n\n # Create count plot for each Etio on the left, then a Pie Chart with proportion on the right\n\n hatches = ['', '||||', '']\n face_color = ['white', 'white', 'dimgrey']\n\n # Neurologic\n bar = sns.countplot(y='col_outcome', data=neurologic_df, ax=axes[0])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[0].set(xlabel=\"\", ylabel=\"Neurologic\\nConditions\")\n\n\n # Cardiac\n bar = sns.countplot(y='col_outcome', data=cardiac_df, ax=axes[1])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[1].set(xlabel=\"\", ylabel=\"Cardiac\\nConditions\")\n\n # Medication\n bar = sns.countplot(y='col_outcome', data=medication_df, ax=axes[2])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[2].set(xlabel=\"\", ylabel=\"Opiate Use\")\n\n # OSA-CSA\n # bar = sns.countplot(y='col_outcome', data=osacsa_df, ax=axes[3,0])\n # for i, this_bar in enumerate(bar.patches):\n # # Set a different hatch for each bar\n # this_bar.set_hatch(hatches[i])\n # axes[3].set(xlabel=\"\", ylabel=\"OSA-CSA\")\n # If adding OSA-CSA back, would need to increase by 1 all of the axes indices\n\n # TE-CSA\n bar = sns.countplot(y='col_outcome', data=tecsa_df, ax=axes[3])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n 
axes[3].set(xlabel=\"\", ylabel=\"TECSA\")\n\n #Primary\n bar = sns.countplot(y='col_outcome', data=primary_df, ax=axes[4])\n for i, this_bar in enumerate(bar.patches):\n # Set a different hatch for each bar\n this_bar.set_edgecolor('black')\n this_bar.set_facecolor(face_color[i])\n this_bar.set_hatch(hatches[i])\n axes[4].set(xlabel=\"Number of Patients\", ylabel=\"Primary CSA\")\n\n # Combined X axis for L side\n axes[4].get_shared_x_axes().join(axes[4], axes[3], axes[2], axes[1], axes[0]) # axes[5] would need to be added back\n axes[0].set_xticklabels(\"\")\n axes[1].set_xticklabels(\"\")\n axes[2].set_xticklabels(\"\")\n axes[3].set_xticklabels(\"\")\n # axes[4].set_xticklabels(\"\")\n # Leave bottom labels in\n\n # Resize all\n axes[0].autoscale()\n axes[1].autoscale()\n axes[2].autoscale()\n axes[3].autoscale()\n axes[4].autoscale()\n # axes[5].autoscale()\n\n f.tight_layout(rect=[0, 0, 1, 1])\n f.savefig('Figure 3 - outcome of cpap by etio.png', dpi=100)\n # plt.show()", "def plot_class_distribution(image_directory: str = './data/images/',\n filename: str = 'full_data_set',\n output_directory: str = './data/',\n title: str = None,\n labels: bool = False,\n rotate: bool = False,\n semilog: bool = False):\n class_dist = {' '.join(cat_type.split()[:2]): len(os.listdir(image_directory + cat_type))\n for cat_type in os.listdir(image_directory)}\n data_count = pd.Series(class_dist)\n # data_count = data_count[(data_count < 20000)] # & (data_count > 500)]\n plt.figure(figsize=[5, 5])\n rect = plt.bar(data_count.index, data_count.values, color=\"#637b7f\")\n if not labels:\n plt.gca().axes.get_xaxis().set_visible(False)\n if labels:\n if rotate:\n autolabel(rect, 90)\n else:\n autolabel(rect)\n if title:\n plt.title(title)\n plt.ylabel('Class Size')\n if semilog:\n plt.yscale('log')\n filename += '_log'\n plt.tight_layout()\n if rotate:\n plt.xticks(rotation=90)\n plt.tight_layout()\n plt.savefig(output_directory + filename + '_distribution.png', dpi=300)", "def make_pdf_reports(df, path):\n with PdfPages(path) as pdf:\n # settings for the file\n base = 10 # threshold for grouping points\n page_size = (11, 8.5)\n point_size = 1.5 # scatter plot point size\n\n df[\"color\"] = df.db.apply(rand_color) # adjacency color\n df[\"fuzzy_y\"] = df.y.apply(my_round) # horizontal group color\n df[\"y_color\"] = df.fuzzy_y.apply(rand_color)\n df[\"fuzzy_x\"] = df.x.apply(my_round) # vertical group color\n df[\"x_color\"] = df.fuzzy_x.apply(rand_color)\n\n # Add title and axis names\n plt.figure(figsize=page_size)\n plt.title('Horizontal Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.y_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Vertical Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.x_color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n plt.figure(figsize=page_size)\n plt.title('Block Adjacency Grouping Scatter Plot')\n plt.xlabel('x distance')\n plt.ylabel('y distance')\n plt.scatter(df.x, df.y, c=df.color, s=point_size)\n pdf.savefig() # saves the current figure into a pdf page\n plt.close()\n\n data1 = df[[\"floor\", \"swing_drop\", \"name\"]]\n data = data1.groupby([\"floor\", \"swing_drop\"]).count()\n data = data.reset_index()\n data.head()\n data = data.fillna(0)\n pivot = data.pivot(index=\"floor\", columns=\"swing_drop\", 
values=\"name\")\n pivot = pivot.fillna(0)\n order = sorted(df.floor.unique(), reverse=True)\n pivot = pivot.reindex(order)\n plt.figure(figsize=page_size)\n ax = sns.heatmap(pivot, cmap=\"BuPu\")\n ax.set_title(\"Block Qty Heatmap\")\n pdf.savefig()\n plt.close()\n\n # bar chart\n plt.rcParams.update({'font.size': 5})\n plt.figure(figsize=page_size)\n plt.title('Block Style Bar Graph')\n plt.xlabel('Names')\n plt.xticks(rotation=90)\n plt.ylabel('Quantities')\n dd = df[['name', \"guid\"]].groupby(\"name\").count()\n dd = dd.reset_index()\n dd = dd.sort_values(\"guid\")\n plt.bar(dd.name, dd.guid)\n # plt.show()\n pdf.savefig()\n plt.close()\n\n # We can also set the file's metadata via the PdfPages object:\n d = pdf.infodict()\n d['Title'] = 'Multipage PDF Example'\n d['Author'] = 'Matthew Kreidler'\n d['Subject'] = 'How to create a multipage pdf file and set its metadata'\n d['Keywords'] = 'PdfPages multipage keywords author title subject'\n d['CreationDate'] = datetime.datetime.today()\n d['ModDate'] = datetime.datetime.today()\n\n print(\"Graphs and Charts finished!\")\n return path", "def time_stats(df):\r\n\r\n # ref: https://stackoverflow.com/questions/48590268/pandas-get-the-most-frequent-values-of-a-column\r\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\r\n\r\n start_time = time.time()\r\n\r\n # Display the most popular month\r\n most_popular_month = df['Month'].mode()[0]\r\n print ('The most popular rental month: {0}'.format(calendar.month_name[most_popular_month]))\r\n\r\n # print most popular day\r\n most_popular_day = df['Start Day'].mode()[0]\r\n print ('The most popular start day of the week: {0}'.format(most_popular_day))\r\n\r\n # most popular hour\r\n most_popular_hour = df['Hour'].mode()[0]\r\n print ('The most popular rental hour is: {0}'.format(most_popular_hour))\r\n\r\n # ref: https://stackoverflow.com/questions/29645153/remove-name-dtype-from-pandas-output\r\n top_2_days = df['Start Day'].value_counts()[0:2]\r\n print ('The top 2 most popular rental days are:\\n{0}'.format(top_2_days.to_string()))\r\n\r\n top_3_hours = df['Hour'].value_counts()[0:3]\r\n print ('The top 3 most popular rental hours are:\\n{0}'.format(top_3_hours.to_string()))\r\n\r\n print('-'*40)\r\n\r\n ###### try plottling some info ####################\r\n # plot via pandas\r\n #pd.value_counts(df['Month']).plot.bar()\r\n #pd.value_counts(df['Start Day']).plot.bar()\r\n #pd.value_counts(df['Hour']).plot.bar()\r\n\r\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\r\n print('-'*40)", "def create_progression_tables(self, feat_subset, time_col, patient_col, method, bl_index, skip_no_bl=False):\n\n prog_dfs = []\n\n for df in self:\n patients = df[patient_col]\n\n # create dataframe copy to keep from alternating original dataframe\n prog_df = df[feat_subset][::]\n\n for feat in feat_subset:\n\n for patient in patients.unique():\n # collect values for sinlge patient\n pat_inds = df[df[patient_col] == patient].index\n # create value series storing the values of a patient\n values = df.loc[pat_inds, feat]\n values.index = df.loc[pat_inds, time_col]\n\n # skip patient if no baseline value is present\n if skip_no_bl:\n if bl_index not in values.index:\n prog_df.loc[pat_inds, feat] = np.nan\n continue\n\n # calculate scores for patient and reindex to merge back into dataframe copy\n scores = calc_prog_scores(values, bl_index, method)\n\n # if only NaN has been returned as score set patients progression to nan at all visits\n if type(scores) != pd.Series:\n 
prog_df.loc[pat_inds, feat] = scores\n\n else: # input normal progression scores for visits\n scores.index = pat_inds\n prog_df.loc[pat_inds, feat] = scores\n\n # get columns from original dataframe to concatinate them to resulting DF\n concat_columns = df[[patient_col, time_col]]\n prog_df = pd.concat([concat_columns, prog_df], join=\"outer\", axis=1)\n\n # add prog_df to list\n prog_dfs.append(prog_df)\n\n # keep track of which categorical features are still in the collection\n categorical_feats = list(set(self.categorical_feats).intersection(feat_subset))\n\n return DataCollection(prog_dfs, self.df_names, categorical_feats)", "def _agg_proportions(df, members=None):\n p = df.copy()\n if members is not None:\n p = p.iloc[members]\n p = p.T.assign(\n group=pd.factorize(p.columns)[0],\n label=pd.factorize(p.columns)[-1],\n value=p.sum(), #/ p.sum().sum() * p.shape[0],\n row_count=p.shape[0]\n )\n p = p[['label', 'group', 'value', 'row_count']]\n p.columns = ['label', 'group', 'value', 'row_count']\n p = list(p.T.to_dict().values())\n return p", "def SA_data_display(opt_df, all_df):\n fig, axs = plt.subplots(2, 3)\n\n axs[0,0].set_title(\"Optimal rewire attempts for circularity\")\n axs[0,0].set_ylabel(\"Percent waste %\")\n axs[0,0].set_xlabel(\"Time (s)\")\n axs[0,0].plot(opt_df[\"Time (s)\"], opt_df[\"Percent waste (%)\"])\n\n axs[0,1].set_title(\"Optimal rewire attempts acceptance probability\")\n axs[0,1].set_ylabel(\"Acceptance Probability\")\n axs[0,1].set_xlabel(\"Time (s)\") # time??\n axs[0,1].scatter(opt_df[\"Time (s)\"], opt_df[\"Probability\"])\n\n axs[0,2].set_title(\"Optimal rewire attempts temperature decrease\")\n axs[0,2].set_ylabel(\"Temperature\")\n axs[0,2].set_xlabel(\"Time (s)\") # time??\n axs[0,2].plot(opt_df[\"Time (s)\"], opt_df[\"Temperature\"])\n\n axs[1,0].set_title(\"All rewire attempts for circularity\")\n axs[1,0].set_ylabel(\"Percent waste %\")\n axs[1,0].set_xlabel(\"Time (s)\")\n axs[1,0].plot(all_df[\"Time (s)\"], all_df[\"Percent waste (%)\"])\n\n axs[1,1].set_title(\"All rewire attempts acceptance probability\")\n axs[1,1].set_ylabel(\"Acceptance Probability\")\n axs[1,1].set_xlabel(\"Time (s)\") # time??\n axs[1,1].scatter(all_df[\"Time (s)\"], all_df[\"Probability\"])\n\n axs[1,2].set_title(\"All rewire attempts temperature decrease\")\n axs[1,2].set_ylabel(\"Temperature\")\n axs[1,2].set_xlabel(\"Time (s)\") # time??\n axs[1,2].plot(all_df[\"Time (s)\"], all_df[\"Temperature\"])\n\n return plt.show()", "def time_stats(df):\n\n print('\\nDisplaying the statistics on the most frequent times of '\n 'travel...\\n')\n start_time = time.time()\n\n # display the most common month\n most_common_month = df['Month'].mode()[0]\n print('For the selected filter, the month with the most travels is: ' +\n str(months[most_common_month-1]).title() + '.')\n\n # display the most common day of week\n most_common_day = df['Weekday'].mode()[0]\n print('For the selected filter, the most common day of the week is: ' +\n str(most_common_day) + '.')\n\n # display the most common start hour\n most_common_hour = df['Start Hour'].mode()[0]\n print('For the selected filter, the most common start hour is: ' +\n str(most_common_hour) + '.')\n\n print('-'*40)", "def GetGraphicQuartiles(diagnostic_cases, diagnostic, weeks,year, n_years):\n current_year = Year.objects.get(year=year)\n weeks_current_year = weeks.filter(year=current_year)\n year_ob = Year.objects.filter(year__lt=year)\n weeks = weeks.filter(year__in=year_ob)\n\n suma = 0\n if n_years % 2 != 0:\n suma = -1\n\n\n qs = 
[0] * 3\n qss = [0] * 3\n\n diagnost_cases = diagnostic_cases.filter(week__in=weeks_current_year)\n\n dots_graphic_quartiles = [ ]\n dots_graphic_cumulative = [ ]\n cases_per_week_acumulative = 0\n\n for o in range(52):\n cases_per_years = [0] * (n_years)\n cases = 0\n i = 0\n year = 0 \n for week_idx in range(len(weeks)):\n if weeks[week_idx].week == o+1:\n if year != weeks[week_idx].year: # Esto no pasa nunca\n year = weeks[week_idx].year\n cases = 0\n for p in diagnostic_cases:\n if p.week == weeks[week_idx]:\n cases += p.cases\n cases_per_years[i] = cases\n \n i += 1\n\n ##### Getting the quantiles ;)\n\n qs[0] = np.quantile(cases_per_years, 0.25)\n qs[1] = np.quantile(cases_per_years, 0.5)\n qs[2] = np.quantile(cases_per_years, 0.75)\n qss[0] += qs[0]\n qss[1] += qs[1]\n qss[2] += qs[2]\n cases_per_week = 0\n\n ####loop to count the amount of cases in the current year\n year= None\n for week in weeks_current_year:\n if week.week == o+1:\n for d in diagnost_cases:\n year = d.week.year.year\n if d.week == week:\n cases_per_week += d.cases\n cases_per_week_acumulative += cases_per_week\n\n dots = DotsGraphicQuartile(qs[0],qs[1],qs[2],cases_per_week,o+1)\n dots_aculative = DotsGraphicQuartile(qss[0],qss[1],qss[2],cases_per_week_acumulative,o+1)\n dots_graphic_quartiles.append(dots)\n dots_graphic_cumulative.append(dots_aculative)\n\n return dots_graphic_quartiles, dots_graphic_cumulative", "def exercise_5(self):\n student_data = self.student_data\n # Create a dictionary mapping subgroup values to colors\n palette_colors = {\"Rural\": \"green\", \"Urban\": \"blue\"}\n\n # Create a count plot of school with location subgroups\n sns.countplot(x=\"school\", data=student_data\n , hue = \"location\"\n , palette = palette_colors)\n\n\n # Display plot\n plt.show()", "def do_plot(self):\n years = sorted(set(self.prediction_df_without_covid19['Year']))\n predict_without_covid_country = self.prediction_df_without_covid19[\n self.prediction_df_without_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n predict_with_covid_country = self.prediction_df_with_covid19[\n self.prediction_df_with_covid19['Country'].isin([self.country])].sort_values(['Year'],\n ascending=[True])\n # ------------------------------------------------------------------------------------------------------\n pa = \\\n predict_without_covid_country.loc[predict_without_covid_country['Year'] == 1990][\n 'Total_CO2_Emissions'].values[\n 0]\n x = []\n for i in range(len(years)):\n x.append(pa * 0.6)\n # ------------------------------------------------------------------------------------------------------\n fig = Figure()\n ax = fig.subplots()\n ax.grid(True, alpha=0.3)\n # plot_title = 'Total CO2 Emissions predicted from 2019-2030 for ' + self.country\n plot_title = 'Total ' + '$CO_2$' + ' Emissions predicted from 2019-2030 for ' + self.country\n label_country_without_covid = 'Total CO2 emissions without covid'\n label_country_with_covid = 'Total CO2 emissions with Covid-19'\n # ------------------------------------------------------------------------------------------------------\n params = {'mathtext.default': 'regular'}\n rcParams.update(params)\n rcParams['font.size'] = 7\n rcParams['lines.markersize'] = 4\n rcParams['figure.figsize'] = [7, 4]\n rcParams['figure.dpi'] = 150\n rcParams['font.family'] = 'Verdana'\n rcParams[\"font.weight\"] = \"normal\"\n font = {'family': 'Verdana',\n 'color': 'xkcd:darkgreen',\n 'weight': 'normal',\n 'size': 9,\n }\n colors = 
rcParams['axes.prop_cycle'].by_key()['color']\n l1, = ax.plot(years, predict_without_covid_country['Total_CO2_Emissions'], color='xkcd:dark blue green',\n marker='o',\n label=label_country_without_covid)\n l2, = ax.plot(years, predict_with_covid_country['Total_CO2_Emissions'], color='xkcd:neon pink', marker='.',\n label=label_country_with_covid)\n l3, = ax.plot(years, x, color='xkcd:orchid', marker='1')\n print('without covid: ', predict_without_covid_country['Total_CO2_Emissions'].values)\n print('with covid: ', predict_with_covid_country['Total_CO2_Emissions'].values)\n ax.set_xlabel('Years', fontdict=font)\n ax.set_ylabel('Emissions (Gg)', fontdict=font)\n ax.set_title(plot_title, fontsize=12, fontweight='normal')\n ax.patch.set_facecolor('xkcd:green')\n ax.set_facecolor('xkcd:pale green')\n fig.legend((l1, l2, l3), ('Prediction without Covid19', 'Prediction with Covid19', 'Paris Agreement'),\n bbox_to_anchor=(0.907, 0.89))\n fig.savefig(OUTPUT_GRAPH_PATH)", "def graph_cause_count_each(df, label):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(15, 8)\r\n plt.xticks(rotation=45)\r\n ax.set_title(label.capitalize() + \" Police Death Causes\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\")\r\n # plt.show()\r\n # save the graph as an image with the correct cause naming\r\n name = \"2_graph_cause_count_\" + label + \".png\"\r\n fig.savefig(name)", "def plot_budget_analyais_results(df, fs=8, fs_title=14, lw=3, fontsize=20, colors=['#AA3377', '#009988', '#EE7733', '#0077BB', '#BBBBBB', '#EE3377', '#DDCC77']):\n df_decomposed = df.loc[df['block'] == 'decomposed']\n df_joint = df.loc[df['block'] == 'joint']\n ticklabels = []\n num_sweeps = df_decomposed['num_sweeps'].to_numpy()\n sample_sizes = df_decomposed['sample_sizes'].to_numpy()\n for i in range(len(num_sweeps)):\n ticklabels.append('K=%d\\nL=%d' % (num_sweeps[i], sample_sizes[i]))\n fig = plt.figure(figsize=(fs*2.5, fs))\n ax1 = fig.add_subplot(1, 2, 1)\n ax1.plot(num_sweeps, df_decomposed['density'].to_numpy(), 'o-', c=colors[0], linewidth=lw, label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax1.plot(num_sweeps, df_joint['density'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax1.set_xticks(num_sweeps)\n ax1.set_xticklabels(ticklabels)\n ax1.tick_params(labelsize=fontsize)\n ax1.grid(alpha=0.4)\n ax2 = fig.add_subplot(1, 2, 2)\n ax2.plot(num_sweeps, df_decomposed['ess'].to_numpy(), 'o-', c=colors[0], linewidth=lw,label=r'$\\{\\mu, \\tau\\}, \\{c\\}$')\n ax2.plot(num_sweeps, df_joint['ess'].to_numpy(), 'o-', c=colors[1], linewidth=lw,label=r'$\\{\\mu, \\tau, c\\}$')\n ax2.set_xticks(num_sweeps)\n ax2.set_xticklabels(ticklabels)\n ax2.tick_params(labelsize=fontsize)\n ax2.grid(alpha=0.4)\n ax2.legend(fontsize=fontsize)\n ax1.legend(fontsize=fontsize)\n ax1.set_ylabel(r'$\\log \\: p_\\theta(x, \\: z)$', fontsize=35)\n ax2.set_ylabel('ESS / L', fontsize=35)", "def pc_project(\n mt: hl.MatrixTable,\n loadings_ht: hl.Table,\n loading_location: str = \"loadings\",\n af_location: str = \"pca_af\",\n) -> hl.Table:\n mt = pc_hwe_gt(mt, loadings_ht, loading_location, af_location)\n mt = mt.annotate_cols(scores=hl.agg.array_sum(mt.pca_loadings * mt.GTN))\n return mt.cols().select(\"scores\")" ]
[ "0.66838074", "0.6448972", "0.6392558", "0.6383187", "0.6325968", "0.58468354", "0.5693776", "0.5645798", "0.5629048", "0.5611402", "0.55704165", "0.5545969", "0.54707754", "0.5421868", "0.541172", "0.5408939", "0.53972", "0.5382327", "0.5376701", "0.5373505", "0.5348653", "0.5318779", "0.53117114", "0.53011274", "0.5285753", "0.527667", "0.5270411", "0.5264465", "0.5251325", "0.52398163", "0.521343", "0.51984954", "0.51901275", "0.5183569", "0.5173878", "0.5152095", "0.5149655", "0.51378024", "0.511371", "0.51097214", "0.51095784", "0.5052808", "0.5047214", "0.5042", "0.50364536", "0.5034725", "0.50232", "0.5021818", "0.5017655", "0.50174034", "0.5002934", "0.49992308", "0.49961686", "0.49929678", "0.49905917", "0.49890637", "0.4985069", "0.4982092", "0.49820316", "0.49785334", "0.4974087", "0.49709526", "0.49674344", "0.49624622", "0.49590847", "0.49545372", "0.49536157", "0.4952961", "0.4952646", "0.49493158", "0.49439347", "0.4940264", "0.4938892", "0.4929626", "0.4923391", "0.4917842", "0.49152872", "0.49061373", "0.49033207", "0.49028543", "0.4901895", "0.4895302", "0.48947206", "0.48879775", "0.4876168", "0.4867296", "0.48672375", "0.4864022", "0.48615372", "0.48605785", "0.4851806", "0.4846353", "0.4844855", "0.48427927", "0.48401394", "0.48369566", "0.48354957", "0.4835225", "0.48210415", "0.4820683" ]
0.5622483
9
Initialize a new HTTP client event router object

uri is a URI for this event router. A new URI derived from this is created for the HTTP client event relay.

host is the IP address or host name to which the HTTP connection is made.

port is the TCP port number to which the HTTP connection is made.
def __init__(self, uri=None, host='', port=8082, simplex=False):
    super(EventRouterHTTPC, self).__init__(uri)
    relayuri = self.getUri()+"/HTTPC"
    self._relay = EventRelayHTTPC(self, relayuri, host, port, simplex)
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, router, uri=None, host='', port=8082, simplex=False):\n super(EventRelayHTTPC, self).__init__(uri)\n self._router = router\n self._queue = Queue()\n self._event = threading.Event()\n self._closing = False\n self._queueEvent = threading.Event()\n self._simplex = simplex\n # Have 'router' send all subscriptions events to this object\n router.routeEventFrom(None, None, self)\n router.doSubscribeRequest(self, -1, None, None)\n\n # Create HTTP \"connection\", and start thread to respond to new events from it.\n\n \n self._httpcon = httplib.HTTPConnection(host=host, port=port)\n \n self._thread = threading.Thread(name=uri, target=self.processEvent)\n self._thread.start()\n return", "def __init__(self, uri_or_host, port=None, path=None):\r\n\r\n if port is not None:\r\n warnings.warn(\"Please use the THttpClient('http://host:port/path') syntax\", DeprecationWarning, stacklevel=2)\r\n self.host = uri_or_host\r\n self.port = port\r\n assert path\r\n self.path = path\r\n self.scheme = 'http'\r\n else:\r\n parsed = urlparse.urlparse(uri_or_host)\r\n self.scheme = parsed.scheme\r\n assert self.scheme in ('http', 'https')\r\n if self.scheme == 'http':\r\n self.port = parsed.port or httplib.HTTP_PORT\r\n elif self.scheme == 'https':\r\n self.port = parsed.port or httplib.HTTPS_PORT\r\n self.host = parsed.hostname\r\n self.path = parsed.path\r\n if parsed.query:\r\n self.path += '?%s' % parsed.query\r\n self.__wbuf = StringIO()\r\n self.__http = None\r\n self.__timeout = None", "def __init__(self, host, port):\n self.host = host\n self.port = port", "def __init__(self, host, port):\n self.host = host\n self.port = port", "def __init__(self, host, port):\n self._host = host\n self._port = port", "def __init__(self, host, port):\n self._host = host\n self._port = port\n self._data = None", "def __init__(self, router):\n self._router = router", "def __init__(self, scheme, host, port, path, query=None):\n self._hash = None\n self._str = None\n self._scheme = self._makeEmptyNone(scheme)\n self._host = host\n self._port = port\n self._path = self._makeEmptyNone(path)\n self._query = self._makeEmptyNone(query)\n self._isRegularURI = True", "def __init__(self, host, port, **kwargs):\n\n SocketHandler.__init__(self, host, port)\n BaseHandler.__init__(self, **kwargs)", "def __init__(self, host=HOST, port=PORT):\r\n self._socket = None\r\n\r\n if host is not None:\r\n self.connect(host, port)", "def __init__(self, router):\n\n self.router = router", "def __init__(self, uri):\n\n self.uri = uri", "def __init__(self, host, port = 3480):\n self.host = host\n self.port = port\n Vera.__init__(self)", "def __init__(self, host, port):\n self._closed = False\n self.host = host\n self.port = port\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((host, port))\n self.sock.listen(5)", "def main(self):\n addr = (self.uri, self.port)\n try:\n self.client.connect(addr)\n except socket.gaierror:\n print(\"[ERROR] not a valid URI. 
Try again please...\")\n else:\n print(\"[SETUP] client connected to IPv4 address\", self.uri, \"on port\", self.port)\n self.handler()", "def __init__(self, rzrip, rzrport='8026'):\n self.ip = rzrip\n self.port = rzrport\n self.url = 'http://' + rzrip + ':' + rzrport + '/razor/api'", "def __init__(self, host='http://localhost:6373'):\n self._host = host", "def __init__(self, host: str, port: int):\n self.__host: str = host\n self.__port: int = port", "def __init__(self, host):\n self.host = host", "def __init__(self, host):\n self.host = host", "def __init__(self, host, server_port):\n\n # Set up the socket connection to the server\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n # TODO: Finish init process with necessary code\n self.host = host\n self.server_port = server_port\n self.run()", "def __init__(self, uri='http://localhost:18123'):\n self.log = logging.getLogger('{}.Service'.format(__name__))\n self.uri = uri", "def __init__(self):\n self._server = None\n self._address = \"\"\n self._port = 0", "def create_connection(loop, uri):\n\n proto_pos = uri.find('://')\n protocol_name = uri[0:proto_pos]\n\n if protocol_name not in PROTOCOL_MAP:\n raise ValueError(\"Unknown protocol %s\" % protocol_name)\n\n address_str = uri[proto_pos + 3:]\n\n protocol_cls, address_parser = PROTOCOL_MAP[protocol_name]\n\n address = address_parser(address_str)\n\n connection = protocol_cls(loop, address)\n\n return connection", "def __init__(self, path):\n self.path = path\n self.fqdn = getfqdn()\n print 'INFO: host name', self.fqdn\n assert not self.fqdn.startswith('127.0.0')\n self.port = None\n self.url = None\n self.process = None\n print 'INFO: constructed web server for', self.fqdn", "def __init__(self, host='localhost', port=9090):\n self.host = host\n self.port = port\n self._stream = None\n self._io_loop = ioloop.IOLoop.current()\n self._timeout_secs = None", "def __init__(self, port_num=0):\n address = ('0.0.0.0', port_num)\n HTTPServer.__init__(self, address, self.HANDLER_CLASS)\n\n # Create a dict to store configuration values set by the client\n self.config = dict()\n\n # Start the server in a separate thread\n server_thread = threading.Thread(target=self.serve_forever)\n server_thread.daemon = True\n server_thread.start()\n\n # Log the port we're using to help identify port conflict errors\n LOGGER.debug(f'Starting service on port {self.port}')", "def __init__(self, host, port):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((host, port))\n\n self.server = self.sock.makefile('w')\n\n self.disconnected = False", "def __init__(self, port, host='', ping_timer=25):\n # TODO get these values from config\n self.host = host\n self.port = port\n self.socket = None\n self.all_connections = []\n self.all_clients = {}\n self.ping_timer_time = ping_timer", "def __init__(self, server_addr, server_port):", "def __init__(self, host='localhost', port=9091, path='/transmission/rpc',\n username=None, password=None, ssl=False, timeout=DEFAULT_TIMEOUT):\n if ssl:\n scheme = 'https'\n else:\n scheme = 'http'\n\n self.url = \"%s://%s:%d%s\" % (scheme, host, port, path)\n self.headers = {} # type: Dict[str, str]\n self.tag = 0\n self.timeout = timeout\n\n self.auth = None # type: Tuple[str, str]\n if username or password:\n self.auth = (username, password)", "def __init__( httpconn, method, uri, uriparts, version, headers ):", "def __init__(self, ae, address, ssl_context=None):\n self.ae = ae\n self.ssl_context = ssl_context\n 
self.allow_reuse_address = True\n\n TCPServer.__init__(\n self, address, RequestHandler, bind_and_activate=True\n )\n\n self.timeout = 60", "def __init__(self, port_num=0):\r\n address = ('0.0.0.0', port_num)\r\n HTTPServer.__init__(self, address, self.HANDLER_CLASS)\r\n\r\n # Create a dict to store configuration values set by the client\r\n self.config = dict()\r\n\r\n # Start the server in a separate thread\r\n server_thread = threading.Thread(target=self.serve_forever)\r\n server_thread.daemon = True\r\n server_thread.start()\r\n\r\n # Log the port we're using to help identify port conflict errors\r\n LOGGER.debug('Starting service on port {0}'.format(self.port))", "def __init__(self, env):\n self._env = env\n self._routes = []\n self._proto = None\n self._port = None\n self._state = False\n self._key = None", "def __init__(self):\n self._host = None\n self._port = None\n self._servers = []", "def __init__(self):\n self.host = socket.gethostname() # 192.168.56.1\n self.port = 33000\n self.buffer_size = 1024\n self.address = (self.host, self.port)", "def __init__(self, host, server_port):\n # Set up the socket connection to the server\n self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.run(host, server_port)\n\n # TODO: Finish init process with necessary code\n #Vegard sier vi ikke skal skrive noe her", "def __init__(self, host=\"localhost\", port=8080, apiRoot='/api/v1'):\n self.host = host\n self.port = port\n\n self.urlBase = 'http://' + self.host + ':' + str(self.port) + apiRoot + '/'", "def __init__(self, host: str, websession: aiohttp.ClientSession):\n self._host = host\n self._websession = websession", "def __init__(self, host, port, in_queue=Queue()):\n # Global settings\n self.host = host\n self.port = port\n\n # Init super class\n self.server = TCPServer(\n (self.host, self.port), TCPServerHandler, in_queue)", "def __init__(self, host, port):\n\n super(TcpListeningSocket, self).__init__(host, port)\n\n self.socket.bind( (self._host, self._port) )\n self.socket.listen(1)", "def __init__(self, web_app_host, web_app_port, request_timeout=40, retries=3, on_fail_sleep_duration=5):\n logging.Handler.__init__(self)\n self.addr = web_app_host + ':' + str(web_app_port)\n self.request_timeout = request_timeout\n self.retries = retries\n self.on_fail_sleep_duration = on_fail_sleep_duration\n self.http_client = httpclient.HTTPClient()", "def __init__(self, host=\"localhost\", port=60151, verbose=False):\n super(IGVSocketRobot, self).__init__(verbose=verbose)\n\n self.host = host\n self.port = port", "def __init__(self, host, port, map):\n asyncore.dispatcher.__init__(self, map=map)\n self.host = host\n self.port = port\n self.append_send_buffer = None\n self.append_connect = None\n self.clear()", "def __init__(self, host='localhost', port=None):\n self.host = host\n self.port = port\n self.name = '%s:%s' % (self.host, self.port)\n self.socket = None\n self.packet_size = 1024\n self._send = 0\n self._reset = 100\n # try to connect with a TimblServer instance\n self.connect()", "def __init__(self, host, port):\n self.endline = '\\n'\n self.host = host\n self.port = port\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n self.sock.connect((host, port))\n\n self.current_data = 0\n self.current_3D_points = 0\n self.is_new_data = False", "def __init__(self):\n # Create a TCP/IP socket\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def uri(self, path):\n path = ensure_slash(path)\n return 'http://127.0.0.1:%d%s' % (self.port, 
path)", "def __init__(self, host, puerto):\r\n self.host = host\r\n self.puerto = puerto\r\n self.server = None", "def __init__(self, reactor, masterIP, masterPort, commPort, extIP, extPort,\r\n loader, converter):\r\n Endpoint.__init__(self, reactor, loader, commPort)\r\n\r\n self._masterIP = masterIP\r\n self._masterPort = masterPort\r\n self._extAddress = '{0}:{1}'.format(extIP, extPort)\r\n self._loader = loader\r\n self._converter = converter\r\n\r\n self._connections = set()\r\n self._deathCandidates = {}", "def __init__(self, host='localhost', port=9090, unix_socket=None):\r\n\r\n self.host = host\r\n self.port = port\r\n self.handle = None\r\n self._unix_socket = unix_socket\r\n self._timeout = None", "def connect_to_peer(self, uri):\n self.init_socket(uri)", "def __init__(self, link_uri):\n\n self._cf = Crazyflie()\n\n self._cf.connected.add_callback(self._connected)\n self._cf.disconnected.add_callback(self._disconnected)\n self._cf.connection_failed.add_callback(self._connection_failed)\n self._cf.connection_lost.add_callback(self._connection_lost)\n\n self._cf.open_link(link_uri)\n\n print(\"Connecting to %s\" % link_uri)", "def __init__(self, type, host, port):\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n if type == \"server\":\n self._socket.bind((host, port))\n self._socket.listen(1)\n self._socket.setblocking(0)", "def __init__(self, hosts, port, path='/', timeout='10', music_server_retries=3, logger=None):\n\n self.hosts = hosts # List of IP or FQDNs\n self.port = port # Port Number\n self.path = path # Path starting with /\n self.timeout = float(timeout) # REST request timeout in seconds\n # Retires before failiing over to next Music server.\n self.music_server_retries = music_server_retries\n self.logger = logger # For logging", "def __init__(self, link_uri):\n\n self._cf = Crazyflie()\n self.sonar = Sonar()\n\n self._cf.connected.add_callback(self._connected)\n self._cf.disconnected.add_callback(self._disconnected)\n self._cf.connection_failed.add_callback(self._connection_failed)\n self._cf.connection_lost.add_callback(self._connection_lost)\n\n self._cf.open_link(link_uri)\n self.flight_log = []\n print \"Connecting to %s\" % link_uri", "def __init__(self, remote_address):\n self._local_address = None\n self.remote_address = remote_address\n self._connection_event = gevent.event.Event()\n\n self._on_connect_handlers = list()\n self._on_disconnect_handlers = list()\n\n self._socket = None\n self._reader = None\n self._writer = None\n\n self._read_lock = gevent.lock.RLock()\n self._write_lock = gevent.lock.RLock()", "def __init__(self, host, port, initialized=None, uuid=None, debug=False, no_mine=False, benchmark=False, neighbors=[]):\n\n m = sha1()\n m.update(host.encode())\n m.update(str(port).encode())\n\n self.metadata = {}\n self.metadata['done'] = initialized\n self.metadata['host'] = host\n self.metadata['port'] = port\n self.metadata['uuid'] = str(m.hexdigest()) if uuid is None else uuid\n self.metadata['debug'] = debug\n self.metadata['no_mine'] = no_mine\n self.metadata['benchmark'] = benchmark\n self.metadata['resolve_requests'] = set()\n self.metadata['resolve_lock'] = Lock()\n\n if benchmark:\n from threading import Semaphore\n self.metadata['benchmark_lock'] = Semaphore(0)\n\n if self.metadata['uuid'] == 'SYSTEM':\n raise InvalidID\n\n initialize_log(self.metadata['uuid'], debug)\n\n # Create the Blockchain object.\n self.metadata['blockchain'] = Blockchain()\n 
self.metadata['history'] = History(self.metadata['uuid'])\n\n # Create the Network Handler object.\n self.nh = NetworkHandler(self.metadata, neighbors)\n\n # Start the Network Handler main loop.\n self.nh.event_loop()", "def __init__(self, link_uri):\n\n self._cf = Crazyflie(rw_cache='./cache')\n\n self._cf.connected.add_callback(self._connected)\n self._cf.disconnected.add_callback(self._disconnected)\n self._cf.connection_failed.add_callback(self._connection_failed)\n self._cf.connection_lost.add_callback(self._connection_lost)\n\n self._cf.open_link(link_uri)\n\n print('Connecting to %s' % link_uri)", "def __init__(self, address=\"lex\", port=8000, **kwargs):\n self.connect(address, port)", "def __init__(__self__, *,\n uri: Optional[pulumi.Input[str]] = None):\n if uri is not None:\n pulumi.set(__self__, \"uri\", uri)", "def __init__(self, ip, port, header):\n \n self.header = header\n self.ip = ip\n self.port = port\n try:\n self._connect_socket()\n except socket.error as e:\n print(e)\n self.close_and_exit()", "def __init__(self, host, user, password, port=22):\n self.host = host\n self.user = user\n self.port = port\n self.password = password", "def __init__(self, host, port, in_queue=Queue()):\n\n # Settings\n self.application = Application()\n self.server = tornado.httpserver.HTTPServer(self.application)\n self.host = host\n self.port = port\n self.in_queue = in_queue\n\n # Listen to ..\n self.server.listen(self.port, self.host)\n\n # Logging settings\n logging.basicConfig(level=logging.DEBUG)\n self.logger = logging.getLogger(\"WebSocketServer\")\n self.logger.setLevel(logging.INFO)", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def __init__(self, local_port, timeout):\n self.name = (socket.gethostbyname(socket.gethostname()), local_port)\n self.name_str = ':'.join([self.name[0], str(self.name[1])])\n self.sok = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sok.bind(self.name)\n self.timeout = timeout\n self.neighbors = dict()\n self.distance_vector = dict()\n self.distance_vector[self.name_str] = Router.OtherRouter(0., self.name_str)", "def __init__(self, host=\"137.227.224.97\", port=2061, timeout=30,\n debug=False):\n if debug:\n print(\"int __init__\" + host + \"/\" + str(port) + \" timeout=\" +\n str(timeout))\n self.host = host\n self.port = port\n self.timeout = timeout\n self.debug = debug", "def __init__(self, host, url, port=80, urlprefix='', method=\"POST\", encoding=URL):\n\n logging.Handler.__init__(self)\n method = method.upper()\n if method not in [\"GET\", \"POST\"]:\n raise ValueError(\"method must be GET or POST\")\n self.host = host\n self.url = url\n self.port = port\n self.urlprefix = urlprefix\n self.method = method\n self.encoding = encoding\n # create lock for params, cannot use createLock()\n self.mylock = threading.Lock()\n # semaphore to limit the number of concurrent emitters\n if 'nemitters' in logger_config.daemon:\n self.mySemaphore = threading.Semaphore(int(logger_config.daemon['nemitters']))\n else:\n self.mySemaphore = threading.Semaphore(10)\n # parameters\n self.params = {}\n self.params['PandaID'] = -1\n self.params['User'] = 'unknown'\n self.params['Type'] = 'unknown'\n self.params['ID'] = 'tester'", "def __init__(self, url_str):\n parsed_url = 
requests.compat.urlparse(utils.to_str(url_str))\n netloc_parts = parsed_url.netloc.split(\"@\")\n if len(netloc_parts) == 1:\n username = password = None\n host_str = netloc_parts[0]\n else:\n username, password = netloc_parts[0].split(\":\")\n host_str = netloc_parts[1]\n\n host_parts = host_str.split(\":\")\n host = host_parts[0]\n\n if len(host_parts) == 1:\n port = 80\n else:\n port = int(host_parts[1])\n\n params = [\n (key, val[0] if val[0] else None)\n for key, val in parse_qs(parsed_url.query, True).items()\n ]\n\n self._info = dict(\n scheme=parsed_url.scheme or \"http\",\n username=username,\n password=password,\n host=host,\n port=port,\n path=parsed_url.path or \"/\",\n params=params,\n fragment=parsed_url.fragment\n )\n self._url = None", "def connect(self, host, port, uri, timeout):\n _abstract()", "def connect(self, host, port, uri, timeout):\n _abstract()", "def __init__(self, host, auth_function, host_override=None, extra_headers={},\n save_cookies=False):\n self.host = host\n self.host_override = host_override\n self.auth_function = auth_function\n self.authenticated = False\n self.extra_headers = extra_headers\n self.save_cookies = save_cookies\n self.opener = self._GetOpener()\n if self.host_override:\n logging.info(\"Server: %s; Host: %s\", self.host, self.host_override)\n else:\n logging.info(\"Server: %s\", self.host)", "def __init__(self, host, weight, debugfunc):\n\n\t\tself.weight = weight\n\n\t\tif \":\" in host:\n\t\t\thost = host.split(\":\")\n\t\t\tself.addr = (host[0], int(host[1]))\n\t\telse:\n\t\t\tself.addr = (host, 11211)\n\n\t\tself.debuglog = debugfunc\n\n\t\tself.deaduntil = 0\n\n\t\ttcp.TCPConnection.__init__(self, self.addr)", "def __init__(self, host_url=Constants.DEFAULT_LOCAL_HOST,\n username=Constants.DEFAULT_USERNAME,\n password=Constants.DEFAULT_PASSWORD,\n is_https=Constants.DEFAULT_HTTPS,\n verify_https=Constants.DEFAULT_VERIFY_HTTPS):\n self.router = OpenWrtLuciRPC(host_url, username, password,\n is_https, verify_https)", "def __init__(self, hostname, debugOut=None, noProto=False, connectNow=True, portNumber=4403):\n\n logging.debug(f\"Connecting to {hostname}\")\n\n server_address = (hostname, portNumber)\n sock = socket.create_connection(server_address)\n\n # Instead of wrapping as a stream, we use the native socket API\n # self.stream = sock.makefile('rw')\n self.stream = None\n self.socket = sock\n\n StreamInterface.__init__(\n self, debugOut=debugOut, noProto=noProto, connectNow=connectNow)", "def __init__(self, host, port):\n self.host = host\n self.port = port\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.sock.bind((self.host, self.port))\n self.messagesList = []\n self.connected = []", "def __init__(self, hostname, username, password, port=5392):\n\n connection_hash = str(uuid.uuid3(uuid.NAMESPACE_OID, f'{hostname}{port}{username}{password}'))\n\n self.hostname = hostname\n self.port = port\n\n self.__auth = {\n 'data': {\n 'username': username,\n 'password': password,\n 'app_name': 'NimOS REST Client'\n }\n }\n\n self.__connection_hash = connection_hash\n\n if connection_hash in SessionManager._SESSIONS:\n self.session_id, self.session_token = SessionManager._SESSIONS[connection_hash]\n self.connected = True\n self._headers = {'X-Auth-Token': str(self.session_token)}\n\n else:\n self._headers = {}\n self.session_token = None\n self.session_id = None\n self.connected = self._connect()", "def __init__(self, address, username=None, password=None, 
debug=False):\n self.container_id = \"eventhub.pysdk-\" + str(uuid.uuid4())[:8]\n self.address = urlparse(address)\n url_username = unquote_plus(self.address.username) if self.address.username else None\n username = username or url_username\n url_password = unquote_plus(self.address.password) if self.address.password else None\n password = password or url_password\n if not username or not password:\n raise ValueError(\"Missing username and/or password.\")\n auth_uri = \"sb://{}{}\".format(self.address.hostname, self.address.path)\n self.auth = self._create_auth(auth_uri, username, password)\n self.connection = None\n self.debug = debug\n\n self.clients = []\n self.stopped = False\n log.info(\"{}: Created the Event Hub client\".format(self.container_id))", "def __init__(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.PORT = 2222\n # connect on construction,\n # use for duration of a game,\n # close connection on destruction later\n self.sock.connect((\"192.168.43.180\", self.PORT))", "def __init__(self, IP=None, Port=None):\n\n self.validate_input(IP, Port)\n self.url = ('http://{}:{}/v2/'.format(self.IP, self.Port))\n self.get_templates()\n self.get_data()\n self.telnet_threads = []", "def __init__(self, uri, api_version=None, key=None, secret=None, key_file=None,\n timeout=5, log=logging.getLogger('RESTClientAPILogger')):\n self._key = key\n self._secret = secret\n if key_file:\n self._load_key(key_file)\n self.authenticated = False\n self._uri = uri\n self._version = api_version if api_version else ''\n self.timeout = timeout\n self._log = log\n self._log.debug(\"Initialized %s client for URI: %s; \" % (self.name(), self._uri))", "def __init__(self, host, port=2345):\n self.host = host\n self.port = port\n self.set_command_list()", "def __init__(self, uri, x509=None, Transport=None):\n xmlrpclib.ServerProxy.__init__(self, uri, transport=Transport)\n self.x509 = x509", "def __init__(self, hostname: str, port: int):\n # Create a dictionary of topics and callbacks\n self.callback_dict = dict()\n\n self.client = mqtt.Client(userdata=self.callback_dict)\n self.client.on_message = _on_message_handler\n self.client.connect(hostname, port, 60)", "def __init__(self, path = None, method = None, host = None):\n self.path = path\n self.method = method\n self.host = host\n self.headers = []\n self._body = None", "def __init__(self):\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.s.bind((socket.gethostname(), self.__SERVER_PORT))\n self.s.listen(5)\n print(\"<Server> Listening on {}:{}...\".format(socket.gethostname(), self.__SERVER_PORT))", "def __init__(self, url):\n self._headers = {}\n \n parsed_url = urlparse.urlparse(url)\n if parsed_url.scheme and not parsed_url.netloc:\n # If we have a scheme but no netloc, someone's entered\n # a URL like 'foo.com:123'. Add an http://to the\n # start, and reparse.\n url = 'http://' + url\n parsed_url = urlparse.urlparse(url)\n \n if not parsed_url.scheme:\n # If no scheme was provided, then the url parsing\n # won't have worked. 
Reparse.\n scheme = 'http'\n url = '%s://%s' % (scheme, url)\n parsed_url = urlparse.urlparse(url)\n else:\n scheme = parsed_url.scheme\n\n if parsed_url.netloc.find(':') < 0:\n if scheme == 'http':\n netloc = parsed_url.netloc + ':80'\n else:\n netloc = parsed_url.netloc + ':443'\n else:\n # Already had an explicit port\n netloc = parsed_url.netloc\n \n # Normalise\n self.url = urlparse.urlunparse((scheme, netloc, parsed_url.path,\n parsed_url.params, parsed_url.query, parsed_url.fragment))\n self.parsed_url = urlparse.urlparse(self.url)", "def __init__(self, uri, **kwargs):\n super(Sip, self).__init__(**kwargs)\n self.value = uri", "def __init__(self, port, server_core):\n super().__init__((\"127.0.0.1\", port), ClientHandler._Handler)\n ClientHandler._Handler.server_core = server_core", "def __init__(self,address = None):\n\t\t# I really should do some validation around here\n\t\n\t\tif address != None:\n\t\t\tself.connect(address)", "def __init__(self, node_address, bind_address):\n self.__node_address, self.__bind_address = node_address, bind_address # type: tuple\n self.__rpc_client = JSONRPCTCPClient(self.__node_address)\n self.__rpc_server = JSONRPCTCPServer(self.__bind_address, BaseHandler)", "def __init__(self, host, port):\n super(TcpThreadedListeningServer, self).__init__()\n\n self.socket = TcpListeningSocket(host, port)\n\n # if there is a problem with closing, enable the timeout\n # self.socket.timeout = 3", "def __init__(self, ip, port):\n Thread.__init__(self)\n self._address = (ip, port)\n self._shutdown = Event()\n self._has_connection = Event()\n self._server_socket = None\n self._transmit_socket = None\n self._reset_server_socket()", "def __init__(self, host=\"localhost\", port=10500):\n self.sock = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM)\n self.sock.connect((host, port))\n self.pattern = re.compile('([A-Z]+=\".*?\")')", "def __init__(self, address: str, port: int) -> None:\n super().__init__()\n self.address = address\n self.port = port", "def __init__(self, hostname, auth=None, port=None, verify=True,\n timeout=None):\n p = parse_url(hostname)\n if not p.scheme:\n raise URLError('Scheme must be provided (e.g. 
https:// '\n 'or http://).')\n else:\n if p.port and port and p.port != port:\n raise URLError('Mismatched ports provided.')\n elif not p.port and port:\n hostname = hostname + ':' + str(port)\n\n # since the system re-tries, the effective timeout\n # will be 2 times the connection timeout specified, so divide\n # it in half so the connection timeout is what the caller expects\n if timeout is None:\n self.timeout = None\n elif isinstance(timeout, Iterable):\n if len(timeout) != 2:\n raise ValueError('timeout tuple must be 2 float entries')\n self.timeout = tuple([float(timeout[0]) / 2.0,\n float(timeout[1])])\n else:\n self.timeout = float(timeout) / 2.0\n\n self.hostname = hostname\n self._ssladapter = False\n\n self.conn = requests.session()\n self.conn.auth = auth\n self.conn.verify = verify\n\n # store last full response\n self.response = None", "def __init__(self, host=\"localhost\", port=10500):\n self.sock = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM)\n self.sock.connect((host, port))\n self.pattern = re.compile('([A-Z]+=\".*?\")')", "def __init__(self, host='127.0.0.1', port=8080, context_path='ignite'):\n self._host = host\n self._port = port\n self._end_point = self.END_POINT.format(host=self._host,\n port=self._port, context_path=context_path)", "def __init__(self, host: str, port: int) -> None:\n self._host = host\n self._port = port\n self.status: dict[str, Any] = {}" ]
[ "0.74562943", "0.6506829", "0.63234913", "0.63234913", "0.63085514", "0.621488", "0.60746145", "0.6005634", "0.59841245", "0.5979701", "0.5968453", "0.5921957", "0.5919456", "0.58944875", "0.58941567", "0.5850365", "0.58042604", "0.5793125", "0.5780519", "0.5780519", "0.5779045", "0.5765875", "0.5758877", "0.57411504", "0.5736077", "0.5721256", "0.57035196", "0.5692642", "0.5692463", "0.5682872", "0.56754166", "0.56675756", "0.5654489", "0.56184095", "0.5606469", "0.55854636", "0.5547992", "0.554235", "0.55333245", "0.55279493", "0.5527415", "0.5513027", "0.55080163", "0.55024767", "0.54975843", "0.548533", "0.5474386", "0.5469434", "0.5465782", "0.5431879", "0.5429483", "0.5423546", "0.54224515", "0.5405017", "0.53840464", "0.5379832", "0.5371719", "0.5365753", "0.5365553", "0.5365259", "0.53616345", "0.53571784", "0.53546315", "0.5353208", "0.5349245", "0.53486055", "0.53457856", "0.53360844", "0.53360784", "0.5333656", "0.5320335", "0.5320335", "0.53052473", "0.530277", "0.53004724", "0.5296325", "0.5295742", "0.5294085", "0.5291195", "0.5286353", "0.52765304", "0.5270026", "0.525595", "0.5253469", "0.52406263", "0.523972", "0.5236692", "0.52354383", "0.5223495", "0.5217261", "0.5216062", "0.5207288", "0.52015465", "0.5200858", "0.51996195", "0.5198269", "0.51975054", "0.5193902", "0.51835907", "0.51802635" ]
0.7386842
1
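A minimal usage sketch for the EventRouterHTTPC constructor in the row above. Only the constructor signature and the existence of close() come from the snippets themselves; the import path and the presence of a companion HTTP event server listening on port 8082 are assumptions.

from EventLib.EventRouterHTTPC import EventRouterHTTPC  # assumed module path

router = EventRouterHTTPC(uri="local:events/router1", host="127.0.0.1", port=8082)
try:
    # subscribe handlers and publish events through the router here
    pass
finally:
    router.close()  # shuts down the embedded HTTP relay (see the next row)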
Function called to close down the event router.
def close(self):
    self._relay.close()
    super(EventRouterHTTPC, self).close()
    return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closeEvent(self, event):\n\n sys.exit()", "def on_closing_event(self):\n self.exit_event(None)", "def closeEvent(self, event):\n sys.exit(0)", "def close(self) -> None:\n self.relay(\"close\")()", "def closeEvent(self, event):\n self.exit()\n event.accept()", "def handle_close(self):\n print(self.addr, \"bye\")\n self.close()", "def quit_application(self, event):\n self.Close()\n server.closeSocket()", "def close(self):\n self._udp_handler.send('exit'.encode(encoding='utf-8'))", "def closeEvent(self, event):\r\n self.app.stop()\r\n event.accept()", "def _onExit(self, event):\n self.Close(True)", "def OnClose(self, event):\r\n if self.worker: #stop main GPIB thread\r\n self.worker.abort()\r\n time.sleep(0.3)\r\n self.Destroy()", "def closeEvent(self, event):\n self.device.disconnect()\n event.accept()", "def outCloseEvent(self):\r\n pass", "def close(self):\n Trace(\"%s close\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n self._httpcon.close()\n self._closing = True\n self._event.set()\n self._queueEvent.set()\n self._queue.put([\"closedown\",[]])\n self._thread.join()\n Trace(\"%s closed\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n return", "def closeEvent(self, event):\n self._parent.quit_application(event)", "def closeEvent(self, event):\n logger.info('see here, we can just quit application')\n QtCore.QCoreApplication.quit()", "def close(self):\n self.pi.set_watchdog(self.gpio, 0)\n if self.either_edge_cb:\n self.either_edge_cb.cancel()\n self.either_edge_cb = None", "def onClose(self, event): \n \n self.Destroy()\n return", "def OnExit(self, event):\r\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def OnExit(self, event):\n self.Close(True)", "def closeEvent(self, event):\n event.accept() # let the window close\n self.returnHome()", "def close(self):\n self.closecallback()\n self.destroy()", "def close(self):\n self.exit()", "def on_exit(self, event):\n # Close server\n if hasattr(self, 'webapp'):\n requests.get(ROOT_URL + '/shutdown')\n self.webapp = None\n\n # Close app\n sys.exit()", "def on_close(self):\n print('[INFO] closing...')\n self.stopEvent.set()\n del self.tello\n self.root.quit()", "def closeEvent(self, event):\n self.is_active = False\n app._in_event_loop = False\n super()", "def close(self):\n self.call('close')", "def handle_close(self):\n self.active = False\n self.close()", "def closeEvent(self, evt):\n self.__shutdown()", "def close(self):\n self.send(ActorExit)", "def closeEvent(self, event): # pylint: disable=invalid-name\n self.cleanup(True)\n event.accept()", "def closeEvent(self, event):\n if self.mgr.exit_check():\n event.accept()\n else:\n event.ignore()", "def closeEvent(self, event):\n if self.mgr.exit_check():\n event.accept()\n else:\n event.ignore()", "def onClose(self, *args):\n rospy.loginfo('Closing Cloud Map')\n self.root.quit()\n self.root.destroy()\n # rospy.signal_shutdown('Exited UI')", "def close(self):\n self.context['socket'].close()", "def Close(self):", "def close(self):\n print(\"CAUGHT CLOSE SIGNAL\")\n self.root.destroy()", "def closeEvent(self, e):\r\n self.returnDocker()\r\n return super().closeEvent(e)", "def close(self):\n self.stop()\n GPIO.cleanup()", "def close(self):\n logging.info(\"Closing controller\")\n self.controller.close()\n\n # Send close event to UDP thread\n logging.info(\"Set event to close UDP_Listener\")\n self.UDP_ListenerEvent.clear()\n\n # Send close event to serial thread\n 
logging.info(\"Set event to close Serial_Listener\")\n self.Serial_ListenerEvent.clear()\n\n # Send close event to controller event loop\n logging.info(\"Set event to close ControllerEventLoop\")\n self.ControllerEventLoop_ListenerEvent.clear()\n\n self.master.after(0, self.master.destroy)", "def close(self):\r\n pass", "def shutdown(self):\n self.exit_app()", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def close(self):", "def exit(self):\n LOGGER.debug(\"State 'open' exited\")\n self.door_model.stop_door_signal()", "def close(self):\r\n pass", "def close(self):\r\n pass", "def close(self):\r\n pass", "def OnClose(self, event):\r\n pos.app.main.Exit()", "def on_closing(self, *args):\n pass", "def on_close(self, event):\r\n if self.thread is not None:\r\n self.thread.abort = True\r\n if self.tester is not None:\r\n try:\r\n self.tester.Close()\r\n except:\r\n pass\r\n self.close_debug_console()\r\n event.Skip()", "def close(self):\n\n\t\t# close connection\n\t\tself.eyetribe.close()\n\t\tself.connected = False", "def close(self):\n ...", "def close(self):\n ...", "def close(self) -> None:\n _LOGGER.info('Shutting down connections to deCONZ.')\n if self.websocket:\n self.websocket.stop()", "def onClose(self, event):\n pass", "def onClose(self, event):\n pass", "def onClose(self, event):\n pass", "def shutdown(self):", "def closeEvent(self, event):\n super(MainWindow, self).closeEvent(event)\n try:\n reactor.stop()\n except error.ReactorNotRunning: # if reactor has not been run in the session\n pass", "def onClose (self):\n \n pass", "def close(self):\n\n\t\tif (self.port != None) and (self.state == State.Connected):\n\t\t\tself.send(\"atz\")\n\t\t\tself.port.close()\n\n\t\tself.port = None\n\t\tself.ELMver = \"Unknown\"", "def closeEvent(self, event=None):\n log.debug(\"Cleanup close, %s\", event)\n result = self.dev.close_pipe()\n log.info(\"Close pipe result: %s\", result)\n self.data_timer.stop()\n self.close()", "def exited(event=None):\n clientSock.close()\n GUI.quit()", "def close(self):\n pass", "def close(self):\n pass", "def close(self):\n # FIXME: Does this even work?\n if self.handler_timeout:\n GLib.Source.remove(self.handler_timeout)", "def __exit__(self, *args, **kwargs):\n\n self.sock.close()", "def onQuit(self, event):\n\n\t\tself.onClose(None)", "def close(self):\n print 'close'", "def closeEvent(self, event):\n self.is_active = False\n app._in_event_loop = False\n # It seems that interactor closes slower than the windows preventing from properly closing the interface.\n # The work-around is to wait a little bit\n time.sleep(0.1)\n super()", "def closeEvent(self, e):\n self.__diffGenerator.stopProcesses()\n e.accept()", "def close(self):\n self.device.disconnect()", "def __exit__(self, *args):\n self.sock.close()", "def shutdown(self):\r\n self.socket.close()\r\n # self.socket_video.close()\r\n self.socket_state.close()", "def exit(self):\n self.close()", "def __window_close(self):\n pass", "def close(self):\n\n #Kill all zombie PIDs and exit gracefully\n try:\n self.webdriver.quit()\n except:\n pass\n if 'p' not in sys.argv:\n self.kill()\n sys.exit()", "def closeConfiguration(self):\n self.parent.closeDresser()", "def close(self):\n self.destroy()", "def shutdown(self):\n ...", "def close_UDP_connection(self):\n self.beacon.stop_beaconing()", "def ev_windowclose(self, event: WindowEvent) -> None:", "def shutdown(self):\n 
self.exit_event.set()", "def close(self):\n self.__exit__(None, None, None)", "def close(self):\n self.__exit__(None, None, None)", "def exit(self):\n \t\troot.destroy()\n \t\tpass", "def close(self):\n self.state = False\n self.mainwindow.sendMessage('a')\n print(\"closing \" + self.name)", "def close_epd():\n if not epd:\n return\n epd.sleep()\n epd.Dev_exit()\n # epd2in13_V2.epdconfig.module_exit()" ]
[ "0.71471226", "0.71452016", "0.70188725", "0.7006168", "0.6977439", "0.6927479", "0.68837065", "0.6871763", "0.68616617", "0.68096364", "0.6793978", "0.6784437", "0.67712516", "0.6754609", "0.6701007", "0.6697934", "0.6681519", "0.6672537", "0.6667044", "0.66509724", "0.66509724", "0.66509724", "0.66509724", "0.6642087", "0.6641009", "0.6638897", "0.66380095", "0.66341525", "0.66270345", "0.66220933", "0.6616018", "0.66149795", "0.6613918", "0.66078264", "0.6598393", "0.6598393", "0.6596406", "0.65932244", "0.6581275", "0.65571696", "0.6544845", "0.6531249", "0.6516284", "0.65148854", "0.6514599", "0.6507564", "0.6507564", "0.6507564", "0.6507564", "0.6507564", "0.6507564", "0.6507564", "0.6507564", "0.6507564", "0.6507564", "0.6506006", "0.64994013", "0.64994013", "0.64994013", "0.6495637", "0.6495389", "0.64891607", "0.6485709", "0.6483492", "0.6483492", "0.64782745", "0.6473767", "0.6473767", "0.6473767", "0.6473759", "0.6471386", "0.6466172", "0.6459654", "0.64572513", "0.64495087", "0.6435291", "0.6435291", "0.64342827", "0.64218956", "0.6421358", "0.64204806", "0.64161026", "0.6414849", "0.64127225", "0.6411985", "0.6406884", "0.6405402", "0.6405337", "0.6403815", "0.6401131", "0.63974416", "0.6391037", "0.6379412", "0.6378136", "0.6376217", "0.6370583", "0.6370583", "0.63665265", "0.63662374", "0.63609624" ]
0.73405606
0
Initialize a new HTTP client event-passing object.

An HTTP client is associated with an existing event router: it sends all messages received from that router to the HTTP connection, and forwards all messages received from the HTTP connection back to the router. Interaction with the indicated EventRouter object takes place primarily through the 'receive' methods of this class and of the supplied router. Because messages received from HTTP are sent onwards using the normal forwarding mechanisms, this class must perform loop detection to stop events being bounced back to the HTTP connection. (A usage sketch follows this record.)
def __init__(self, router, uri=None, host='', port=8082, simplex=False): super(EventRelayHTTPC, self).__init__(uri) self._router = router self._queue = Queue() self._event = threading.Event() self._closing = False self._queueEvent = threading.Event() self._simplex = simplex # Have 'router' send all subscriptions events to this object router.routeEventFrom(None, None, self) router.doSubscribeRequest(self, -1, None, None) # Create HTTP "connection", and start thread to respond to new events from it. self._httpcon = httplib.HTTPConnection(host=host, port=port) self._thread = threading.Thread(name=uri, target=self.processEvent) self._thread.start() return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, uri=None, host='', port=8082, simplex=False):\n super(EventRouterHTTPC, self).__init__(uri)\n relayuri = self.getUri()+\"/HTTPC\"\n self._relay = EventRelayHTTPC(self, relayuri, host, port, simplex)\n return", "def __init__(self, router):\n self._router = router", "def __init__(self):\n\n self.loop = asyncio.get_event_loop()\n self.aiohttp = web.Application(\n loop=self.loop,\n middlewares=[unhandled_route],\n )\n self.client = ClientSession()\n self.ws = WebSocketHandler(self)\n self.cert = self._load_ssl_certificate()\n\n self.config()", "def __init__(self, router):\n\n self.router = router", "def __init__(self, remote_address):\n self._local_address = None\n self.remote_address = remote_address\n self._connection_event = gevent.event.Event()\n\n self._on_connect_handlers = list()\n self._on_disconnect_handlers = list()\n\n self._socket = None\n self._reader = None\n self._writer = None\n\n self._read_lock = gevent.lock.RLock()\n self._write_lock = gevent.lock.RLock()", "def __init__(self, ae, address, ssl_context=None):\n self.ae = ae\n self.ssl_context = ssl_context\n self.allow_reuse_address = True\n\n TCPServer.__init__(\n self, address, RequestHandler, bind_and_activate=True\n )\n\n self.timeout = 60", "def processEvent(self):\n # Note: break out of event dispatch loop when closedown event is received\n # and closing flag is set. This is to prevent DoS attack by faked closedown\n # event type, and to ensure that prior events received are all processed.\n delay_on_error_min = 0.125 # Back off retry interval on error..\n delay_on_error_max = 20.0 # ..\n delay_on_error = delay_on_error_min # ..\n while True:\n if delay_on_error < delay_on_error_max:\n delay_on_error *= 2\n try:\n # PLEASE NOTE: In the event that the HTTPC is run as duplex, not simplex\n # then the post methods will be delayed if nothing is sent down to the client\n # from the server. 
This timeout is controlled by QUEUE_WAIT_TIMEOUT in EventRouterHTTPS.py\n if self._simplex == True:\n self._queueEvent.wait()\n self._queueEvent.clear()\n \n if not self._queue.empty():\n Trace(\"%s queue.get ...\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n ###msgbody = self._queue.get()\n ###Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n ###self._event.set()\n msgbody = self.getQueuedItem()\n [typ,env] = msgbody\n if typ == \"closedown\":\n if self._closing: break\n else:\n # process request as an HTTP POST request\n data = makeEnvelopeData(env)\n headers = { \"Content-type\": \"text/plain\",\n \"Accept\": \"text/plain\",\n \"Content-length\": str(len(data)) }\n self._httpcon.request(\"POST\", \"/request_path_ignored\", data, headers)\n response = self._httpcon.getresponse()\n delay_on_error = delay_on_error_min\n elif self._simplex == False:\n # Nothing in queue:\n # issue a GET for incoming events\n _log.info(\"%s HTTP get ...\"%(self.getUri()))\n headers = { \"Accept\": \"text/plain\" }\n self._httpcon.request(\"GET\", \"/request_path_ignored\", None, headers)\n response = self._httpcon.getresponse()\n if response.status == 200:\n delay_on_error = delay_on_error_min\n msgbody = response.read()\n Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n # Parse message and act accordingly\n msgdata = parseMessageData(msgbody)\n Trace(\"%s get msgdata: %s\"%(self.getUri(),str(msgdata)), \"EventLib.EventRelayHTTPC\")\n if msgdata == None:\n #TODO: Log \"Request body malformed\"\n pass\n elif msgdata[0] == \"forward\":\n # msgdata = [\"forward\", [['R1', 'R2', 'R3'], 'ev:typ', 'ev:src', 'payload']]\n event = makeEvent(evtype=msgdata[1][1],source=msgdata[1][2],payload=msgdata[1][3])\n env = constructEnvelope(msgdata[1][0], event)\n self.forward(event, env)\n elif msgdata[0] == \"idle\":\n # Idle response gives client a chance to send if anything is queued\n pass\n else:\n #TODO: handle closedown message?\n Warn( \"%s Request body unrecognized option: %s\"%(self.getUri(),msgdata[0]), \"EventRelayHTTPC\")\n pass\n elif response.status == 503:\n Trace( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n # Remote end closed down\n break\n else:\n # TODO: (log error response)\n Warn( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n \n except httplib.BadStatusLine, e:\n # This can happen at closedown\n Info( \"%s processEvent bad response: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.CannotSendRequest, e:\n # This can happen at closedown\n Info( \"%s Cannot send request: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.ResponseNotReady, e:\n # This can happen at startup and sometimes other times:\n # maybe multiple requests on a single HTTP connection object?\n Info( \"%s Response not ready: (%s)\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except socket.error, e:\n Warn( \"%s Socket error: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n return", "def __init__(\n self,\n client_id: str,\n loop: asyncio.BaseEventLoop = None,\n pipe: int = 0,\n handler: callable = None\n ):\n self._rpc = AioClient(client_id, loop=loop, pipe=pipe, handler=handler)\n 
self.loop = self._rpc.loop\n self._status = {'pid': os.getpid()}\n self.updating_loop = None", "def __init__(self, application):\n self.app = application\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.response_headers = \"\"\n self.times = 0", "def __init__(self, params={}):\n self.lt_ses = lt.session() # pylint: disable=no-member\n self.lt_ses.listen_on(6881, 6891)\n\n self.params = params\n self.queue = deque()\n self.stream_thread = None\n self.handle = None", "def __init__(self):\n # Create a TCP/IP socket\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def __init__(self, host, port=None, strict=None, \r\n timeout=socket._GLOBAL_DEFAULT_TIMEOUT,\r\n source_address=None,\r\n username=None, password=None,\r\n certChain=None, privateKey=None,\r\n checker=None,\r\n settings=None,\r\n ignoreAbruptClose=False, \r\n anon=False):\r\n if source_address:\r\n httplib.HTTPConnection.__init__(self, host, port, strict,\r\n timeout, source_address)\r\n if not source_address:\r\n httplib.HTTPConnection.__init__(self, host, port, strict,\r\n timeout)\r\n self.ignoreAbruptClose = ignoreAbruptClose\r\n ClientHelper.__init__(self,\r\n username, password, \r\n certChain, privateKey,\r\n checker,\r\n settings, \r\n anon)", "def __init__(self, assoc, client_socket=None, address=('', 0)):\n self._assoc = assoc\n\n if client_socket is not None and address != ('', 0):\n LOGGER.warning(\n \"AssociationSocket instantiated with both a 'client_socket' \"\n \"and bind 'address'. The original socket will not be rebound\"\n )\n\n if client_socket is None:\n self.socket = self._create_socket(address)\n self._is_connected = False\n else:\n self.socket = client_socket\n self._is_connected = True\n # Evt5: Transport connection indication\n self.event_queue.put('Evt5')\n\n self.tls_args = None\n self.select_timeout = 0.5", "def __init__(self, event_type, source, target, orcbot, raw, data=None):\n self.event_type = event_type\n self.source = source\n self.target = target\n self.data = data\n self.socket = source\n self.message = raw\n self.orcbot_socket = orcbot", "def initialize_networkHandler(self):\n\t\tself.networkHandler = NetworkHandler(\n\t\t\tself.callbackQueue,\n\t\t\tself.received_order,\n\t\t\tself.set_light_callback,\n\t\t\tself.newOrderQueue,\n\t\t\tself.startedOrderQueue,\n\t\t\tself.lost_connection\n\t\t\t)", "def connect(cls, url=None, router=None, timeout=10, ssl_domain=None, sasl=None, edge_router=None):\n url_ = Url(url) # Convert string to Url class.\n\n if url_.path is not None:\n path = url_.path\n elif router:\n path = '_topo/0/%s/$management' % router\n elif edge_router:\n path = '_edge/%s/$management' % edge_router\n else:\n path = u'$management'\n connection = BlockingConnection(url,\n timeout=timeout,\n ssl_domain=ssl_domain,\n sasl_enabled=bool(ssl_domain or sasl),\n allowed_mechs=str(sasl.mechs) if sasl and sasl.mechs is not None else None,\n user=str(sasl.user) if sasl and sasl.user is not None else None,\n password=str(sasl.password) if sasl and sasl.password is not None else None)\n try:\n return cls(connection, path)\n except Exception:\n # ownership of connection has not been given to a new Node; close the connection\n connection.close()\n raise", "def __init__(self, client_id, rhost, rport, lhost, lport, map):\n asyncore.dispatcher.__init__(self, map=map)\n self.client_id = client_id\n self.lhost = lhost\n self.lport = lport\n self.rhost = rhost\n self.rport = rport\n 
self.molo_tcp_pack = MoloTcpPack()\n self.tranparency = None\n self.append_recv_buffer = None\n self.append_send_buffer = None\n self.append_connect = None\n self.client_token = None\n self.clear()", "def handle_client(self):\n e = threading.Event()\n reg_t = threading.Thread(target=self.handle_reg_client, args=(e,))\n stream_t = threading.Thread(target=self.handle_stream_client,\n args=(e,))\n reg_t.start()\n stream_t.start()", "def receive(self, fromrouter, envelope):\n event = envelope.unWrap(self.getUri())\n if event:\n Trace(\"%s receive %s from %s\"%(self.getUri(),event,fromrouter), \"EventLib.EventRelayHTTPC\")\n return self.queueItem([\"forward\",envelope])\n return makeDeferred(StatusVal.OK)", "def __init__(self):\r\n self._map1 = {\r\n \"CIRC\" : self.circ_status_event,\r\n \"STREAM\" : self.stream_status_event,\r\n \"ORCONN\" : self.or_conn_status_event,\r\n \"STREAM_BW\" : self.stream_bw_event,\r\n \"BW\" : self.bandwidth_event,\r\n \"DEBUG\" : self.msg_event,\r\n \"INFO\" : self.msg_event,\r\n \"NOTICE\" : self.msg_event,\r\n \"WARN\" : self.msg_event,\r\n \"ERR\" : self.msg_event,\r\n \"NEWDESC\" : self.new_desc_event,\r\n \"ADDRMAP\" : self.address_mapped_event,\r\n \"NS\" : self.ns_event,\r\n \"NEWCONSENSUS\" : self.new_consensus_event,\r\n \"BUILDTIMEOUT_SET\" : self.buildtimeout_set_event,\r\n \"GUARD\" : self.guard_event,\r\n \"TORCTL_TIMER\" : self.timer_event\r\n }\r\n self.c = None # Gets set by Connection.set_event_hanlder()\r\n self.pre_listeners = []\r\n self.post_listeners = []", "def __init__(self, url, routing_key, log_file='/dev/null', exchange='yacamc_exchange', exchange_type='direct',\n queue=None, acked=True, sender=False, otq = False, log_level=logging.FATAL):\n\n if queue is None:\n queue = routing_key\n self.exchange = exchange\n self.exchange_type = exchange_type\n self.queue = queue\n self.routing_key = routing_key\n self._url = url\n self.acked = acked\n self.otq = otq\n\n self.cb = None\n\n self._connection = None\n self._channel = None\n self._closing = False\n\n log_format = '%(levelname) -10s %(asctime)s %(name) -30s %(funcName) -35s %(lineno) -5d: %(message)s'\n handler = logging.FileHandler(log_file)\n logging.basicConfig(level=log_level, format=log_format)\n self.logger = logging.getLogger(__name__)\n self.logger.addHandler(handler)\n\n # used only for sending\n self._deliveries = []\n self._acked = 0\n self._nacked = 0\n self._message_number = 0\n self._stopping = False\n self._done_sending = False\n self.message = \"\"\n self.sender = sender\n\n # self.run()\n # self._connection = self.connect()", "def __init__(self, client: com.Bot):\n self._listening_components = []\n \"\"\"A list of components that are listening for interaction\"\"\"\n self._discord = client\n self._discord.add_listener(self._on_socket_response, 'on_socket_response')", "def __init__(self):\r\n self.client_socket = socket.socket() # the socket of the client.\r\n self.communicator = Communicator()\r\n self.events_handler = EventsHandler(self.client_socket)\r\n self.running = True\r\n self.display_resolution = DEFAULT_DISPLAY_RESOLUTION\r\n self.screen = self.get_display()", "def __init__(self):\r\n self._map1 = {\r\n \"CIRC\" : self.circ_status_event,\r\n \"STREAM\" : self.stream_status_event,\r\n \"ORCONN\" : self.or_conn_status_event,\r\n \"STREAM_BW\" : self.stream_bw_event,\r\n \"BW\" : self.bandwidth_event,\r\n \"DEBUG\" : self.msg_event,\r\n \"INFO\" : self.msg_event,\r\n \"NOTICE\" : self.msg_event,\r\n \"WARN\" : self.msg_event,\r\n \"ERR\" : self.msg_event,\r\n 
\"NEWDESC\" : self.new_desc_event,\r\n \"ADDRMAP\" : self.address_mapped_event,\r\n \"NS\" : self.ns_event,\r\n \"NEWCONSENSUS\" : self.new_consensus_event,\r\n \"BUILDTIMEOUT_SET\" : self.buildtimeout_set_event,\r\n \"GUARD\" : self.guard_event,\r\n \"TORCTL_TIMER\" : self.timer_event\r\n }\r\n self.parent_handler = None\r\n self._sabotage()", "def __init__(self, test_stream=None, no_delay=False, window=None, server=None):\n self._transport = None\n self._socket = None\n self._stream = test_stream\n self._logger = logging.getLogger('py3iperf3')\n self._sock_id = None\n self._no_delay = no_delay\n self._window = window\n self._server = server", "def __init__(self, client_id: str):\n\n self._cs = aiohttp.ClientSession(\n loop=asyncio.get_event_loop(),\n raise_for_status=True,\n headers={\"Client-ID\": client_id},\n )", "def __init__(self, loop=None):\n object.__setattr__(self, '_loop', loop or get_event_loop())", "def on_open(self, request):\n\n # Find the right endpoint and create th connection\n dest = destinations[self.endpoint]\n\n name = self.session.handler.name if self.session.handler else '??'\n logger.info('New %s client for endpoint %s on port %s' %\n (name, self.endpoint, dest[1]))\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n self.endpoint_stream = iostream.IOStream(s)\n self.endpoint_stream.connect(dest, self.on_endpoint_connected)", "def __init__(self):\n\n\t\t#: Create the client and connect it to the host server.\n\t\tself.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\t\t#: Create a seperate thread to control listening to messages\n\t\t#: coming from the server.\n\t\tself.listen_thread = threading.Thread(target=self.listen)\n\n\t\t#: Create a seperate thread to control displaying messages.\n\t\t#: Handling this seperatedly from listening for messages\n\t\t#: ensures that messages aren't lost in the time it takes\n\t\t#: for a message to be displayed.\n\t\tself.messages = []\n\t\tself.display_thread = threading.Thread(target=self.display_messages)\n\n\t\t#: Have a most recent message for testing purposes.\n\t\tself.most_recent_message = \"\"\n\n\t\tself.joined = True\n\n\t\t#: Used to ensure you: doesnt appear twice.\n\t\tself.displayed_you = False", "def __init__(self, notification_handler=None, error_handler=None):\n self._socket = None\n self._thread = None\n self._cur_socket_timeout = 0\n self._next_request_id = 0\n self._notification_handler = notification_handler\n self._error_handler = error_handler", "def __init__(self, local_address):\n self.loopbacknode = local_address\n self.recvqueue = conc.ConcurrentDeque()\n self.numsends = conc.AtomicNumber(0)\n self.numrecvs = conc.AtomicNumber(0)\n self.numforcedrops = conc.AtomicNumber(0)\n self.numrandomdrops = conc.AtomicNumber(0)\n self.closed = False\n self.failurerate = 0\n self.maxdelay = 0\n self.client_recv = None # set in start_listening\n self.scheduler = conc.EventScheduler()\n _MTTRACE.pause() # transport tracing paused by default", "def __init__(self, loop, hsgid, session_id, testcase):\n super().__init__(loop=loop)\n self.responses = {}\n self.responses['login'] = LoginResponse(session_id, 200)\n self.responses['artwork'] = None\n self.responses['playing'] = PlayingResponse()\n self.hsgid = hsgid\n self.last_button_pressed = None\n self.tc = testcase\n\n self.router.add_get('/login', self.handle_login)\n self.router.add_get(\n '/ctrl-int/1/playstatusupdate', self.handle_playstatus)\n self.router.add_post(\n '/ctrl-int/1/controlpromptentry', self.handle_remote_button)\n 
self.router.add_get(\n '/ctrl-int/1/nowplayingartwork', self.handle_artwork)\n self.router.add_post(\n '/ctrl-int/1/setproperty', self.handle_set_property)\n for button in ['play', 'pause', 'nextitem', 'previtem']:\n self.router.add_post('/ctrl-int/1/' + button,\n self.handle_playback_button)", "def __init__(self):\n\n # Every WSGI application must have an application object - a callable\n # object that accepts two arguments. For that purpose, we're going to\n # use a function (note that you're not limited to a function, you can\n # use a class for example). The first argument passed to the function\n # is a dictionary containing CGI-style environment variables and the\n # second variable is the callable object (see PEP 333).\n def application(environ, start_response):\n \"\"\"\n WSGI application object. Returns request status.\n For specific endpoints (e.g. get_with_params), returns\n specific response bodies.\n \"\"\"\n\n response_text = 'Hello World!'\n endpoint = environ['PATH_INFO'][1:]\n\n if endpoint == 'get_with_params':\n #echo back uri parameters as dict...\n response_text = str(dict(urlparse.parse_qsl(environ['QUERY_STRING'])))\n\n #set status code for response based on request...\n requested_status = environ['PATH_INFO'][1:]\n\n status = self.codes.get(requested_status, '200 OK') # HTTP Status\n headers = [('Content-type', 'text/plain')] # HTTP Headers\n start_response(status, headers)\n #print(environ)\n #print('pathInfo: {0}'.format(environ.get('PATH_INFO')))\n #print('queryString: {0}'.format(environ.get('QUERY_STRING')))\n #print('requestMethod:{0}'.format(environ['REQUEST_METHOD']))\n # The returned object is going to be printed\n return response_text\n\n threading.Thread.__init__(self)\n self.httpd = make_server('', 0, application)\n self.address = self.httpd.server_address", "def on_open(self):\n def event_loop():\n logger.debug(pformat(self.query.request))\n self.send(json.dumps(self.query.request))\n while not self.event.is_set():\n #print('Waiting around on the socket: %s' % self.gettimeout())\n self.event.wait(self.gettimeout())\n \n logger.debug('Event loop terminating.')\n \n self.thread = threading.Thread(\n target=event_loop)\n self.thread.setDaemon(True)\n self.thread.start()", "def init(event_model=None):\n with _network_thread_reentrant_lock:\n global _network_thread\n\n # init should only succeed once; if _network_thread is not\n # None, someone has already successfully called init\n if _network_thread:\n raise FDBError(2000)\n\n try:\n\n class NetworkThread(threading.Thread):\n def run(self):\n try:\n _capi.fdb_run_network()\n except FDBError as e:\n sys.stderr.write(\n \"Unhandled error in FoundationDB network thread: %s\\n\" % e\n )\n # print(\"Network stopped\")\n\n _network_thread = NetworkThread()\n _network_thread.daemon = True\n # may not set actual underlying OS thread name\n _network_thread.name = \"fdb-network-thread\"\n\n if event_model is not None:\n if event_model == \"gevent\":\n import gevent\n\n if gevent.__version__[0] != \"0\":\n\n def nullf():\n pass\n\n class ThreadEvent(object):\n has_async_ = hasattr(gevent.get_hub().loop, \"async_\")\n\n def __init__(self):\n if ThreadEvent.has_async_:\n self.gevent_async = gevent.get_hub().loop.async_()\n else:\n self.gevent_async = getattr(\n gevent.get_hub().loop, \"async\"\n )()\n\n self.gevent_async.start(nullf)\n\n def set(self):\n self.gevent_async.send()\n\n def wait(self):\n gevent.get_hub().wait(self.gevent_async)\n\n else:\n # gevent 0.x doesn't have async, so use a pipe. 
This doesn't work on Windows.\n if platform.system() == \"Windows\":\n raise Exception(\n \"The 'gevent' event_model requires gevent 1.0 on Windows.\"\n )\n\n import gevent.socket\n\n class ThreadEvent(object):\n def __init__(self):\n self.pair = os.pipe()\n\n def set(self):\n os.write(self.pair[1], \"!\")\n\n def wait(self):\n gevent.socket.wait_read(self.pair[0])\n\n def __del__(self):\n os.close(self.pair[0])\n os.close(self.pair[1])\n\n Future.Event = ThreadEvent\n\n def _gevent_block_until_ready(self):\n e = self.Event()\n\n def is_ready_cb(future):\n e.set()\n\n self.on_ready(is_ready_cb)\n e.wait()\n\n Future.block_until_ready = _gevent_block_until_ready\n elif event_model == \"debug\":\n import time\n\n class DebugEvent(object):\n def __init__(self):\n self.ev = threading.Event()\n\n def set(self):\n self.ev.set()\n\n def wait(self):\n while not self.ev.isSet():\n self.ev.wait(0.001)\n\n Future.Event = DebugEvent\n\n def _debug_block_until_ready(self):\n while not self.is_ready():\n time.sleep(0.001)\n\n Future.block_until_ready = _debug_block_until_ready\n elif event_model == \"asyncio\":\n global asyncio\n try:\n import asyncio\n except ImportError:\n import trollius as asyncio\n\n if isinstance(asyncio.futures._FUTURE_CLASSES, type):\n asyncio.futures._FUTURE_CLASSES = (\n asyncio.futures._FUTURE_CLASSES,\n )\n asyncio.futures._FUTURE_CLASSES += (Future,)\n\n def _do_not_block(self):\n if not self.is_ready():\n raise Exception(\"Future not ready\")\n\n Future.block_until_ready = _do_not_block\n Future.call_soon_threadsafe = (\n asyncio.get_event_loop().call_soon_threadsafe\n )\n Future._loop = asyncio.get_event_loop()\n\n def iterate(self):\n \"\"\"Usage:\n fa = tr.get_range(...).iterate()\n for k,v in (yield From(fa)):\n print(k,v)\n yield From(fa)\"\"\"\n\n def it():\n yield asyncio.From(self._future)\n raise asyncio.Return(self)\n\n return it()\n\n FDBRange.iterate = iterate\n AT = _TransactionCreator.declare_asynchronous_transactions()\n for name in dir(AT):\n if name.startswith(\"__TransactionCreator__creator_\"):\n setattr(_TransactionCreator, name, getattr(AT, name))\n\n def to_list(self):\n if self._mode == StreamingMode.iterator:\n if self._limit > 0:\n mode = StreamingMode.exact\n else:\n mode = StreamingMode.want_all\n else:\n mode = self._mode\n yield asyncio.From(self._future)\n out = []\n for kv in self.__iter__(mode=mode):\n out.append(kv)\n yield asyncio.From(self._future)\n raise asyncio.Return(out)\n\n FDBRange.to_list = to_list\n else:\n # Hard coded error\n raise FDBError(2000)\n\n _capi.fdb_setup_network()\n\n # Sketchy... the only error returned by fdb_run_network\n # (invoked by _network_thread) is if the network hasn't\n # been setup, so if we get here without exception we know\n # it has been.\n _network_thread.start()\n except Exception:\n # We assigned _network_thread but didn't succeed in init,\n # so clear it out so the next caller has a chance\n _network_thread = None\n raise", "def __init__(self, catchall=True, autojson=True, config=None):\r\n self.routes = [] # List of installed :class:`Route` instances.\r\n self.router = Router() # Maps requests to :class:`Route` instances.\r\n self.plugins = [] # List of installed plugins.\r\n\r\n self.error_handler = {}\r\n #: If true, most exceptions are catched and returned as :exc:`HTTPError`\r\n self.config = ConfigDict(config or {})\r\n self.catchall = catchall\r\n #: An instance of :class:`HooksPlugin`. 
Empty by default.\r\n self.hooks = HooksPlugin()\r\n self.install(self.hooks)\r\n if autojson:\r\n self.install(JSONPlugin())\r\n self.install(TemplatePlugin())", "def testEventHandlerInit(self):\n stub = NetworkObjectStub()\n\n e1 = Event(5, stub, 'message')\n e2 = Event(0, stub, 'message')\n e3 = Event(7, stub, 'message')\n e4 = PacketEvent(1, 'sender2', stub, 4, 'message5')\n eventList = [e1, e2, e3, e4]\n\n eventHandler = EventHandler('network', eventList)\n self.assertEqual('network', eventHandler._network)\n self.assertEqual(4, eventHandler._queue.qsize())", "def __init__(self, worker, event_loop):\n self.weakref_worker = weakref.ref(worker)\n self.event_loop = event_loop\n self.asyncio_task = None", "def __init__(self, addr):\r\n asyncore.dispatcher.__init__(self)\r\n self.accept_channel = None\r\n self.addr = addr\r\n self.create_socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.bind(addr)\r\n self.listen(5)\r\n \r\n # Start the asyncore polling loop if it's not already running\r\n if not asyncore_loop.running:\r\n stackless.tasklet(asyncore_loop)()", "def __init__(self, server_address, RequestHandlerClass):\n\t\tBaseServer.__init__(self, server_address, RequestHandlerClass)\n\t\tself.socket = socket.socket(self.address_family,\n\t\t\t\t\t\t\t\t\tself.socket_type)\n\t\tself.server_bind()\n\t\tself.server_activate()", "def __init__(self, host, port, **kwargs):\n\n SocketHandler.__init__(self, host, port)\n BaseHandler.__init__(self, **kwargs)", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))", "def handle(self):\n try:\n peers = Peers([\n gevent.spawn(self.route.proxy_input, self.client.sock,\n self.sock, self.buf, self.extra),\n gevent.spawn(self.route.proxy_connected, self.sock, \n self.client.sock, self.extra)])\n gevent.joinall(peers.greenlets)\n finally:\n self.sock.close()", "def run(self, event_loop):\n protocol_factory = lambda: self.PROTOCOL_CLS(self)\n coro = event_loop.create_connection(\n protocol_factory, host=self.host, port=self.port)\n event_loop.run_until_complete(coro)", "def init(loop):\n tasks = JobsHandler()\n config = ConfigHandler()\n task = TaskHandler()\n\n\n\n app = web.Application(loop = loop)\n app.router.add_route('*', '/tasks/{do_something}', tasks.handle)\n app.router.add_route('*', '/config/{do_something}', config.handle)\n app.router.add_route('*', '/task/{id}/{do_something}', task.handle)\n\n handler = app.make_handler()\n srv = yield from loop.create_server(handler, '0.0.0.0', 8080)\n print(\"Server started at http://0.0.0.0:8080\")\n return srv, handler", "def __init__(self, server_address, RequestHandlerClass, settings, bind_and_activate=True):\n\n self.log_path = settings['log']\n self.key_file = settings['key']\n self.cert_file = settings['cert']\n self.allow_ip = IPRange(settings['allow_ip'])\n\n 
self.spellchecker = hunspell.HunSpell('/usr/share/hunspell/en_US.dic',\n '/usr/share/hunspell/en_US.aff')\n\n SocketServer.TCPServer.__init__(self, server_address, RequestHandlerClass, False)\n\n # initialize SSL connection\n self.socket = ssl.wrap_socket(self.socket,\n keyfile=self.key_file,\n certfile=self.cert_file,\n cert_reqs=ssl.CERT_NONE,\n ssl_version=ssl.PROTOCOL_TLSv1,\n server_side=True)\n\n # start serving\n if bind_and_activate:\n self.server_bind()\n self.server_activate()", "def __init__(self, event, object, participant, source,):\n self.event = event\n self.object = object\n self.participant = participant\n self.source = source", "def __init__(self, testcase, url, method='GET'):\n self.testcase = testcase\n logging.handlers.HTTPHandler.__init__(self, 'testserver', url, method)", "def __init__(self, web_app_host, web_app_port, request_timeout=40, retries=3, on_fail_sleep_duration=5):\n logging.Handler.__init__(self)\n self.addr = web_app_host + ':' + str(web_app_port)\n self.request_timeout = request_timeout\n self.retries = retries\n self.on_fail_sleep_duration = on_fail_sleep_duration\n self.http_client = httpclient.HTTPClient()", "def _start_event_stream(self):\r\n\r\n # Register with an event queue, which will be used as event source:\r\n self._event_queue = self._call_factory(\"subscribe\")\r\n if self._event_queue is None:\r\n self.logger.debug(\"SseHTTPRequestHandler(Thread-%s): no queue, \"\r\n \"stopping this thread\",\r\n threading.current_thread().ident)\r\n # As per http://dev.w3.org/html5/eventsource/, a response code\r\n # of 204 tells the browser not to reconnect:\r\n self.send_response(204)\r\n return\r\n self.logger.debug(\"SseHTTPRequestHandler(Thread-%s): registered queue, \"\r\n \"start sending events\", threading.current_thread().ident)\r\n\r\n # Send HTTP headers:\r\n self.send_response(200)\r\n self.send_header(\"Content-type\", \"text/event-stream\")\r\n self.end_headers()\r\n\r\n # Start event serving loop:\r\n self._send_events()", "def setUpClass(cls):\n\n cls.client = TestWorkflow.start()\n\n cls.httpd = HTTPServer((\"127.0.0.1\", 8001), RequestHandler)\n\n server = Thread(target=cls.httpd.serve_forever)\n server.setDaemon(True)\n server.start()", "def __init__(self, loop: AbstractEventLoop, response: bytes) -> None:\n super().__init__(loop, response, ResponseMessageType.CONTROL)", "def router(paramstring):\r\n channels()", "def __init__(self, auth_provider):\n AbstractTransport.__init__(self, auth_provider)\n self._pipeline = (\n pipeline_stages_base.PipelineRoot()\n .append_stage(pipeline_stages_iothub.UseSkAuthProvider())\n .append_stage(pipeline_stages_base.EnsureConnection())\n .append_stage(pipeline_stages_iothub_mqtt.IotHubMQTTConverter())\n .append_stage(pipeline_stages_mqtt.Provider())\n )\n\n def _handle_pipeline_event(event):\n if isinstance(event, pipeline_events_iothub.C2DMessageEvent):\n if self.on_transport_c2d_message_received:\n self.on_transport_c2d_message_received(event.message)\n else:\n logger.warning(\"C2D event received with no handler. dropping.\")\n\n elif isinstance(event, pipeline_events_iothub.InputMessageEvent):\n if self.on_transport_input_message_received:\n self.on_transport_input_message_received(event.input_name, event.message)\n else:\n logger.warning(\"input mesage event received with no handler. 
dropping.\")\n\n elif isinstance(event, pipeline_events_iothub.MethodRequest):\n if self.on_transport_method_request_received(event.method_request):\n self.on_transport_method_request_received(event.method_request)\n else:\n logger.warning(\"Method request event received with no handler. Dropping.\")\n\n else:\n logger.warning(\"Dropping unknown pipeline event {}\".format(event.name))\n\n def _handle_connected():\n if self.on_transport_connected:\n self.on_transport_connected(\"connected\")\n\n def _handle_disconnected():\n if self.on_transport_disconnected:\n self.on_transport_disconnected(\"disconnected\")\n\n self._pipeline.on_pipeline_event = _handle_pipeline_event\n self._pipeline.on_connected = _handle_connected\n self._pipeline.on_disconnected = _handle_disconnected\n\n def remove_this_code(call):\n if call.error:\n raise call.error\n\n self._pipeline.run_op(\n pipeline_ops_iothub.SetAuthProvider(\n auth_provider=auth_provider, callback=remove_this_code\n )\n )", "def __init__(self, store=None):\n self.sockets = []\n self.poller = zmq.core.poll.Poller()\n self.mh = MessageHandler()\n self.store = store", "def __init__(self, url, header=None, logger=None):\n\n httpDict = {}\n header = header or {}\n # url is end point\n httpDict['endpoint'] = \"%s/data\" % url\n httpDict['logger'] = logger if logger else logging.getLogger()\n\n # cherrypy converts request.body to params when content type is set\n # application/x-www-form-urlencodeds\n httpDict.setdefault(\"content_type\", 'application/json')\n httpDict.setdefault('cacheduration', 0)\n httpDict.setdefault(\"accept_type\", \"application/json\")\n httpDict.update(header)\n self.encoder = json.dumps\n Service.__init__(self, httpDict)\n # This is only for the unittest: never set it true unless it is unittest\n self._noStale = False", "def __init__(self, address, username=None, password=None, debug=False):\n self.container_id = \"eventhub.pysdk-\" + str(uuid.uuid4())[:8]\n self.address = urlparse(address)\n url_username = unquote_plus(self.address.username) if self.address.username else None\n username = username or url_username\n url_password = unquote_plus(self.address.password) if self.address.password else None\n password = password or url_password\n if not username or not password:\n raise ValueError(\"Missing username and/or password.\")\n auth_uri = \"sb://{}{}\".format(self.address.hostname, self.address.path)\n self.auth = self._create_auth(auth_uri, username, password)\n self.connection = None\n self.debug = debug\n\n self.clients = []\n self.stopped = False\n log.info(\"{}: Created the Event Hub client\".format(self.container_id))", "def __init__(self, c, selmgr, RouterClass=TorCtl.Router,\r\n strm_selector=StreamSelector):\r\n TorCtl.ConsensusTracker.__init__(self, c, RouterClass)\r\n self.last_exit = None\r\n self.new_nym = False\r\n self.resolve_port = 0\r\n self.num_circuits = 1\r\n self.circuits = {}\r\n self.streams = {}\r\n self.selmgr = selmgr\r\n self.selmgr.reconfigure(self.current_consensus())\r\n self.imm_jobs = Queue.Queue()\r\n self.low_prio_jobs = Queue.Queue()\r\n self.run_all_jobs = False\r\n self.do_reconfigure = False\r\n self.strm_selector = strm_selector\r\n plog(\"INFO\", \"Read \"+str(len(self.sorted_r))+\"/\"+str(len(self.ns_map))+\" routers\")", "def __init__(self, processor, server_address,\r\n inputProtocolFactory, outputProtocolFactory = None,\r\n server_class = BaseHTTPServer.HTTPServer):\r\n\r\n if outputProtocolFactory is None:\r\n outputProtocolFactory = inputProtocolFactory\r\n\r\n 
TServer.TServer.__init__(self, processor, None, None, None,\r\n inputProtocolFactory, outputProtocolFactory)\r\n\r\n thttpserver = self\r\n\r\n class RequestHander(BaseHTTPServer.BaseHTTPRequestHandler):\r\n def do_POST(self):\r\n # Don't care about the request path.\r\n itrans = TTransport.TFileObjectTransport(self.rfile)\r\n otrans = TTransport.TFileObjectTransport(self.wfile)\r\n itrans = TTransport.TBufferedTransport(itrans, int(self.headers['Content-Length']))\r\n otrans = TTransport.TMemoryBuffer()\r\n iprot = thttpserver.inputProtocolFactory.getProtocol(itrans)\r\n oprot = thttpserver.outputProtocolFactory.getProtocol(otrans)\r\n try:\r\n thttpserver.processor.process(iprot, oprot)\r\n except ResponseException, exn:\r\n exn.handler(self)\r\n else:\r\n self.send_response(200)\r\n self.send_header(\"content-type\", \"application/x-thrift\")\r\n self.end_headers()\r\n self.wfile.write(otrans.getvalue())\r\n\r\n self.httpd = server_class(server_address, RequestHander)", "def __init__(self, loop, websession, host, port, api_key, **kwargs):\n self.groups = {}\n self.lights = {}\n self.scenes = {}\n self.sensors = {}\n self.config = None\n self.loop = loop\n self.session = websession\n self.host = host\n self.api_url = 'http://{}:{}/api/{}'.format(host, port, api_key)\n if 'legacy_websocket' in kwargs:\n from .websocket import WSClient as ws_client\n else:\n from .websocket import AIOWSClient as ws_client\n self.ws_client = ws_client\n self.websocket = None\n self.async_add_device_callback = kwargs.get('async_add_device')\n self.async_connection_status_callback = kwargs.get('connection_status')", "def __init__(self):\n self.server_socket = None\n try:\n self.receive_video_socket = \\\n self.start_socket(IP, RECEIVE_VIDEO_PORT)\n print('started socket at ip {} port {}'\n .format(IP, RECEIVE_VIDEO_PORT))\n self.send_video_socket = \\\n self.start_socket(IP, SEND_VIDEO_PORT)\n print('started socket at ip {} port {}'\n .format(IP, SEND_VIDEO_PORT))\n self.receive_audio_socket = \\\n self.start_socket(IP, RECEIVE_AUDIO_PORT)\n print('started socket at ip {} port {}'\n .format(IP, RECEIVE_AUDIO_PORT))\n self.send_audio_socket = \\\n self.start_socket(IP, SEND_AUDIO_PORT)\n print('started socket at ip {} port {}'\n .format(IP, SEND_AUDIO_PORT))\n self.client_video_dict = {}\n self.client_audio_dict = {}\n\n except socket.error as e:\n print(\"socket creation fail: \", e)\n self.close_all()\n except Exception as e:\n print(\"server construct fail: \", e)\n self.close_all()", "def __init__(self):\n self._order_handlers = []\n self._target_handlers = {}\n\n self._robot = None\n self._lock = threading.Lock()", "def initialize(self) -> None:\n conn = self.optionally_wrap_socket(self.client.connection)\n conn.setblocking(False)\n self.client = TcpClientConnection(conn=conn, addr=self.addr)\n if b'ProtocolHandlerPlugin' in self.config.plugins:\n for klass in self.config.plugins[b'ProtocolHandlerPlugin']:\n instance = klass(self.config, self.client, self.request)\n self.plugins[instance.name()] = instance", "def init(self) -> None:\r\n\t\tself._server = socket.socket()\r\n\t\tself._server.bind(self.network_info)\r\n\t\tself._server.listen()\r\n\r\n\t\tpath, static_files = utils.get_static_files()\r\n\t\tif path is not None:\r\n\t\t\tfor filename in static_files:\r\n\t\t\t\tself.paths[\"GET\"].append(([filename[1:]], self._send_file(path + filename)))", "def __init__(self, requestMessage, clientAddr, caching, ttl):\r\n super(Thread, self).__init__()\r\n self.requestByte = requestMessage\r\n self.request = 
dns.message.Message.from_bytes(self.requestByte)\r\n self.clientAddr = clientAddr\r\n self.caching = caching\r\n self.ttl = ttl", "def setUp(self):\n self.client_socket = open_client_socket()", "def setUp(self):\n self.client_socket = open_client_socket()", "def main(self):\n addr = (self.uri, self.port)\n try:\n self.client.connect(addr)\n except socket.gaierror:\n print(\"[ERROR] not a valid URI. Try again please...\")\n else:\n print(\"[SETUP] client connected to IPv4 address\", self.uri, \"on port\", self.port)\n self.handler()", "def __init__(self, loop: asyncio.AbstractEventLoop):\n self._loop = loop\n self._asyncio_sleep_time = 0.0005\n\n #Setup for async\n self._init()", "def __init__(self, event_loop: asyncio.AbstractEventLoop, ssl_context: ssl.SSLContext=None,\n loglevel: int=logging.DEBUG, buffer_size: int=asyncio.streams._DEFAULT_LIMIT):\n self._event_loop = event_loop\n self._server = None\n if not ssl_context:\n # This looks very similar to the code for create_default_context\n # That's because it is the code\n # For some reason, create_default_context doesn't like me and won't work properly\n self._ssl = ssl.SSLContext(protocol=ssl.PROTOCOL_SSLv23)\n # SSLv2 considered harmful.\n self._ssl.options |= ssl.OP_NO_SSLv2\n\n # SSLv3 has problematic security and is only required for really old\n # clients such as IE6 on Windows XP\n self._ssl.options |= ssl.OP_NO_SSLv3\n self._ssl.load_default_certs(ssl.Purpose.SERVER_AUTH)\n self._ssl.options |= getattr(_ssl, \"OP_NO_COMPRESSION\", 0)\n self._ssl.set_ciphers(ssl._RESTRICTED_SERVER_CIPHERS)\n self._ssl.options |= getattr(_ssl, \"OP_CIPHER_SERVER_PREFERENCE\", 0)\n\n else:\n self._ssl = ssl_context\n\n self._bufsize = buffer_size\n self.default_butterfly = Butterfly\n self.default_net = Net\n\n self._executor = futures.ThreadPoolExecutor(max_workers=multiprocessing.cpu_count() * 2 + 1)\n\n self.net = None\n self.log_level = loglevel\n self.logger = logging.getLogger(\"ButterflyNet\")\n self.logger.setLevel(loglevel)\n if self.logger.level <= logging.DEBUG:\n self._event_loop.set_debug(True)\n\n self.butterflies = {}", "def __listener__(self):\n frame_interval = 0.1\n str_list = []\n c = ''\n while True:\n with Timeout(frame_interval, False):\n while True:\n try:\n c = self.ser.read()\n except:\n self.ser.close()\n self.make_connection.go()\n self.connection_made.wait()\n str_list.append(c)\n if c == \"\\n\" or c == '':\n break\n received = ''.join(str_list)\n str_list = []\n if received:\n for i in self.read_handlers:\n gevent.spawn(i, received)\n sleep(0.001)", "def __init__(self, url, redirectChain=[], serverEncoding=None,\n HTTPignore=[]):\n self.url = url\n self.serverEncoding = serverEncoding\n\n fake_ua_config = config.fake_user_agent_default.get(\n 'weblinkchecker', False)\n if fake_ua_config and isinstance(fake_ua_config, str):\n user_agent = fake_ua_config\n elif fake_ua_config:\n user_agent = comms.http.fake_user_agent()\n else:\n user_agent = comms.http.user_agent()\n self.header = {\n 'user-agent': user_agent,\n 'Accept': 'text/xml,application/xml,application/xhtml+xml,'\n 'text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',\n 'Accept-Language': 'de-de,de;q=0.8,en-us;q=0.5,en;q=0.3',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',\n 'Keep-Alive': '30',\n 'Connection': 'keep-alive',\n }\n self.redirectChain = redirectChain + [url]\n self.changeUrl(url)\n self.HTTPignore = HTTPignore", "def setup_routes():\n root = CherryTarball()\n d = cherrypy.dispatch.RoutesDispatcher()\n d.connect('main', '/', 
controller=root)\n # This enumerates the tarball and connects each file within to a URL in the dispatcher\n tar = tarfile.open(tarball_path)\n for tarobj in tar.getmembers():\n if tarobj.isdir():\n pass # Skip directories\n else:\n d.connect(tarobj.name, tarobj.name, controller=root, action='static', filepath=tarobj.name)\n dispatcher = d\n return dispatcher", "def __init__(self, port, server_core):\n super().__init__((\"127.0.0.1\", port), ClientHandler._Handler)\n ClientHandler._Handler.server_core = server_core", "def __init__(self, *args, **kwargs):\n self.events = {}", "def run(self, event_loop):\n protocol_factory = lambda: self.PROTOCOL_CLS(self)\n coro = event_loop.create_server(protocol_factory, port=self.port)\n event_loop.run_until_complete(coro)", "def __init__(self, remote_service_coord, auto_retry=None):\n super(RemoteServiceClient, self).__init__(\n get_service_address(remote_service_coord))\n self.remote_service_coord = remote_service_coord\n\n self.pending_outgoing_requests = dict()\n self.pending_outgoing_requests_results = dict()\n\n self.auto_retry = auto_retry\n\n self._loop = None", "def __init__(self, port_num=0):\n address = ('0.0.0.0', port_num)\n HTTPServer.__init__(self, address, self.HANDLER_CLASS)\n\n # Create a dict to store configuration values set by the client\n self.config = dict()\n\n # Start the server in a separate thread\n server_thread = threading.Thread(target=self.serve_forever)\n server_thread.daemon = True\n server_thread.start()\n\n # Log the port we're using to help identify port conflict errors\n LOGGER.debug(f'Starting service on port {self.port}')", "def __init__(\n self, router_name=None, disable_auto_logout=None, enable_validation_code=None\n ):\n self.router_name = router_name\n self.disable_auto_logout = disable_auto_logout\n self.enable_validation_code = enable_validation_code", "def __init__(self, reactor=None):\n self.Setup()\n self.ServiceEnabled = settings.SERVICE_ENABLED\n self.peer_zero_count = 0 # track the number of times PeerCheckLoop saw a Peer count of zero. Reset e.g. 
after 3 times\n self.connection_queue = []\n self.reactor = twisted_reactor\n self.incoming_server_running = False\n self.forced_disconnect_by_us = 0\n self.peers_connecting = 0\n\n # for testability\n if reactor:\n self.reactor = reactor", "def __init__(self, host, port, in_queue=Queue()):\n\n # Settings\n self.application = Application()\n self.server = tornado.httpserver.HTTPServer(self.application)\n self.host = host\n self.port = port\n self.in_queue = in_queue\n\n # Listen to ..\n self.server.listen(self.port, self.host)\n\n # Logging settings\n logging.basicConfig(level=logging.DEBUG)\n self.logger = logging.getLogger(\"WebSocketServer\")\n self.logger.setLevel(logging.INFO)", "def evented(cls: Cls) -> Cls:\n handlers: List[Handler] = list(_handlers)\n _handlers.reset()\n\n og_init: Callable = cls.__init__\n\n @wraps(cls.__init__)\n def init(self, *args, **kwargs):\n og_init(self, *args, **kwargs)\n if not hasattr(self, \"event_emitter\"):\n self.event_emitter = EventEmitter()\n\n for h in handlers:\n self.event_emitter.on(h.event, _bind(self, h.method))\n\n cls.__init__ = init\n\n return cls", "def __init__(self, config):\n self.__clients = {} # dictionary to store all the active connections\n signal.signal(signal.SIGINT, self.shutdown) # execute shutdown method on Ctrl + C\n # create and setup TCP socket\n self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.config = config\n self.setup_socket()\n self.regex = re.compile(r'({})$'.format('.css|.js'))\n # initialize cache\n self.cache = LFUCache()\n logging.basicConfig(level=logging.DEBUG,\n format='[%(CurrentTime)-10s] (%(ThreadName)-10s) %(message)s')", "def __init__(self, reactor, masterIP, masterPort, commPort, extIP, extPort,\r\n loader, converter):\r\n Endpoint.__init__(self, reactor, loader, commPort)\r\n\r\n self._masterIP = masterIP\r\n self._masterPort = masterPort\r\n self._extAddress = '{0}:{1}'.format(extIP, extPort)\r\n self._loader = loader\r\n self._converter = converter\r\n\r\n self._connections = set()\r\n self._deathCandidates = {}", "def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):\r\n SocketServer.BaseServer.__init__(self, server_address, RequestHandlerClass)\r\n self.socket = socket.socket(self.address_family,\r\n self.socket_type)\r\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\r\n\r\n if bind_and_activate:\r\n try:\r\n self.server_bind()\r\n self.server_activate()\r\n except:\r\n self.server_close()\r\n raise", "def __init__(self, loop: AbstractEventLoop, response: bytes,\n msg_type: ResponseMessageType) -> None:\n self._loop = loop\n self._unparsed_response = response\n self._msg_type = msg_type", "def __init__(self, handler):\n\n self.event_handler = handler", "def initialize(self):\n if self.real:\n self.agent.connect(self)\n else:\n self.connect() # Connect python client to VREP\n self.agent.connect(self)", "def __init__(self, request_timeout=3, max_concurrency=3, backlog=16, debug=False):\n self.loop = asyncio.get_event_loop()\n self.request_timeout = request_timeout\n self.max_concurrency = max_concurrency\n self.backlog = backlog\n self.debug = debug\n self.explicit_url_map = {}\n self.catch_all_handler = None\n self.parameterized_url_map = {}\n # Currently opened connections\n self.conns = {}\n # Statistics\n self.processed_connections = 0", "def __init__(self, host, port, map):\n asyncore.dispatcher.__init__(self, map=map)\n self.host = host\n self.port = port\n self.append_send_buffer = None\n self.append_connect = None\n 
self.clear()", "def get_handler(cls, loop: asyncio.AbstractEventLoop, ssl_context: ssl.SSLContext=None,\n log_level: int=logging.INFO, buffer_size: int=asyncio.streams._DEFAULT_LIMIT):\n if not cls.instance:\n cls.instance = cls(loop, ssl_context, log_level, buffer_size)\n return cls.instance", "def __init__(self, client_ident):\n\t\tthreading.Thread.__init__(self, None)\n\t\tself.client_ident\t\t= client_ident\n\t\tself.start()", "def __init__(self, type, addresses=(), handler=None):\n object.__init__(self)\n self._client = Client(type=type)\n self.__handled_message = None\n self.__working = True\n self.__has_sent_response = None\n self._lock = RLock()\n self._handler = handler\n\n # connect\n connect_futures = map(self._client.connect, addresses)\n for future in connect_futures:\n try:\n future.wait(5)\n except FutureException:\n print_exc()", "def __init__(self):\n self.model = Model()\n self.view = View()\n\n self.server = Server(msg_handler=self.msg_handler,\n err_handler=self.err_handler,\n conn_handler=self.conn_handler,\n quit_handler=self.quit_handler)\n self.server.start()\n\n self.view.frame.onclose(self.server.close)", "def init(a: str, h: str, c: str, r: bool, A: str, lock: Lock) -> None:\n global host, action, report, router, algorithm\n\n action = a\n algorithm = A\n\n if r:\n report = r\n if h:\n host = h\n if c:\n lock.acquire()\n try:\n router = PyOSRM(c, use_shared_memory=False, algorithm=algorithm)\n LOGGER.debug(\"Router instantiated\")\n finally:\n lock.release()", "def __init__(self, host):\n self._io = RemoteIO(host)\n self._host = host\n\n self._left_wheel = Wheel(id='b', side='left', remote_io=self._io)\n self._right_wheel = Wheel(id='a', side='right', remote_io=self._io, inverse=True)\n\n self._cam = Camera(host)\n\n self._left_led = LED(side='left', remote_io=self._io)\n self._front_led = LED(side='center', remote_io=self._io)\n self._right_led = LED(side='right', remote_io=self._io)", "def __init__(self, host, event_hub_path, credential, **kwargs):\n # type:(str, str, Union[EventHubSharedKeyCredential, EventHubSASTokenCredential, TokenCredential], ...) 
-> None\n self.container_id = \"eventhub.pysdk-\" + str(uuid.uuid4())[:8]\n self.address = _Address()\n self.address.hostname = host\n self.address.path = \"/\" + event_hub_path if event_hub_path else \"\"\n self._auth_config = {}\n self.credential = credential\n if isinstance(credential, EventHubSharedKeyCredential):\n self.username = credential.policy\n self.password = credential.key\n self._auth_config['username'] = self.username\n self._auth_config['password'] = self.password\n\n self.host = host\n self.eh_name = event_hub_path\n self.keep_alive = kwargs.get(\"keep_alive\", 30)\n self.auto_reconnect = kwargs.get(\"auto_reconnect\", True)\n self.mgmt_target = \"amqps://{}/{}\".format(self.host, self.eh_name)\n self.auth_uri = \"sb://{}{}\".format(self.address.hostname, self.address.path)\n self.get_auth = functools.partial(self._create_auth)\n self.config = _Configuration(**kwargs)\n self.debug = self.config.network_tracing\n\n log.info(\"%r: Created the Event Hub client\", self.container_id)", "def __init__(self, root, restricted_folders,\n restricted_page=RESTRICTED_HTML_PAGE, address=constants.ADDR):\n self._root = root\n self._restricted_folders = restricted_folders\n self._restricted_html = restricted_page\n self._client = socket.socket()\n try:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Some times the socket won't bind correctly and accept will raise\n # exception, therefore we set SO_REUSEADDR before binding\n self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n self._socket.bind(address)\n if DEBUG_LEVEL >= 0:\n print \"Listening on: {}\".format(address)\n self._socket.listen(5)\n except socket.error as err:\n if DEBUG_LEVEL > 0:\n print \"Got error: {}\".format(err.message)", "def __init__(self, hosts: List[IPv4Address], loop: asyncio.AbstractEventLoop):\n super().__init__()\n self.hosts = hosts\n self.loop = loop", "def __init__(self, c, selmgr, num_circuits, RouterClass):\r\n PathBuilder.__init__(self, c, selmgr, RouterClass)\r\n # Set handler to the connection here to \r\n # not miss any circuit events on startup\r\n c.set_event_handler(self)\r\n self.num_circuits = num_circuits # Size of the circuit pool\r\n self.check_circuit_pool() # Bring up the pool of circs\r" ]
[ "0.61509657", "0.5864122", "0.57680523", "0.5765937", "0.5636957", "0.542871", "0.53911084", "0.5345458", "0.52738434", "0.52246845", "0.517456", "0.51684", "0.51025426", "0.5091683", "0.50771654", "0.5053803", "0.50480044", "0.50263023", "0.501474", "0.49918574", "0.49689016", "0.49622035", "0.49477503", "0.4925583", "0.49240094", "0.49104512", "0.4904054", "0.48972535", "0.48854524", "0.48783186", "0.4866233", "0.48647276", "0.48607337", "0.48578396", "0.48532528", "0.48467195", "0.48418668", "0.48300645", "0.48244256", "0.48200852", "0.48195893", "0.47997376", "0.47963434", "0.47932446", "0.4784088", "0.47831333", "0.47822598", "0.47752863", "0.47682038", "0.4766281", "0.47662058", "0.47652534", "0.47394824", "0.47378844", "0.47250333", "0.47218668", "0.4718774", "0.4713455", "0.4702037", "0.46895033", "0.46866104", "0.46652916", "0.46628177", "0.46626395", "0.46577114", "0.46553266", "0.46553266", "0.46529528", "0.46447203", "0.464322", "0.46253958", "0.46210626", "0.4620824", "0.46204525", "0.46202716", "0.4609198", "0.46089908", "0.46045208", "0.46030018", "0.45968866", "0.45966303", "0.45951298", "0.45903042", "0.4585684", "0.45805573", "0.45771113", "0.4575959", "0.4568138", "0.4561663", "0.45535934", "0.455252", "0.45503107", "0.45501778", "0.4540561", "0.45399666", "0.45395324", "0.45387465", "0.4538514", "0.45353606", "0.4532745" ]
0.74102676
0
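A minimal construction/teardown sketch for the EventRelayHTTPC initializer in the record above. This is an assumption-laden illustration, not confirmed API: the EventLib import paths, the EventRouter constructor argument, and the host/port are all placeholders inferred from the snippets.

    from EventLib.EventRouter import EventRouter          # assumed module path
    from EventLib.EventRelayHTTPC import EventRelayHTTPC  # assumed module path

    # Local router whose traffic the relay mirrors over HTTP.
    router = EventRouter("local:router")  # constructor argument is an assumption

    # On construction the relay subscribes itself to the router and starts a
    # worker thread that POSTs queued envelopes to http://localhost:8082 and,
    # in duplex mode (simplex=False), issues GETs to pull remote events back.
    relay = EventRelayHTTPC(router, uri="local:relay-1",
                            host="localhost", port=8082, simplex=False)

    # ... application publishes events through the router as usual ...

    # close() queues a "closedown" marker, sets the closing flag so the event
    # loop drains pending messages before exiting, and joins the worker thread.
    relay.close()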
This function receives messages from the associated router and queues them for transmission on the HTTP interface.
def receive(self, fromrouter, envelope): event = envelope.unWrap(self.getUri()) if event: Trace("%s receive %s from %s"%(self.getUri(),event,fromrouter), "EventLib.EventRelayHTTPC") return self.queueItem(["forward",envelope]) return makeDeferred(StatusVal.OK)
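The handoff behind receive is a plain producer/consumer queue paired with a threading.Event wake-up, as seen in the processEvent loops above. A self-contained sketch of that mechanism follows; the class and method names are illustrative, not EventLib's, and it is written in Python 3 whereas the original snippets are Python 2.

    import queue
    import threading

    class QueueingRelay(object):
        def __init__(self):
            self._queue = queue.Queue()
            self._queue_event = threading.Event()

        def queue_item(self, item):
            # Producer side (the receive() path): enqueue and wake the worker.
            self._queue.put(item)
            self._queue_event.set()

        def worker_loop(self):
            # Consumer side: block until signalled, then drain the queue.
            while True:
                self._queue_event.wait()
                self._queue_event.clear()
                while not self._queue.empty():
                    typ, env = self._queue.get()
                    if typ == "closedown":
                        return  # shut the worker down cleanly
                    # otherwise POST 'env' over the HTTP connection here

Calling queue_item(["forward", envelope]) mirrors the call made by receive; because a single worker drains the queue, envelopes go out over HTTP in arrival order.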
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recv_messages(self):\n while True:\n b = unwrap_read(self.sock.recv(4096))\n msgs = self.parser.feed(b)\n if msgs:\n for msg in msgs:\n self.router.incoming(msg)\n return", "def __process_requests(self):\n\t\tfor received_message in self.receiver:\n\t\t\tif self.registry.ip_known(received_message.sender):\n\t\t\t\tlogger.info(\"Message received from registered client.\")\n\t\t\t\tif received_message.body.startswith(COMMAND_FLAG_CHAR):\n\t\t\t\t\tlogger.debug(\"Message was a command.\")\n\t\t\t\t\tself.parse(received_message.body)\n\t\t\t\telse:\n\t\t\t\t\tlogger.debug(\"Message was generic.\")\n\t\t\t\t\tself.send_to_all(received_message)\n\t\t\telse:\n\t\t\t\tlogger.info(\"Message received from an unregistered client.\")\n\t\t\t\tself.attempt_to_register(received_message)", "def listen_for_messages(self, callback):\n # generate get requests for all input queues\n requests = [port.in_queue.get() for port in self.ports]\n while requests:\n # helper variable for the asserts\n queues_with_pending_requests = [req.resource for req in requests]\n # There is a request for each input queue.\n assert set(self.input_queues) == set(queues_with_pending_requests)\n # For each input queue there's exactly one request.\n assert (\n len(queues_with_pending_requests) ==\n len(set(queues_with_pending_requests)))\n\n log.debug(\"{} waiting for next reception\".format(self))\n completed_requests = (yield self.env.any_of(requests))\n received_messages = list(completed_requests.values())\n log.debug(\"{} received {}\".format(\n self, received_messages))\n\n callback(received_messages)\n\n # Only leave the requests which have not been completed yet\n remaining_requests = [\n req for req in requests if req not in completed_requests]\n # Input queues that have been emptied since the last wake up.\n emptied_queues = [req.resource for req in completed_requests]\n # Add new get requests for the input queues that have been emptied.\n new_requests = []\n for input_queue in emptied_queues:\n new_requests.append(input_queue.get())\n requests = remaining_requests + new_requests", "def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])", "def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)", "def processIncoming(self):\n while self.queue.qsize():\n try:\n # print 'queue'\n msg = self.queue.get(0)\n # Check contents of message and do what it says\n # As a test, we simply print it\n if msg == \"exit\":\n self.deviceError()\n if msg == \"error\":\n self.deviceError()\n else:\n self.decode(msg)\n except Queue.Empty:\n pass", "def _start_receive_from_queue(self):\n while True:\n received_message = recv_msg(self.TCPSock)\n # received_message = self.TCPSock.recv(self.buf)\n if self.verbose: print \"Server sends: \" + received_message\n self.receive_message_queue.put(received_message)", "def process(self):\n\n self.wsRms.connect()\n\n self.scheduler = Scheduler(self)\n 
self.scheduler.setDaemon(True)\n self.scheduler.start()\n\n while not self.stop:\n json = self.wsEngine.receive()\n if json == None:\n time.sleep(1)\n continue\n print \"------->Receive from lib: %s\" %json\n message = Message().restore(json)\n\n if message.getCmd() == Message.CMD_REGISTER:\n self.waitingQueue.append(message)\n\n elif message.getCmd() == Message.CMD_RELEASE:\n self.wsRms.release(message.getRes())\n self.runningQueue.remove(message)\n\n self.scheduler.stop()", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "def receive(self, message):", "def main_WPHandler_thread(inputQueue, queues, runtimeVars):\n while True:\n if not inputQueue.empty():\n print \"WPListener received a message\"\n message = inputQueue.get()\n handle(message, queues[\"socketServer\"], runtimeVars)", "def _msg_server(self, message):\r\n \r\n # Add message to the outgoing queue.\r\n self._outgoinglock.acquire()\r\n self._outgoing.append(message)\r\n self._outgoinglock.release()", "def _client(self):\n while True:\n body = self.queue.get(True)\n print \"Sending %s bytes (%s/%s)\" % (len(body), self.queue.qsize(), self.queue.maxsize)\n\n try:\n req = urllib2.Request(self.endpoint, body)\n urllib2.urlopen(req).read()\n except:\n print \"Cannot send request. Retrying in 5 seconds\"\n print_exception(*sys.exc_info())\n print \"continuing...\"\n self.enqueue(body)\n sleep(5)", "def run(self):\n self.channel.queue_declare(self._request_queue)\n self.channel.basic_consume(self._request_queue, self.on_message)\n try:\n msg = \"Waiting for message ...\"\n print(msg)\n logging.info(msg)\n self.channel.start_consuming()\n except KeyboardInterrupt:\n self.channel.stop_consuming()\n\n self.connection.close()", "def receive_message(self, message):", "def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass", "def send_msg(self, my_queue, my_msg):", "def process_queue(self):\n while not self.msg_queue.empty():\n addr, msg = self.msg_queue.get()\n if msg:\n print(msg)\n self.broadcast(addr, msg)\n else:\n self.clean(addr)", "def handleIncoming(self):\r\n\t\trawQueue = list()\r\n\r\n\t\twhile True:\r\n\t\t\tif not self.activeConnection:\r\n\t\t\t\ttime.sleep(.1)\r\n\t\t\t\tcontinue\r\n\t\t\ttry:\r\n\t\t\t\trawQueue.append(self.serialPort.read(1).decode('ascii'))\r\n\t\t\texcept serial.serialutil.SerialException as e:\r\n\t\t\t\tcontinue\r\n\t\t\t# print(rawQueue[-1], int.from_bytes(rawQueue[-1], byteorder='big'))\r\n\t\t\t# if len(rawQueue) >= 1000:\r\n\t\t\t# \trawQueue.pop(0)\r\n\t\t\t# print(rawQueue)\r\n\t\t\tif rawQueue[0] != '$': # we pop items until the first one is a $ sign\r\n\t\t\t\t# print('popping the first character')\r\n\t\t\t\trawQueue.pop(0)\r\n\t\t\tif '\\n' in rawQueue: # we assume with the \\n we have a valid message\r\n\t\t\t\t# print('valid message')\r\n\t\t\t\trawQueue.pop(0) # remove the $\r\n\t\t\t\trawPayload = rawQueue[0:rawQueue.index(\"*\")]\r\n\t\t\t\tstringPayload = \"\".join(rawPayload)\r\n\t\t\t\tvalueList = stringPayload.split(\",\")\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\tfor i in range(1, 
len(valueList)):\r\n\t\t\t\t\tvalueList[i] = int(valueList[i])\r\n\t\t\t\tvalueList[0] = messageTypes[valueList[0]]\r\n\r\n\t\t\t\tself.eventQueue.put(valueList)\r\n\t\t\t\trawQueue.clear()\r\n\t\t\t\t# print(valueList)\r\n\t\t\t\t# we are going to ignore checksums for now\r", "def process_messages(self):\n pass", "def _multicastRouteIncomingMessages(self):\n with self.lock:\n with self.multicast.lock:\n while self.multicast.inbox.qsize() > 0:\n packet = self.multicast.inbox.get()\n for destinationUUID in packet.keys():\n if self.inbox.has_key(destinationUUID):\n # Copy the message from the packet to the\n # inbox with the correct destination.\n message = packet[destinationUUID]\n self.inbox[destinationUUID].put(message)", "def recv(self, *messages):\n for message in messages:\n self.input.put(message)", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))", "def run(self):\n while self._msg_queue:\n actor, msg = self._msg_queue.popleft()\n try:\n actor.send(msg)\n except StopIteration:\n pass", "def work(self):\n while True:\n message = self.get()\n self.handle(message)", "def run(self):\n\n def callback(ch, method, properties, body):\n json_body = json.loads(body)\n self.buffer.append(Fvalue.fromdict(json_body))\n\n sleep(5) # We introduce a slight delay to let the RabbitMQ container to accept connections\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.mq_host,port=self.mq_port))\n channel = connection.channel()\n channel.exchange_declare(exchange=self.mq_host + '_exchange', exchange_type='direct')\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n channel.queue_bind(exchange=self.mq_host + '_exchange',\n queue=queue_name,\n routing_key=self.routing_key)\n channel.basic_consume(callback,queue=queue_name,no_ack=True)\n channel.start_consuming()", "def serve(self):\r\n self.channel.wait()\r\n handler, seq, obj = self._recv()\r\n if handler == \"result\":\r\n self.dispatch_result(seq, obj)\r\n elif handler == \"exception\":\r\n self.dispatch_exception(seq, obj)\r\n else:\r\n self.dispatch_request(handler, seq, obj)", "def get_messages_from_queue(fx):\n\n for msg in queue.receive_messages():\n fx(msg)", "def run(self):\n while True:\n socks = select.select(self.sockets.values(), [], [], 0.1)[0]\n for conn in socks:\n try:\n k = conn.recv(65535)\n except:\n # either died on a connection reset, or was SIGTERM's by parent\n return\n if k:\n for sock in self.sockets:\n if self.sockets[sock] == conn:\n srcif = sock\n msg = json.loads(k)\n\n # DEBUG STATEMENTS\n if False:\n print(\"<--------------------------------->\")\n print(\"[NEW MESSAGE INCOMING]\")\n print(\"SOURCE:\", srcif)\n print(\"MSG:\", msg)\n print(\"<--------------------------------->\")\n\n if not self.handle_packet(srcif, msg):\n 
self.send_error(conn, msg)\n else:\n print (\"ROUTES:\", self.routes)\n return", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def _listen_to_queues(cls):\n queues = cls.get_service_queues()\n for queue in queues:\n queue.consume(cls.process_messages)", "def _process_messages(self):\r\n \r\n self._print(\"%s: Starting _process messages, looking out for special messages:\" \\\r\n % (self._clientnr))\r\n \r\n # Set some expected messages.\r\n expected = {}\r\n expected['clientconfirm'] = cb.CLIENTCONFIRM[:cb.CLIENTCONFIRM.find('_')]\r\n expected['waitwhat'] = cb.WAITWHATCLIENT[:cb.WAITWHATCLIENT.find('_')]\r\n \r\n for key in expected.keys():\r\n self._print(\"%s: Special message '%s': '%s'\" % \\\r\n (self._clientnr, key, expected[key]))\r\n \r\n # Run idefinitively\r\n while True:\r\n \r\n # Get new incoming commands.\r\n cmds = self.udp.getCommands()\r\n self._print(\"%s: Found %d new UDP commands.\" % \\\r\n (self._clientnr, len(cmds)))\r\n # Add new commands to the queue.\r\n for c in cmds:\r\n # Parse the message.\r\n target, message, clienttime = c.text.split('|')\r\n self._print(\"%s: Found message (%s to %s, t=%s) '%s'\" % \\\r\n (self._clientnr, c.ip, target, clienttime, message))\r\n # Only process messages from the server.\r\n if c.ip == self._servernr:\r\n # Check if this is a client confirmation message.\r\n if expected['clientconfirm'] in message:\r\n self._print(\"%s: Adding message '%s' (t=%s) to the incoming queue\" \\\r\n % (self._clientnr, message, clienttime))\r\n self._incominglock.acquire()\r\n self._incoming.append(message)\r\n self._incominglock.release()\r\n # Only process the messages that were directed at this client.\r\n elif target in ['None', str(self._clientnr)]:\r\n # Check if this is a confused message to find out what\r\n # the client is waiting for.\r\n if expected['waitwhat'] in message:\r\n self._print(\"%s: Received '%s' from server\" % \\\r\n (self._clientnr, message))\r\n # Parse the waitwhat message, which looks like this:\r\n # 'waitwhatclient_expected=%s'\r\n msg, xpctd = message.split('_')\r\n xpctd = xpctd[xpctd.find('=')+1:]\r\n # Re-send the last version of the expected message.\r\n if xpctd in self._lastmessage.keys():\r\n self._outgoing.append(self._lastmessage[xpctd])\r\n self._print(\"%s: Resending the last version of expected message '%s': '%s'\" % \\\r\n (self._clientnr, xpctd, self._lastmessage[xpctd]))\r\n else:\r\n self._print(\"%s: Do not have a last version of expected message '%s'\" % \\\r\n (self._clientnr, xpctd))\r\n else:\r\n # Add the message to the queue.\r\n self._print(\"%s: Adding message '%s' (t=%s) to the incoming queue\" \\\r\n % (self._clientnr, message, clienttime))\r\n self._incominglock.acquire()\r\n self._incoming.append(message)\r\n self._incominglock.release()\r\n # Chuck a message out if the queue is getting too long.\r\n if len(self._incoming) > self._maxincominglen:\r\n self._incominglock.acquire()\r\n delmsg = self._incoming.pop(0)\r\n self._incominglock.release()\r\n self._print(\"%s: Removed message '%s' from the incoming queue\" \\\r\n % (self._clientnr, 
delmsg))\r\n else:\r\n self._print(\"%s: Ignoring message '%s', as it wasn't for me (%s)\" \\\r\n % (self._clientnr, message, self._clientnr))\r\n else:\r\n self._print(\"%s: Ignoring message '%s', as it wasn't from the server (%s)\" \\\r\n % (self._clientnr, message, self._servernr))\r\n \r\n # Process outgoing commands.\r\n while len(self._outgoing) > 0:\r\n # Send a message to the server.\r\n self._outgoinglock.acquire()\r\n message = self._outgoing.pop(0)\r\n self._outgoinglock.release()\r\n self._print(\"%s: Sending '%s' to %s\" % \\\r\n (self._clientnr, message, self._servernr))\r\n msg = 'cmd,%s|%s' % (self._servernr, message)\r\n self.udp.sendWithTimeStamp(msg, '|')\r\n for i in range(self._message_reps):\r\n self.udp.sendWithTimeStamp(msg, '|')\r\n # Store the message in the 'last sent' dict.\r\n if '_' in message:\r\n m = message[:message.find('_')]\r\n else:\r\n m = message\r\n self._lastmessage[m] = message", "async def route_message(self, msg):\n raise NotImplementedError", "def controls():\n\n context = zmq.Context()\n\n print(\"Transmitting commands to process.\")\n socket = context.socket(zmq.REQ)\n rc = socket.connect(\"ipc:///tmp/mail_queue_ipc\")\n #print(rc)\n\n\n for request in range(2):\n print(\"Sending request %s\" % request)\n socket.send(b\"insert\")\n\n message = socket.recv()\n print(\"Recieved reply %s [ %s ]\" % (request, message))\n time.sleep(1)", "def processIncoming(self):\n while (self.queue.qsize()):\n try:\n message = self.queue.get_nowait()\n \n self.terminal.insert(END,message)\n\n # Autoscroll the terminal if set\n if (self.autoscroll_value.get()):\n self.terminal.yview(END)\n\n except Queue.Empty:\n pass", "def dispatch_messages(sock, queue, channel):\n while run:\n try:\n message = queue.get()\n except Queue.Empty:\n pass\n else:\n if message.recipient is None:\n message.recipient = channel\n sock.send(\"{0}\\r\\n\".format(message.msg()))\n logging.debug(\"{0}\".format(message.msg()))\n queue.task_done()", "def router(paramstring):\r\n channels()", "def handle_recv(self,stream,msgs):\n pass", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def process(self, message=None):\n\n while self.running:\n message = self.channel.basic.get(self.queue)\n if message:\n content = message.body\n\n # log message\n if self.debug:\n self.log(\"Recieved: \" + str(content))\n\n # send to child nodes\n self.scatter(Message(**self.parse(content)))\n else:\n # yield to other greenlet\n # self.tick()\n self.sleep(1)", "def consume_messages(self):\n\n method_frame, properties, body = self.channel.basic_get(self.queue_name, no_ack=False)\n\n while method_frame:\n\n LOGGER.info(\"Message received\")\n\n self.channel.basic_ack(method_frame.delivery_tag)\n payload = json.loads(body)\n if not isinstance(payload, dict):\n return\n\n # Process the message\n if 'control' in payload:\n LOGGER.info(\"A control signal received!\")\n # self.set_control(payload['control'])\n print(payload['control'])\n\n # Continue getting messages\n method_frame, properties, body = self.channel.basic_get(self.queue_name, no_ack=False)\n\n # TODO\n # 
return control_signal", "def query_thread_func(self):\n while True:\n # Receive and parse the query message.\n message = self.router_socket.recv_multipart()\n assert (len(message) == 3)\n assert not message[1]\n query = json.loads(message[2].decode('utf-8'))\n peer = message[0]\n assert (\"type\" in query)\n log.info(\n \"ACL Manager received packet %s from %s\",\n query, binascii.hexlify(peer)\n )\n\n if query[\"type\"] == \"GETACLSTATE\":\n endpoint = query[\"endpoint_id\"]\n log.info(\"Received query message %s from Felix\" % message)\n self.acl_store.query_endpoint_rules(endpoint)\n query[\"rc\"] = \"SUCCESS\"\n query[\"message\"] = \"\"\n else:\n # Received unexpected message. Log and return it.\n log.warning(\"Received query %s of unknown type\" % query)\n query[\"rc\"] = \"FAILURE\"\n query[\"message\"] = \"Unknown message type: expected GETACLSTATE\"\n\n log.debug(\"Sending response message: %s, %s\" %\n (peer, json.dumps(query).encode(\"utf-8\")))\n self.router_socket.send_multipart(\n (peer,\n \"\",\n json.dumps(query).encode(\"utf-8\"))\n )", "def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)", "def handle(self):\n try:\n while True:\n\n # Pop the message from the queue\n\n msg = self.queue.get_nowait()\n\n # Log anything if necesary\n\n self.log_message(msg)\n\n # Identify the src peer\n\n if 'src_id' in msg:\n\n if msg['src_id'] == -1:\n\n this_peer = None # Server message\n\n else:\n\n this_peer = self.peers[msg['src_id']]\n\n # If we are not up-to-date with server, only accept MSG_CONNECT and MSG_SET_ALL\n\n if isinstance(msg, MSG_CONNECT):\n\n if self.marker.id != msg['src_id']:\n\n print(\"Peer '{}' has joined the session\".format(msg['name']))\n\n elif type(msg) == MSG_SET_ALL:\n\n # Set the contents of the text box\n\n self.handle_setall(msg['data'])\n\n # Move the peers to their position\n\n for _, peer in self.peers.items():\n \n peer.move(peer.row, peer.col)\n\n # self.mark_set(peer.mark, peer.index())\n\n # Format the lines\n\n self.format_text()\n\n # Move the local peer to the start\n\n self.marker.move(1,0)\n\n # Flag that we've been update\n\n self.is_up_to_date = True\n\n elif self.is_up_to_date:\n\n # If the server responds with a console message\n\n if isinstance(msg, MSG_RESPONSE):\n\n if hasattr(self.root, \"console\"):\n\n self.root.console.write(msg['string']) \n\n # Stop running when server is manually killed \n\n elif isinstance(msg, MSG_KILL):\n\n if hasattr(self.root, \"console\"):\n\n self.root.console.write(msg['string']) \n\n self.root.push.kill()\n self.root.pull.kill()\n\n # Handles selection changes\n\n elif isinstance(msg, MSG_SELECT):\n\n sel1 = str(msg['start'])\n sel2 = str(msg['end'])\n \n this_peer.select(sel1, sel2)\n\n # Handles keypresses\n\n elif isinstance(msg, MSG_DELETE):\n\n self.handle_delete(this_peer, msg['row'], msg['col'])\n\n self.root.colour_line(msg['row'])\n\n elif type(msg) == MSG_BACKSPACE:\n\n self.handle_backspace(this_peer, msg['row'], msg['col'])\n\n self.root.colour_line(msg['row'])\n\n elif isinstance(msg, MSG_EVALUATE_BLOCK):\n\n lines = (int(msg['start_line']), int(msg['end_line']))\n\n this_peer.highlightBlock(lines)\n\n # Experimental -- evaluate code based on highlight\n\n string = self.get(\"{}.0\".format(lines[0]), \"{}.end\".format(lines[1]))\n \n self.root.lang.evaluate(string, name=str(this_peer), colour=this_peer.bg)\n\n elif isinstance(msg, MSG_EVALUATE_STRING):\n\n # Handles single lines of code evaluation, e.g. 
\"Clock.stop()\", that\n # might be evaluated but not within the text\n\n self.root.lang.evaluate(msg['string'], name=str(this_peer), colour=this_peer.bg)\n\n elif isinstance(msg, MSG_SET_MARK):\n\n row = msg['row']\n col = msg['col']\n\n this_peer.move(row, col)\n\n # If this is a local peer, make sure we can see the marker\n\n if this_peer == self.marker:\n\n self.mark_set(INSERT, \"{}.{}\".format(row, col))\n\n self.see(self.marker.mark)\n\n elif isinstance(msg, MSG_INSERT):\n\n self.handle_insert(this_peer, msg['char'], msg['row'], msg['col'])\n\n # Update IDE keywords\n\n self.root.colour_line(msg['row'])\n\n # If the msg is from the local peer, make sure they see their text AND marker\n\n if this_peer == self.marker:\n\n self.see(self.marker.mark)\n\n self.edit_separator()\n\n elif isinstance(msg, MSG_GET_ALL):\n\n # Return the contents of the text box\n\n data = self.handle_getall()\n\n reply = MSG_SET_ALL(-1, data, msg['src_id'])\n\n self.root.push_queue.put( reply ) \n\n elif isinstance(msg, MSG_REMOVE):\n\n # Remove a Peer\n this_peer.remove()\n \n del self.peers[msg['src_id']]\n \n print(\"Peer '{}' has disconnected\".format(this_peer)) \n\n elif isinstance(msg, MSG_BRACKET):\n\n # Highlight brackets on local client only\n\n if this_peer.id == self.marker.id:\n\n row1, col1 = msg['row1'], msg['col1']\n row2, col2 = msg['row2'], msg['col2']\n\n peer_col = int(self.index(this_peer.mark).split(\".\")[1])\n\n # If the *actual* mark is a ahead, adjust\n\n col2 = col2 + (peer_col - col2) - 1\n\n self.tag_add(\"tag_open_brackets\", \"{}.{}\".format(row1, col1), \"{}.{}\".format(row1, col1 + 1))\n self.tag_add(\"tag_open_brackets\", \"{}.{}\".format(row2, col2), \"{}.{}\".format(row2, col2 + 1))\n\n elif type(msg) == MSG_CONSTRAINT:\n\n new_name = msg['name']\n\n print(\"Changing to constraint to '{}'\".format(new_name))\n\n for name in self.root.creative_constraints:\n\n if name == new_name:\n\n self.root.creative_constraints[name].set(True)\n self.root.__constraint__ = constraints[name](msg['src_id'])\n\n else:\n\n self.root.creative_constraints[name].set(False)\n\n elif type(msg) == MSG_SYNC:\n\n # Set the contents of the text box\n\n self.handle_setall(msg['data'])\n\n # Move the peers to their position\n\n for _, peer in self.peers.items():\n \n peer.move(peer.row, peer.col)\n\n # Format the lines\n\n self.format_text()\n\n elif type(msg) == MSG_UNDO:\n\n self.handle_undo()\n\n # Give some useful information about what the message looked like if error\n\n else:\n\n print(\"Error in text box handling. 
Message was {}\".format(msg.info()))\n\n raise e\n\n # Update any other idle tasks\n\n self.update_idletasks()\n\n # This is possible out of date - TODO check\n\n if msg == self.root.wait_msg:\n\n self.root.waiting = False\n self.root.wait_msg = None\n self.root.reset_title()\n\n self.refreshPeerLabels()\n\n # Break when the queue is empty\n except queue.Empty:\n \n self.refreshPeerLabels()\n\n # Recursive call\n self.after(30, self.handle)\n return", "def run(self):\n while True:\n try:\n processor, iprot, oprot, otrans, callback = self.queue.get()\n if processor is None:\n break\n callback.getContext().setProtocols(iprot, oprot)\n processor.process(iprot, oprot, callback.getContext())\n callback.success(reply=otrans.getvalue())\n except Exception:\n logging.exception(\"Exception while processing request\")\n callback.failure()", "async def _process_messages(self) -> None:\n try:\n while not self._client.closed:\n msg = await self._client.receive()\n\n if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSED, WSMsgType.CLOSING):\n break\n\n if msg.type == WSMsgType.ERROR:\n raise ConnectionFailed()\n\n if msg.type != WSMsgType.TEXT:\n raise InvalidMessage(f\"Received non-Text message: {msg.type}\")\n\n try:\n data = msg.json(loads=ujson.loads)\n except ValueError as err:\n raise InvalidMessage(\"Received invalid JSON.\") from err\n\n if LOGGER.isEnabledFor(logging.DEBUG):\n LOGGER.debug(\"Received message:\\n%s\\n\", pprint.pformat(msg))\n\n self._handle_incoming_message(data)\n\n finally:\n # TODO: handle reconnect!\n LOGGER.debug(\"Listen completed. Cleaning up\")\n\n for future in self._result_futures.values():\n future.cancel()\n\n if not self._client.closed:\n await self._client.close()\n\n if self._shutdown_complete_event:\n self._shutdown_complete_event.set()\n else:\n LOGGER.debug(\"Connection lost, will reconnect in 10 seconds...\")\n self._loop.create_task(self._auto_reconnect())", "def receive(self):\n while True:\n if self.pending_request:\n request = self.unpack(self.pending_request)\n self.pending_request = None\n else: \n request = self.unpack(self.mh.receive_message())\n if request:\n yield request\n else: break", "def process_received_message(self, message):\n self.log.debug('Received \"%s\"', message)\n self.receive_queue.put(message)", "def receive(self, msg):\n pass", "def _recv(self) -> None:\n if not self.connected or now() < self.next_poll:\n return\n self.next_poll += self.poll_interval\n data = []\n while True:\n try:\n data.append(self.endpoint.recv(BUFFSIZE))\n except BlockingIOError:\n break\n if data:\n stream = io.BytesIO(b\"\".join(data))\n while True:\n try:\n info = pickle.load(stream)\n msg = Message(*info)\n self.inq.append(msg)\n except EOFError:\n break", "def run(self):\n\n self.make_connection()\n self.channel()\n self.declare_queue()\n self.publish_message()\n self.close_connection()", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n 
self.handle_message(message)", "def process_frontend_msg(self):\n logging.debug('Received message on the frontend socket')\n\n _id = self.frontend_socket.recv()\n _empty = self.frontend_socket.recv()\n msg = self.frontend_socket.recv_json()\n\n logging.debug('ID: %s', _id)\n logging.debug('Message: %s', msg)\n\n if not isinstance(msg, dict):\n self.frontend_socket.send(_id, zmq.SNDMORE)\n self.frontend_socket.send(\"\", zmq.SNDMORE)\n self.frontend_socket.send_json({ 'success': -1, 'msg': 'Request message should be in JSON format' })\n return\n\n logging.debug('Generating client id for result collecting')\n\n # Generate a service request id for our client and ask them to\n # subscribe to the result publisher endpoint in order to receive\n # their results\n req_id = uuid.uuid4().get_hex()\n self.frontend_socket.send(_id, zmq.SNDMORE)\n self.frontend_socket.send(\"\", zmq.SNDMORE)\n self.frontend_socket.send_json({'uuid': req_id, 'port': self.result_pub_port})\n \n logging.debug('Client service request id is: %s', req_id)\n \n # The message we send to the backend also contains the client\n # service request id as well. This is done so later when we receive\n # the results in the sink we can route the results to the clients properly\n msg['uuid'] = req_id\n\n logging.debug('Sending message to backend for processing')\n \n self.backend_socket.send_unicode(msg['topic'], zmq.SNDMORE)\n self.backend_socket.send_json(msg)", "def process_task(self, body, message):\n logger.info(\"Listener: New message received in MQ\")\n try:\n json_payload = json.loads(body)\n except ValueError:\n logger.error(\"Listener: Invalid JSON data received: ignoring the message\\n{body}\")\n return\n try:\n message.ack()\n except ConnectionResetError:\n logger.error(\"Listener: ConnectionResetError: message may have not been ack...\")\n message.properties['id'] = json_payload[0]['id']\n logger.debug(\"Listener: Processing request message in a new thread...\")\n try:\n if self.thread_support:\n thread = getattr(\n self.sub_worker_mod,\n self.sub_worker.split(\".\")[-1])(\n message_worker = self,\n data = json_payload,\n message = message\n )\n thread.start() # threading usage\n else:\n pass # not yet implemented\n except Exception as exc:\n logger.error('Listener: Task raised exception: %r', exc)", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(parse_message_string, string)\n d.addCallback(self._r_handle_message_contents, protocol)", "def handle_messages(self):\n\n #Get the time at which the code started running\n current_time = datetime.datetime.now()\n\n #get all messages between now and the time where a message was last received\n messages = self.client.messages.list(\n date_sent_before = datetime.datetime.now()+ datetime.timedelta(hours = TIMEDIFFERENCE),\n date_sent_after = self.last_message_timing + datetime.timedelta(hours = TIMEDIFFERENCE)\n )\n\n #Iterate through all the new messages\n for record in messages:\n #If it is not from the Twilio Client\n if record.from_ != 'whatsapp:+14155238886':\n #Then update the timing of the last message to the current time\n self.last_message_timing = current_time\n #If the message sent is the '?' 
that seeks to get the number\n #of people in the queue\n if record.body == '?':\n #Get the data about people from firebase\n people_data = self.firebase.get_data('people_count')\n #Get the number of people queueing\n no_of_people = people_data['people_count']\n #Create a message from the API to tell the person\n #asking the number of people in the queue\n message = self.client.messages.create(\n body='The number of the people in the queue is {}'.format(no_of_people),\n from_='whatsapp:{sender_number}'.format(**self.config),\n to=record.from_\n )", "def consumeMsg():\n\tosuser = 'osdev'\n\tospass = 'osdev'\n\toshost = '10.32.29.94'\n\tosport = '5672'\n\tosvhost = '/openstack'\n\tneutronExchange = Exchange('quantum', type='topic', durable=False)\n\tinfoQueue = Queue('exthook', exchange=neutronExchange , durable=False,\n\t\t\trouting_key='notifications.info')\n\twith Connection(\"\".join(['amqp://', osuser, ':', ospass, '@', \n\t\toshost, ':',osport, '/', osvhost])) as conn:\n\t\twith conn.Consumer(infoQueue, callbacks=[msgParse]):\n\t\t\twhile True:\n\t\t\t\ttry: \n\t\t\t\t\tconn.drain_events()\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tlogging.exception('Draining events from AMQP stop')\n\t\t\t\t\tbreak", "def messageHandler(self):\n\n while len(self.ReceiveMessageBuffer) > 0: # if message handler is called all received messages will be processed\n #print 'entered message handler of ID {0}'.format(self.CommID)\n msg = self.ReceiveMessageBuffer.popleft()\n self.MsgReceiveCount += 1\n self.MsgReceiveCount_interval += 1\n type = msg.getType()\n # for communication test:\n if type == 0: #System message\n print 'ID {0} has received msg {1} from ID {2}'.format(self.CommID, msg.getData(), msg.getIDSender())\n # send reply\n data = msg.getData()\n if data == 'ping':\n retval = self.sendMessage(msg.getIDSender(), 0, 'pong')\n return retval\n elif data == 'pong':\n retval = self.sendMessage(msg.getIDSender(), 0, 'ping')\n return retval\n # elif data[0] == 'system':\n # if(data[1] == 'startRONOPT'):\n # #save fluctuation curve of cluster\n # self.EFluctuationCurve = data[4]\n # #begin with local optimization (data[2] = fromTime, data[3]=toTime)\n # self.stateRONOPT = 0\n # for n in range(len(self.Neighbors)):\n # self.NeighborMessageRec[n] = 0\n # self.RemainderOfNeighborsOpt(data[2],data[3],1)\n #########################################################################################################\n\n elif type == 20: # pseudo tree generation message\n ret = self.messageHandler_PseudoTree(msg)\n if ret == -1:\n break\n\n elif type == 40: # load propagation message\n self.messageHandler_LoadProp(msg)\n\n elif type == 70:\n self.messageHandler_RemainderMulticast(msg) #remainder multicast optimization\n\n return 0", "def start(self):\n while True:\n ident = self.reply_socket.recv()\n assert self.reply_socket.rcvmore(), \"Missing message part.\"\n msg = self.reply_socket.recv_json()\n omsg = Message(msg)\n print>>sys.__stdout__\n print>>sys.__stdout__, omsg\n handler = self.handlers.get(omsg.msg_type, None)\n if handler is None:\n print >> sys.__stderr__, \"UNKNOWN MESSAGE TYPE:\", omsg\n else:\n handler(ident, omsg)", "def queue_processor(self):\n\n while self.state != consts.SMPP_CLIENT_STATE_CLOSED:\n try:\n p = self.queue.get(timeout=1)\n self._request_handler(p)\n self.queue.task_done()\n except Empty:\n pass", "def handle_message_queue(self):\n # Add some dummy messages to the queue\n message = ['dummy1', 'dummy2', 'dummy3']\n for message in messages:\n self.queue.put(message)\n\n time.sleep(1)\n # 
Send kill event\n self.dut.kill.set()\n\n time.sleep(1)\n\n assertTrue(self.dut.queue_empty.is_set())\n # _process_message_queue is done\n\n # Check that dummy messages are in server socket queue\n for message in messages:\n size, received = self.get_message_from_queue()\n\n self.assertIsNotNone(received)\n if received is None:\n return\n\n self.assertIsNotNone(size)\n if size is None:\n return\n\n fail_size = 'Message length {} Received length {}'.format(len(message),\n size)\n self.assertEqual(size, len(message), msg=fail_size)\n\n fail_contents = 'Message: ({}) Received: ({})'.format(message, received)\n self.assertEqual(message, received, msg=fail_contents)", "def run(self):\n self.logger.info(\"Starting messenger.\")\n self.recv()", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def process_sink_msg(self):\n logging.debug('Received message on the sink socket')\n \n msg = self.sink_socket.recv_json()\n \n logging.debug('Message: %s', msg)\n\n # Publish the results to the clients using the\n # request id of the service request as the topic\n self.result_pub_socket.send_unicode(msg['uuid'], zmq.SNDMORE)\n self.result_pub_socket.send_json(msg)", "def _process_messages(self, room, new_messages):\n\t\tfor message in new_messages:\n\t\t\tself._log.info(\"handling message {}\".format(message[\"id\"]))\n\n\t\t\tfor reactive in self._reactives:\n\t\t\t\ttry:\n\t\t\t\t\treactive(room, message, self, self._hipchat)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself._log.error(\"reactive {!r} errored while handling message\".format(reactive), exc_info=True)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def on_message(self, data):\n req = json.loads(data)\n self.serve(req)", "def run(self):\n while True:\n msg = self.recv()", "def _hear_message_from_server(self):\n while self.is_alive:\n data = self._socket.recv(1024)\n content = loads(data)\n self._current_list = content\n print(\"Servidor: {}\".format(content))", "def receive_message(self, message):\r\n return", "def _dispatch(self, msg):\n self.debug(\"Dispatching message CMD %r %s\", msg.cmd, msg)\n if msg.seqno in self.listeners:\n # self.debug(\"Dispatching sequence number %d\", msg.seqno)\n sem = self.listeners[msg.seqno]\n if isinstance(sem, asyncio.Semaphore):\n self.listeners[msg.seqno] = msg\n sem.release()\n else:\n self.debug(\"Got additional message without request - skipping: %s\", sem)\n elif msg.cmd == HEART_BEAT:\n self.debug(\"Got heartbeat response\")\n if self.HEARTBEAT_SEQNO in self.listeners:\n sem = self.listeners[self.HEARTBEAT_SEQNO]\n self.listeners[self.HEARTBEAT_SEQNO] = msg\n sem.release()\n elif msg.cmd == UPDATEDPS:\n self.debug(\"Got normal updatedps response\")\n if self.RESET_SEQNO in self.listeners:\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n elif msg.cmd == SESS_KEY_NEG_RESP:\n self.debug(\"Got key negotiation response\")\n if self.SESS_KEY_SEQNO in self.listeners:\n sem = self.listeners[self.SESS_KEY_SEQNO]\n self.listeners[self.SESS_KEY_SEQNO] = msg\n sem.release()\n elif msg.cmd == STATUS:\n if self.RESET_SEQNO in self.listeners:\n self.debug(\"Got reset status update\")\n sem = self.listeners[self.RESET_SEQNO]\n self.listeners[self.RESET_SEQNO] = msg\n sem.release()\n else:\n self.debug(\"Got status update\")\n self.listener(msg)\n else:\n if msg.cmd == CONTROL_NEW:\n self.debug(\"Got ACK message for command %d: will ignore it\", msg.cmd)\n else:\n self.debug(\n \"Got message type %d for unknown 
listener %d: %s\",\n msg.cmd,\n msg.seqno,\n msg,\n )", "def test_process_message_queue(self):\n t = threading.Thread(target=self.handle_message_queue)\n t.start()\n\n self.dut._process_message_queue()\n\n t.join()", "def received_message(self, m):\n self.receiver.handle_message(m)", "def _r_on_incoming_message(self, string, protocol):\n #print(\"Incoming: %s\" % string)\n d = threads.deferToThread(self._parse_message, string, protocol)\n d.addCallback(self._r_process_message, protocol)\n d.addCallbacks(callback=self._r_send_result, errback=self._r_send_error, callbackArgs=(protocol,), errbackArgs=(protocol,))", "async def _order_book_snapshot_router(self):\n while True:\n try:\n ob_message: OrderBookMessage = await self._order_book_snapshot_stream.get()\n trading_pair: str = ob_message.trading_pair\n if trading_pair not in self._tracking_message_queues:\n continue\n message_queue: asyncio.Queue = self._tracking_message_queues[trading_pair]\n await message_queue.put(ob_message)\n except asyncio.CancelledError:\n raise\n except Exception:\n self.logger().error(\"Unknown error. Retrying after 5 seconds.\", exc_info=True)\n await asyncio.sleep(5.0)", "def on_messages(self, priority=-1):\n\n def decorator(coro):\n router = ListRouter(priority=priority)\n\n @functools.wraps(coro)\n async def _wrapper(update, context):\n if not isinstance(update, Message):\n return SKIPPED\n return await coro(update, context)\n\n router.add_handler(_wrapper)\n\n self._routers.append(router)\n\n return coro\n\n return decorator", "def sendall(self, message):\n reg = re.compile(':::(\\d+):::')\n match = reg.match(message)\n self.test.assertIsNotNone(match)\n offset = len(match.group(0))\n\n try:\n size = int(match.group(1))\n except ValueError:\n size = None\n\n message = message[offset:]\n message = pickle.loads(message)\n self.queue.put((size, message))", "def handleMessage(msg):", "def run(self):\n\n try:\n if not self._connected:\n self.connect()\n\n while self._connected:\n msg = self._recvmsg()\n self.handle(msg)\n finally:\n if self._connected:\n self.disconnect()", "def handle_read(self):\n while True:\n try:\n content = self.recv(1024)\n if content:\n self.rbuf.write(content.decode('utf-8'))\n if len(content) < 1024:\n break\n except Exception as e:\n print(e)\n self.handle_rpc()", "def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())", "def _dispatch_to_client_request(self):\n # Listen for client connection\n self._to_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._to_client_request], [], [self._to_client_request], 0.1)\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n self._to_client_connections.append(client_conn)\n print(\"Sending replies to [\" + client_addr[0] + \", \" + str(client_addr[1]) + ']')", "def processEvent(self):\n # Note: break out of event dispatch loop when closedown event is received\n # and closing flag is set. 
This is to prevent DoS attack by faked closedown\n # event type, and to ensure that prior events received are all processed.\n delay_on_error_min = 0.125 # Back off retry interval on error..\n delay_on_error_max = 20.0 # ..\n delay_on_error = delay_on_error_min # ..\n while True:\n if delay_on_error < delay_on_error_max:\n delay_on_error *= 2\n try:\n # PLEASE NOTE: In the event that the HTTPC is run as duplex, not simplex\n # then the post methods will be delayed if nothing is sent down to the client\n # from the server. This timeout is controlled by QUEUE_WAIT_TIMEOUT in EventRouterHTTPS.py\n if self._simplex == True:\n self._queueEvent.wait()\n self._queueEvent.clear()\n \n if not self._queue.empty():\n Trace(\"%s queue.get ...\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n ###msgbody = self._queue.get()\n ###Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n ###self._event.set()\n msgbody = self.getQueuedItem()\n [typ,env] = msgbody\n if typ == \"closedown\":\n if self._closing: break\n else:\n # process request as an HTTP POST request\n data = makeEnvelopeData(env)\n headers = { \"Content-type\": \"text/plain\",\n \"Accept\": \"text/plain\",\n \"Content-length\": str(len(data)) }\n self._httpcon.request(\"POST\", \"/request_path_ignored\", data, headers)\n response = self._httpcon.getresponse()\n delay_on_error = delay_on_error_min\n elif self._simplex == False:\n # Nothing in queue:\n # issue a GET for incoming events\n _log.info(\"%s HTTP get ...\"%(self.getUri()))\n headers = { \"Accept\": \"text/plain\" }\n self._httpcon.request(\"GET\", \"/request_path_ignored\", None, headers)\n response = self._httpcon.getresponse()\n if response.status == 200:\n delay_on_error = delay_on_error_min\n msgbody = response.read()\n Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n # Parse message and act accordingly\n msgdata = parseMessageData(msgbody)\n Trace(\"%s get msgdata: %s\"%(self.getUri(),str(msgdata)), \"EventLib.EventRelayHTTPC\")\n if msgdata == None:\n #TODO: Log \"Request body malformed\"\n pass\n elif msgdata[0] == \"forward\":\n # msgdata = [\"forward\", [['R1', 'R2', 'R3'], 'ev:typ', 'ev:src', 'payload']]\n event = makeEvent(evtype=msgdata[1][1],source=msgdata[1][2],payload=msgdata[1][3])\n env = constructEnvelope(msgdata[1][0], event)\n self.forward(event, env)\n elif msgdata[0] == \"idle\":\n # Idle response gives client a chance to send if anything is queued\n pass\n else:\n #TODO: handle closedown message?\n Warn( \"%s Request body unrecognized option: %s\"%(self.getUri(),msgdata[0]), \"EventRelayHTTPC\")\n pass\n elif response.status == 503:\n Trace( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n # Remote end closed down\n break\n else:\n # TODO: (log error response)\n Warn( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n \n except httplib.BadStatusLine, e:\n # This can happen at closedown\n Info( \"%s processEvent bad response: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.CannotSendRequest, e:\n # This can happen at closedown\n Info( \"%s Cannot send request: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.ResponseNotReady, e:\n # This can happen at startup and sometimes other times:\n # maybe multiple 
requests on a single HTTP connection object?\n Info( \"%s Response not ready: (%s)\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except socket.error, e:\n Warn( \"%s Socket error: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n return", "def activate(self):\n self.socket.listen(self.request_queue_size)", "def receive_messages(self):\n messages = self.incoming_messages\n self.incoming_messages = []\n return messages", "async def _listen(self,sub_params): \n async with websockets.connect(self.url) as websocket:\n await websocket.send(json.dumps(sub_params))\n # self.keepalive.start()\n start_time = time.time()\n while not self.shutdown_event.is_set():\n try:\n now = time.time()\n if((now - start_time) > 0.5):\n self.calculate_order_depth()\n start_time = now \n data = await websocket.recv()\n msg = json.loads(data)\n except ValueError as e:\n self.on_error(e)\n except Exception as e:\n self.on_error(e)\n else:\n self.on_message(msg)", "def handle_messages(data_socket):\r\n size = 1\r\n message_number = 1\r\n while size != 0:\r\n lines = read_message(data_socket)\r\n size = len(lines)\r\n if size != 0:\r\n file_name = str(message_number) + \".txt\"\r\n message_number += 1\r\n with open(file_name, \"wb\") as file:\r\n write_lines(lines, file)\r\n data_socket.sendall(b'A')\r\n data_socket.sendall(b'Q')", "async def _process_queue(self, callback, socket_info,\n has_heartbeat_seq=True):\n pending_callback = False\n while True:\n unparsed_message = await socket_info.queue.get()\n #log.debug(\"Received: \" + unparsed_message)\n response = json.loads(unparsed_message)\n # Sometimes the response is a list sometimes not. Convert to list.\n message_list = response if type(response) == list else [response]\n if not message_list:\n log.warning(\"Received empty message from Gemini. This isn't a \"\n \"type of response documented in their API docs.\")\n continue\n if message_list[0]['type'] == 'heartbeat':\n if has_heartbeat_seq:\n self._process_heartbeat(message_list[0], socket_info)\n self._check_sequence(message_list[0], socket_info)\n continue\n # A non heartbeat message.\n for message in message_list:\n self._check_sequence(message, socket_info)\n state_update = callback(message)\n if state_update:\n pending_callback = True\n if not socket_info.queue.empty():\n continue\n if pending_callback and self.is_setup():\n self.exchange_state.update_publisher.notify()\n pending_callback = False", "def receive(self):\n pass", "def subscribe2API():\n\tconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n\tchannel = connection.channel()\n\n\tchannel.queue_declare(queue='ToAPIgatewayQueue')\n\n\tdef callback(ch, method, properties, body):\n\t\tif(body != ''):\t\n\t\t\tconnection.close()\n \t\tpublish2apiHandler(body)\n\t\t\t\n\t\t\t\n\t\t\t\n\tchannel.basic_consume(callback, queue='ToAPIgatewayQueue', no_ack=True)\n\n\tprint(' [*] Waiting for messages. 
To exit press CTRL+C')\n\tchannel.start_consuming()\n\t\n\treturn", "def msgsToSock(self, recv_queue):\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tmsg = recv_queue.get(True, 1)\n\t\t\t\tif self.debug:\n\t\t\t\t print(\"Message processed in the OS:\")\n\t\t\t\t print(\" Dest IP: {}\".format(msg.getHeader(0)))\n\t\t\t\t print(\" Src IP: {}\".format(msg.getHeader(1)))\n\t\t\t\t print(\" Dest Port: {}\".format(msg.getPayload().getHeader(0)))\n\t\t\t\t print(\" Src Port: {}\".format(msg.getPayload().getHeader(1)))\n\t\t\t\t print(\" Message: {}\".format(msg.getPayload().getPayload()))\n\t\t\t\t print(\" \")\n\t\t\t\tif isinstance(msg, ms.IPLayer):\n\t\t\t\t\tdest_port = ord(msg.getPayload().getHeader(0))\n\t\t\t\t\tif dest_port in self.sock_dict:\n\t\t\t\t\t\tself.sock_dict[dest_port].put(msg)\n\n\t\t\texcept q.Empty:\n\t\t\t\tcontinue", "def __receive_messages(self) -> [str]:\n while True:\n try:\n data = self.__socket.recv(4096)\n if data:\n msgs = self.__json_serializer.bytes_to_jsons(data)\n if RemotePlayerProxy.DEBUG:\n for msg in msgs:\n print(f'[RPP] [RECV] <- [{self.name}]: {msg}')\n return msgs\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(f'Lost client {self.name} because: ', e)\n return []", "def handle(self):\n req_lines = self._read_lines()\n if not req_lines:\n self.cleanup()\n for req in req_lines:\n log.debug('%s => %s', self.client, req)\n req = req.split()\n cmd = req.pop(0)\n try:\n self.get_command(cmd)(req)\n result = [OK]\n except Exception as error:\n result = [ERROR, error.message]\n self.send_line(' '.join(result))\n self.flush()", "def serve_requests(self):\n while True:\n self.server_socket.listen(self.request_queue_size)\n client_connection, client_address = self.server_socket.accept()\n self.request_handler(client_connection)", "def received_message(self, textMessage):\n\n debug(\"Recieved: %s\" % str(textMessage))\n try:\n data = json.loads(str(textMessage))\n except Exception:\n error(\"Decoding json %s \\n%s\" % (str(textMessage),\n traceback.format_exc()))\n return\n\n if 'target' not in data:\n error(\"No target in message %s \" % str(textMessage))\n return;\n\n if 'message' not in data:\n data['message'] = ''\n\n target = data['target']\n message = data['message']\n\n if target == SERVER_KEY:\n debug(\"%s - %s - %s\" % ('registering', str(id(self)), message))\n\n if message in WebSocketRouter.nodemap:\n error(\"Attempt to register existing name %s\" % message)\n return;\n\n WebSocketRouter.nodemap[message] = self\n\n # send register message to all users\n registerData = {'target':'nodemanager', 'message': message }\n registerJSON = json.dumps(registerData)\n for ws in WebSocketRouter.usermap:\n if type(ws) is WebSocketRouter:\n debug(\"Found one\")\n ws.send(registerJSON)\n\n elif target in WebSocketRouter.nodemap:\n debug(\"Sending to node id:%s\" % \\\n str(id(WebSocketRouter.nodemap[target])))\n data['target'] = self.getSender()\n debug(str(WebSocketRouter.nodemap[target]))\n try:\n WebSocketRouter.nodemap[target].send(json.dumps(data))\n except Exception, e:\n error(\" Function: %s\\nSender: %s\\nMessage: %s\\nError: %s\" % (\n target, self.getSender(), message,\n traceback.format_exc()))\n return\n\n debug(\"Sent\")\n\n elif target in WebSocketRouter.usermap:\n debug(\"Sending to user\")\n data['target'] = self.getSender()\n WebSocketRouter.usermap[target].send(json.dumps(data))\n\n elif target in WebSocketRouter.funcmap:\n self.getSender() # places user in usermap if not already\n try:\n result = WebSocketRouter.funcmap[target](self, 
message)\n debug('performed func with result %s' % str(result))\n if result is not None:\n JSON = json.dumps({'target':target, 'message': result})\n self.send(JSON)\n\n except Exception:\n error(\" Function: %s\\nSender: %s\\nMessage: %s\\nError: %s\" % (\n target, self.getSender(), message,\n traceback.format_exc()))\n return\n else:\n error(\"No Target: %s message: %s sender: %s\" % (target, message,\n self.getSender()))\n return", "def handle_request(self):\n try:\n data = self.sock.recv(1024)\n except socket.error as e: # ...,e:\n if e == 10040:\n print('Message too long, ignoring.')\n return\n raise\n self.append_to_seq(parse_packet(data))", "def send_messages(self):\n if self.messages:\n messages, self.messages = self.messages, []\n self.mpub(\"events.%s\" % config.pool, messages)", "def __listener__(self):\n frame_interval = 0.1\n str_list = []\n c = ''\n while True:\n with Timeout(frame_interval, False):\n while True:\n try:\n c = self.ser.read()\n except:\n self.ser.close()\n self.make_connection.go()\n self.connection_made.wait()\n str_list.append(c)\n if c == \"\\n\" or c == '':\n break\n received = ''.join(str_list)\n str_list = []\n if received:\n for i in self.read_handlers:\n gevent.spawn(i, received)\n sleep(0.001)", "async def receiver(self):\n socket_input = await self.websocket.recv()\n logger.debug(\"<<< Received:\\n{}\".format(socket_input))\n\n # Showdown sends this response on initial connection\n if socket_input == \"o\":\n logger.info(\"Connected on {}\".format(self.websocket_url))\n self.connected = True\n self.add_task(self.on_connect())\n return\n\n inputs = utils.parse_socket_input(socket_input)\n for room_id, inp in inputs:\n room_id = room_id or \"lobby\"\n logger.debug(\"||| Parsing:\\n{}\".format(inp))\n inp_type, params = utils.parse_text_input(inp)\n\n # Set challstr attributes and autologin\n if inp_type == \"challstr\":\n self.challengekeyid, self.challstr = params\n if self.name and self.password and self.autologin:\n await self.login()\n elif self.autologin:\n msg = (\n \"Cannot login without username and password. 
If \"\n \"you don't want your client to be logged in, \"\n \"you can use Client.start(autologin=False).\"\n )\n raise Exception(msg)\n\n # Process query response\n elif inp_type == \"queryresponse\":\n response_type, data = params[0], \"|\".join(params[1:])\n data = json.loads(data)\n self.add_task(\n self.on_query_response(response_type, data), transient=True\n )\n if response_type == \"savereplay\":\n self.add_task(\n self.server.save_replay_async(data), transient=True\n )\n\n # Challenge updates\n elif inp_type == \"updatechallenges\":\n self.challenges = json.loads(params[0])\n self.add_task(\n self.on_challenge_update(self.challenges), transient=True\n )\n\n # Messages\n elif inp_type == \"c:\" or inp_type == \"c\":\n timestamp = None\n if inp_type == \"c:\":\n timestamp, params = int(params[0]), params[1:]\n author_str, *content = params\n content = \"|\".join(content)\n chat_message = message.ChatMessage(\n room_id, timestamp, author_str, content, client=self\n )\n self.add_task(\n self.on_chat_message(chat_message), transient=True\n )\n elif inp_type == \"pm\":\n author_str, recipient_str, *content = params\n content = \"|\".join(content)\n private_message = message.PrivateMessage(\n author_str, recipient_str, content, client=self\n )\n self.add_task(\n self.on_private_message(private_message), transient=True\n )\n\n # Rooms\n elif inp_type == \"init\":\n room_type = params[0]\n room_obj = room.class_map.get(room_type, room.Room)(\n room_id, client=self, max_logs=self.max_room_logs\n )\n self.rooms[room_id] = room_obj\n self.add_task(self.on_room_init(room_obj), transient=True)\n elif inp_type == \"deinit\":\n if room_id in self.rooms:\n self.add_task(\n self.on_room_deinit(self.rooms.pop(room_id)),\n transient=True,\n )\n\n # add content to proper room\n if isinstance(self.rooms.get(room_id, None), room.Room):\n self.rooms[room_id].add_content(inp)\n\n self.add_task(\n self.on_receive(room_id, inp_type, params), transient=True\n )", "def handle(self, message):" ]
[ "0.69763803", "0.63376343", "0.6313037", "0.62614304", "0.62203604", "0.6201189", "0.6163866", "0.61292046", "0.6127975", "0.60782707", "0.60757583", "0.6066614", "0.6057568", "0.6052637", "0.6049549", "0.6036926", "0.60339874", "0.60330033", "0.60038686", "0.5997388", "0.59721", "0.59465736", "0.59287274", "0.5921095", "0.5920825", "0.5917532", "0.59102213", "0.5902322", "0.5898464", "0.58868957", "0.5881477", "0.58695185", "0.58664876", "0.5865328", "0.5863873", "0.58636934", "0.5810151", "0.58059686", "0.5803103", "0.5803016", "0.578942", "0.5785212", "0.5773676", "0.5772197", "0.57693064", "0.57676524", "0.57674664", "0.57625574", "0.57578695", "0.5755704", "0.5751001", "0.5750513", "0.5743363", "0.5735322", "0.5728757", "0.5719356", "0.5714945", "0.5713018", "0.57096016", "0.5706943", "0.5706703", "0.56946075", "0.56882215", "0.56814164", "0.56766367", "0.5674658", "0.5674658", "0.56602204", "0.56593335", "0.56584984", "0.5652429", "0.5635758", "0.5626339", "0.562183", "0.5618266", "0.56176525", "0.5615941", "0.561519", "0.56149155", "0.5609464", "0.5607916", "0.56072503", "0.5607099", "0.5602759", "0.55932707", "0.5591552", "0.55913556", "0.55833113", "0.5571275", "0.5564289", "0.55625695", "0.5558606", "0.5551219", "0.55505383", "0.55450326", "0.5540545", "0.553077", "0.5518565", "0.55167323", "0.5511307" ]
0.55585676
92
Shut down the event router thread
def close(self):
        Trace("%s close"%(self.getUri()), "EventLib.EventRelayHTTPC")
        self._httpcon.close()
        self._closing = True
        self._event.set()
        self._queueEvent.set()
        self._queue.put(["closedown",[]])
        self._thread.join()
        Trace("%s closed"%(self.getUri()), "EventLib.EventRelayHTTPC")
        return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def shutdown(self):\n self.socket_thread.stop()", "def shutdown(self):\n if not self.stop_event.is_set():\n self.stop_event.set()\n\n if self.pusher_thread:\n self.pusher_thread.join()", "def __del__(self):\n AppHelper.stopEventLoop()", "def __del__(self):\n AppHelper.stopEventLoop()", "def shutDown(self):\n self.host = None\n self.port = None\n if(self.loop is not None):\n test = asyncio.run_coroutine_threadsafe(self.stopLoop(), self.loop)\n self.thread.join()\n if(self.loop.is_running()):\n self.loop.stop()\n else:\n self.loop.close()\n self.pool.shutDown()\n self.pool = None\n self.loop = None\n self.thread = None", "def shutdown(self):\n\t\tself._log.info('shutting down DHT')\n\t\tself._threads.shutdown() # Trigger shutdown of maintainance threads\n\t\tself._krpc.shutdown() # Stop listening for incoming connections\n\t\tself._nodes.shutdown()\n\t\tself._threads.join() # Trigger shutdown of maintainance threads", "def __exit__(self, exc_type, exc_val, exc_tb):\n if self.event_loop:\n self.event_loop.stop()", "def Quit(self):\n t = threading.Thread(target=self.server.shutdown)\n t.start()", "def shutdown(self):\n self.exit_event.set()", "def shutdown(self):\n ...", "def shutdown(self):\n self.thread.server.shutdown()\n self.thread.join()", "def shutdown(self):", "def shutdown(self):\r\n self.socket.close()\r\n # self.socket_video.close()\r\n self.socket_state.close()", "def stop(self):\n logging.info(\"Shutting down thread...\")\n self.disconnect()\n self.running = False", "def shutdown(self):\n self.log.info(\"Purging event queue...\")\n self.kill_all()\n self.log.info(\"done\")\n self.log.info(\"Exiting...\")\n sys.exit()", "def shutdown():\n global handler, transport, protocol\n if handler is not None:\n handler.close()\n transport.close()\n handler = None\n transport = None\n protocol = None", "def __shutdown(self):\n\n self._rpyReader.shutdownEvent.set()\n self._gpsReader.shutdownEvent.set()\n self._tcpSender.shutdownEvent.set()\n\n self._rpyReader.join()\n self._gpsReader.join()\n self._tcpSender.join()\n\n self._serverSocket.close()", "def stop(self):\n if self.socket is not None:\n self.socket.close()\n self.__stopevent.set()\n del self._cb_handle", "def shutdown(self) -> None:", "def shutdown(self) -> None:", "def _shutdown(self):\n self.control_socket.send(zmqmessage.IPC_END)\n self.end_threads = True\n self.timeout = 1", "def shutdown(self):\n self.req_shutdown = True", "def stop(self):\n self._transport = None\n self._cleanup()\n self._disconnected_callback = None", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "def shutdown(self):\n pass", "async def shutdown(self):", "def tearDownZServerThread(self):\n self.zserver.close()\n self.zthread.stop()", "def teardownClass(cls):\n shutil.rmtree(cls._tmp_dir)\n cls.shutdown_event.set()\n cls.server_proc.join()", "def shutdown(self):\n\n pass", "def shutdown(self):\n\n reactor.callLater(0, reactor.stop)", "def stop(self):\n\n self.__stop_threads = True\n self.__new_bus_Socket.close()\n self.__bus_stations_Socket.close()\n print(f\"stopped {self}\")", "def shutdown() -> None: # TODO Better place for this code\n # TODO Safe landing\n pass", "def clean_up(self):\n self.stop_full()\n self.ctrl_sock.close()\n self.context.term()", "def shutdown(self):\n self.exit_app()", "def Release(self):\n print(\"Release : stopping event loop\", self.loop)\n self.loop.quit()\n pass", "def shutdown(self):\n self.sock.close()", "def tearDown(self):\n self.server.stop()\n self._server_thread.join()", "def shutdown(self):\n\n self.active 
= False\n\n try:\n self.listen_socket.shutdown(socket.SHUT_RDWR)\n except:\n self.logger.info(\"Ignoring listen soc shutdown error\")\n self.listen_socket = None\n\n with self.connect_cv:\n self.connect_cv.notifyAll()\n\n self.wakeup()\n self.dbg_state = \"down\"", "def quit(self):\n self.stop()\n self.handler = None", "def _shutdown(self):", "def __del__(self):\n self.shutdown()", "def main_thread_exit(self):\n ...", "def shutdown(self) -> None:\n pass", "def terminate(self):\n print('Terminating Revshell thread.')\n self.server.close()", "def shutdown(self):\n try:\n if self.working and self.exiting.acquire():\n self.bot('shutting down...')\n self.working = False\n self._handleEvent(self.getEvent('EVT_STOP'))\n if self._cron:\n self._cron.stop()\n self.bot('shutting down database connections...')\n self.storage.shutdown()\n except Exception, e:\n self.error(e)", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.server.stop()\n self.driver.quit()", "def shutup(self):\n logging.debug(\"shutup...\")\n self._eng.stop()", "def teardown(self):\n if self.ae:\n self.ae.shutdown()", "def shutdown(self):\n debug_print('Stopping JSONRPCTCPServer thread...')\n self.__rpc_server.shutdown()", "def shutdown(self):\n self._shutdown(None, None)\n self._running = False", "def shutdown(self):\n self._msg_disp.abort()\n self._conn_mgr.shutdown_connections()", "def shutdown(self):\n self._ifccountslock.acquire()\n self._ifccounts.clear()\n self._ifccountslock.release()\n self._objslock.acquire()\n if len(self._objs) == 0:\n self._objslock.release()\n return\n logger.info(\"Stopping EMANE daemons.\")\n self.deinstallnetifs()\n self.stopdaemons()\n self.stopeventmonitor()\n self._objslock.release()", "def teardown(self):\n\n\t\tself.shutdown = True\n\t\tself.terminate_process()", "def __exit__(self, exc_type, exc_value, traceback): \n self.shutdown()", "def shutdown(self):\n ts.client.transport.close()", "def stop(self):\n self._running = False\n self._logger.info(\"Locator finished main loop\")", "async def cleanup(self) -> None:\n if self.args.sync:\n self.stop_events_sync()\n self._processing.join()\n else:\n await self.stop_events_async()\n await self._processing\n try:\n raise self._error\n except TypeError:\n pass", "def __exit__(self):\n self._stop_all()", "def takedown(self):\n GPIO.cleanup()\n\n self.running = False\n self.data_handler.send_all_to_backlog()\n\n \n del(self)", "def stop(self):\n self.stopped = True\n self.broadcast('host down')", "def shutdown(self):\n if self.alive:\n libplasma.disconnect(self.conn)\n self.alive = False", "def cleanupAtExit():\n \n global client\n \n client.stop()", "def stop(self):\n self.bus.log('Stopping down OLA Plugin.')\n self.bus.unsubscribe(\n self.channel_names['channel_request'],\n self.handle_channel\n )\n self.bus.unsubscribe(\n self.channel_names['channel_set'],\n self.handle_channel_set\n )\n self.ola_connection.disconnect()\n # wait for thread to finish.\n self.ola_connection.join()", "def stop(self):\n\n self.stop_thread = True", "async def _internalStop(self):\n self.wserver.close()\n await self.wserver.wait_closed()\n self.server.stop()\n while (self.server.is_running()):\n time.sleep(0.5)\n self.server.close()\n self.server = None", "def shutdown(self):\n self.broadcast(self.server_socket, '[server shutdown]', 'server')\n self.selector.unregister(self.server_socket)\n self.server_socket.close()", "def shutdown(self):\n self._shutdown_requested_event.set()\n SimpleJSONRPCServer.SimpleJSONRPCServer.shutdown(self)\n logging.info('Server shutdown 
complete')", "def shutdown(self):\n self.shutdown_requested = True", "def __shutdown(self):\n\n pass", "def shutdown(self):\n for param in [self._mic_name_param, self._lm_param, self._dic_param,\n self._audio_topic_param, self._hmm_param, self._kws_param]:\n if rospy.has_param(param):\n rospy.delete_param(param)\n\n # Stop & disconnect pipeline\n self.pipeline.remove(self.asr)\n self.bus.disconnect(self.bus_id)\n\n self.pipeline.set_state(gst.State.NULL)\n self.pipeline.get_state(gst.CLOCK_TIME_NONE)\n \"\"\" Shutdown the GTK thread. \"\"\"\n gtk.main_quit()", "def teardown_test(self):\n self.log.info('Tearing down the test case')\n self.iperf_server.stop()\n self.access_point.bridge.teardown(self.brconfigs)\n self.access_point.close()\n wputils.reset_host_interface(self.pkt_sender.interface)\n self.mon.usb('on')", "def stop() -> None:\n global _server\n if _server:\n try:\n _server.shutdown()\n except Exception:\n pass", "def shutdown(self):\n #=======================================================================\n #\n # TODO: Place any cleanup code here.\n #\n #=======================================================================\n return", "def atexit(self):\n self.stop_listen()\n for driver in self.drivers.values():\n driver.stop()\n if hasattr(driver, \"atexit\"):\n driver.atexit()\n try:\n self.processor_thread.join()\n except AttributeError:\n pass", "def shutdown(self):\n self.running = False\n ev3.Leds.all_off()\n self.left_motor.stop()\n self.right_motor.stop()", "def stopThreads():\n global listen, root\n\n listen = False\n root.destroy()", "def __exit__(self, *args):\n self.stop()", "async def __aexit__(self, *args) -> None:\n self.stop()\n await self.join()", "def shutdown(self):\n print(\"shutting down resource manager\")", "async def callback_homeassistant_stop(self, event: \"Event\") -> NoReturn:\n _LOGGER.debug(\"Hekr system is shutting down\")\n for device_id, device in self.devices.items():\n connector = device.connector\n listener = connector.listener\n if listener is not None and listener.is_running:\n _LOGGER.debug('Shutting down listener for device ID \"%s\"' % device_id)\n listener.stop()\n\n if connector.is_connected:\n _LOGGER.debug('Shutting down connector for device ID \"%s\"' % device_id)\n await connector.close_connection()", "def socket_thread_stopped(self):\n self.done = True", "def stop(self):\n self._Thread__stop()", "def shutdown(self):\n self.action('shutdown')", "async def shutdown(self) -> int:", "def stop():\n current_event_loop().stop()", "def quit_application(self, event):\n self.Close()\n server.closeSocket()", "def stop(self):\n if self.started:\n try:\n self.server.shutdown()\n self.server.server_close()\n self.server_thread.join()\n self.server_thread = None\n except AttributeError:\n pass\n self.started = False\n self.server = None", "def slot_stop(self):\n\n self.thread.working = False", "def REBshutdown(self):\n pass", "def shutdown():\n\n logger.debug(\"GiantbombHandler shutdown\")", "def OnClose(self, event):\r\n if self.worker: #stop main GPIB thread\r\n self.worker.abort()\r\n time.sleep(0.3)\r\n self.Destroy()", "async def shutdown(self):\n if self._unsub_stop:\n self._unsub_stop()\n self._unsub_stop = None\n await self.device.shutdown()", "def Shutdown(self):\n self.conn.send(False)\n self.process.join()", "def shutdown(self):\n\n if self.isBound() and not self.connectionCorrupted:\n self.log.warning(\"Shutdown requested...unbinding\")\n self.unbind().addBoth(lambda result: self.disconnect())\n elif self.sessionState not in 
(SMPPSessionStates.UNBIND_RECEIVED, SMPPSessionStates.UNBIND_PENDING):\n self.log.warning(\"Shutdown requested...disconnecting\")\n self.disconnect()\n else:\n self.log.debug(\"Shutdown already in progress\")", "def stop(self):\n # print \"process shutdown complete\"", "def shutdown(self, signum, frame):\n self.log('WARNING', -1, 'Shutting down normally ...')\n main_thread = threading.current_thread()\n\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n self.server_socket.close()\n sys.exit(0)", "def stop_connection(self):\n self.libEDK.EE_EngineDisconnect()\n self.libEDK.EE_EmoStateFree(self.e_state)\n self.libEDK.EE_EmoEngineEventFree(self.e_event)", "def stop(self):\n with self.stop_event_lock:\n self.stop_event.set()\n self.clean_up()", "def shutdown(self):\n TCPServer.shutdown(self)\n self.server_close()\n self.ae._servers.remove(self)", "def _shutdown(self, *args):\n self.server.shutdown()" ]
[ "0.73733056", "0.7205434", "0.7066241", "0.7066241", "0.68949795", "0.6849437", "0.6786754", "0.6766629", "0.67512125", "0.6745822", "0.6738774", "0.67175186", "0.6715772", "0.6702667", "0.669924", "0.66910434", "0.6680487", "0.66720706", "0.66561806", "0.66561806", "0.6640596", "0.6633338", "0.6625795", "0.6624714", "0.6624714", "0.6624714", "0.66127837", "0.65947753", "0.6586154", "0.6579285", "0.65784246", "0.65445685", "0.65369576", "0.6499835", "0.64899814", "0.64832884", "0.64760804", "0.6473791", "0.64658004", "0.64625186", "0.64503926", "0.6444075", "0.6439922", "0.6439393", "0.642913", "0.6415193", "0.6402132", "0.63923174", "0.63755137", "0.63626736", "0.6360209", "0.6347473", "0.63458365", "0.634125", "0.63295794", "0.6316964", "0.6309852", "0.63009876", "0.6297995", "0.6295834", "0.6287845", "0.627978", "0.62795264", "0.6275598", "0.6273465", "0.6267022", "0.6258988", "0.62557817", "0.6253495", "0.62532836", "0.62448627", "0.62421167", "0.6220931", "0.6216546", "0.6215381", "0.62049055", "0.6204861", "0.6203862", "0.6203142", "0.6195443", "0.6188418", "0.6183701", "0.6175124", "0.61748403", "0.6174007", "0.617141", "0.61682653", "0.61624485", "0.6158489", "0.6158123", "0.61576205", "0.615355", "0.61520743", "0.61518145", "0.6151423", "0.61479354", "0.61379737", "0.6132271", "0.6127136", "0.612246", "0.6117748" ]
0.0
-1
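Every snippet in the negatives above is a variation on the same graceful-shutdown idiom: set a stop flag, unblock whatever the worker thread is waiting on, join the thread, then release resources. A minimal self-contained sketch of that pattern — the class and all names here are illustrative, not taken from any snippet in the record:

```python
import socket
import threading

class StoppableServer:
    """Sketch of the shared shutdown idiom: signal, join, then clean up."""

    def __init__(self, host="127.0.0.1", port=0):
        self._stop_event = threading.Event()
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock.bind((host, port))
        self._sock.listen(5)
        self._sock.settimeout(0.5)  # accept() wakes periodically to re-check the flag
        self._thread = threading.Thread(target=self._serve, daemon=True)

    def start(self):
        self._thread.start()

    def _serve(self):
        while not self._stop_event.is_set():
            try:
                conn, _addr = self._sock.accept()
            except socket.timeout:
                continue  # nothing arrived; loop back and re-check the stop flag
            conn.close()  # a real server would hand the connection to a handler here

    def shutdown(self):
        self._stop_event.set()  # 1) signal the worker
        self._thread.join()     # 2) wait for its loop to exit
        self._sock.close()      # 3) release the port only after the thread is gone
```

Closing the socket only after `join()` returns avoids the `OSError` that `accept()` would raise if the socket vanished under it — the same race several of the snippets above guard against by wrapping their socket shutdowns in try/except.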
Add an item to the queue, and return a deferred object that fires when an item is removed (or the queue is empty).
def queueItem(self, item): Trace("%s queueItem (%s)"%(self.getUri(),item), "EventLib.EventRelayHTTPC") if not self._closing: self._queue.put(item) self._queueEvent.set() return makeQueueDeferred(StatusVal.OK, self._queue, self._event) return makeDeferred(StatusVal.OK)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, item):\n completeDeferred = defer.Deferred()\n self.queue.append((item, completeDeferred))", "def add(self, item: T) -> None:\n self._queue.append(item)\n if not self.is_empty():\n self._queue.sort(reverse=True)", "def enqueue(self, item):\n self.queue.append(item)", "def enqueue(self, item):\n self.queue.append(item)", "def enqueue(self, item):\n self._queue.append(item)", "def put(self, item: Any):\n has_item = True\n with self._lock:\n if item not in self._items:\n self._items.add(item)\n has_item = False\n if not has_item:\n self._queue.put(item)", "def enqueue(self, item):\n self.__queue.insert(0, item)", "def put_nowait(self, item: _T) -> None:\n self._consume_expired()\n if self._getters:\n assert self.empty(), \"queue non-empty, why are getters waiting?\"\n getter = self._getters.popleft()\n self.__put_internal(item)\n future_set_result_unless_cancelled(getter, self._get())\n elif self.full():\n raise QueueFull\n else:\n self.__put_internal(item)", "def getQueuedItem(self):\n Trace(\"%s getQueuedItem ...\"%(self.getUri()), context=\"EventLib.EventRelayHTTPC\")\n item = self._queue.get()\n Trace(\"%s getQueuedItem (%s)\"%(self.getUri(),item), context=\"EventLib.EventRelayHTTPC\")\n self._event.set()\n return item", "def _put(self, item, queue):", "def enqueue(self, item):\n self.list.append(item)", "def put_nowait(self, item):\r\n if self.full():\r\n raise QueueFull\r\n self._put(item)\r\n self._unfinished_tasks += 1\r\n self._finished.clear()\r\n self._wakeup_next(self._getters)", "def put(\n self, item: _T, timeout: Optional[Union[float, datetime.timedelta]] = None\n ) -> \"Future[None]\":\n future = Future() # type: Future[None]\n try:\n self.put_nowait(item)\n except QueueFull:\n self._putters.append((item, future))\n _set_timeout(future, timeout)\n else:\n future.set_result(None)\n return future", "def deque_non_blocking_put(self, item):\n try:\n self.q.put(item, block=False)\n return True\n except queue.Full:\n try:\n _ = self.q.get(block=False)\n dropped = False\n except queue.Empty:\n dropped = True\n # TODO - could crash due to a race condition, could be solved with a lock\n self.q.put(item, block=False)\n return dropped", "def add_to_queue(self, msg):\n if not self.queue.full():\n self.queue.put(msg)", "def enqueue(self, item):\n\n self.__items__.append(item)", "def enqueue(Q, x):\n # Q.append(x)\n Q.put_nowait(x)\n if debug: \n print(\"enqueue\", x, \":\", end=\" \")\n show_queue(Q)\n return Q", "def enqueue(self, item):\n\t\tself.items.insert(0, item)", "def enqueue(self, item):\n self.items.insert(0, item)", "def enqueue(self, item):\n self.items.insert(0, item)", "def append ( self , item ) :\n self.cond.acquire()\n try:\n if self.closed :\n raise Exception( \"Trying to append to a closed queue\" )\n else :\n self.weight += int( item['size'] )\n self.push( item )\n self.cond.notify()\n finally:\n self.cond.release()", "def push(self, value):\n if self.please_stop and not self.allow_add_after_close:\n Log.error(\"Do not push to closed queue\")\n\n with self.lock:\n self._wait_for_queue_space()\n if not self.please_stop:\n self.queue.appendleft(value)\n return self", "def add(self, item):\n self.sleeping.reveille() # wake items whose sleep timer has expired\n self.stack.push(item)", "def enqueue(self, x):\r\n self.queue.append(x)\r\n return self.queue", "def put(self, item):\r\n while self.full():\r\n putter = self._loop.create_future()\r\n self._putters.append(putter)\r\n try:\r\n yield from putter\r\n except:\r\n putter.cancel() # Just in case putter 
is not done yet.\r\n if not self.full() and not putter.cancelled():\r\n # We were woken up by get_nowait(), but can't take\r\n # the call. Wake up the next in line.\r\n self._wakeup_next(self._putters)\r\n raise\r\n return self.put_nowait(item)", "def __add__(self, value):\n self.queue.append(value)", "def non_blocking_put(self, item):\n try:\n self.q.put(item, block=False)\n return True\n except queue.Full:\n return False", "def add(self, item):\n if self.has_item(item):\n return\n\n self.cache.append(item)\n\n if self.size() > self.max_size:\n self.cache.popleft()", "def add_message_to_queue(self, message):\n\t\t\t\tself.message_queue.append(message)\n\t\t\t\treturn self.message_queue", "def maybe_enqueue(self):\n if len(self._vals) > 0:\n self.enqueued = True\n return self._queue.enqueue(self._vals)\n else:\n return None", "def append(self, item):\n\t\theapq.heappush(self.heap, (self.f(item), item))", "def put(self,item):\n\t\ttry:\n\t\t\tself.logger.debug('Im trying to put new item to queue %s'%(item))\n\t\t\tself.queue.put(item)\n\t\t\tself.logger.debug('Successfull put new item to queue')\n\t\t\treturn True\n\t\texcept Exception, e:\n\t\t\tself.logger.error('Error method put, item: %s, error: %s'%(item,e),exc_info=True)\n\t\t\treturn False", "def enqueue(self, item):\n\n self._data.append(item)", "def enqueue_rear(self, item):\n self._items.append(item)", "def enqueue(self, item):\r\n raise QueueException(\"Unimplemented Abstract Queue Function\")", "def add(self, item):\r\n if len(self.buff)==self.size: self.buff.pop(0)\r\n self.buff.append(item)", "def enqueue(self, val):\r\n self.queue.append(val)", "def deque_timeout_put(self, item, timeout):\n try:\n self.q.put(item, timeout=timeout)\n return True\n except queue.Full:\n try:\n _ = self.q.get(block=False)\n dropped = False\n except queue.Empty:\n dropped = True\n # TODO - could crash due to a race condition, could be solved with a lock\n self.q.put(item, block=False)\n return dropped", "async def added(self) -> Tuple[bool, List[Any]]:\n more = True\n # Make sure waiting on event_added is done by one coroutine at a time.\n # Multiple might be waiting and if there is only one event in the queue\n # they would all otherwise be triggered\n async with self.parent.event_added_lock:\n await self.parent.event_added.wait()\n async with self.parent.lock:\n notification_item = self.parent.notification_items.pop(0)\n # If there are still more items that the added event hasn't\n # processed then make sure we will return immediately if called\n # again\n if not self.parent.notification_items:\n more = False\n self.parent.event_added.clear()\n return more, notification_item", "def queue_append(self, obj, value):\n self.queue.append((obj, value))\n if len(self.queue) > self.queue_size:\n self.dump_queue()", "def add_queue(self, queue):\n with self.mutex:\n self.queues.append(queue)", "def enqueue(self, element):\n self.the_queue.append(element)", "def add(self, item):\n if not (item in self.set):\n self.set[item] = True\n heapq.heappush(self.heap, item)", "def push(self, item):\n if item not in self._items:\n self._items.append(item)", "def addEvent(self, IEvent):\n self.queue.put(IEvent)", "def enqueue(self, value: object) -> None:\n enqueue = self.da.append(value)\n return enqueue", "def push(self, val):\r\n return self.deque.append(val)", "def push(self, x):\r\n self.queue.append(x)", "def test_the_queue_enqueue(the_queue):\n the_queue.enqueue(2)\n assert the_queue._new_dll.head.data == the_queue._new_dll.tail.data == 2", "def put(self, item, 
block=True, timeout=None):\n self.q.put(item, block, timeout)", "def insert_and_check(self, item) -> bool:\n with Monitor.acquire(self):\n if item in self:\n return False\n self.add(item)\n return True", "def enqueue(self, item=None):\n if item:\n self._queue_items.insert(0, item)\n else:\n raise QueueException('queue element cannot be an empty string')", "def add_to_queue(self, removed):\n if self.exploration_strategy == 'uniform':\n self._add_to_queue_uniform(removed)\n elif self.exploration_strategy == 'uniform-exhaustive':\n self._add_to_queue_uniform_exhaustive(removed)\n elif self.exploration_strategy == 'valence-weighted':\n self._add_to_queue_valence_weighted(removed)\n elif self.exploration_strategy == 'count-weighted':\n self._add_to_queue_count_weighted(removed)\n else:\n raise ValueError('Did not recognise exploration strategy: {}'.format(\n self.exploration_strategy))", "def worker_take_item(self):\r\n worker = self.get_waiting_worker()\r\n if worker:\r\n worker.take_item(self.item)\r\n self.item = Item.E\r\n return worker", "def enqueue(queue, item):\n new_node = Node(item, None)\n if empty_queue(queue):\n queue.front = new_node\n queue.back = new_node\n else:\n queue.back.next = new_node\n queue.back = new_node\n queue.size = queue.size + 1", "def _dequeue(self) -> Optional[torch.cuda.Event]:\n if self._queue:\n event = self._queue.popleft()\n return event\n return None", "def enqueueItem(item):\n if item.type not in AgentInventory.__idQueue__:\n AgentInventory.__idQueue__[item.type] = []\n\n # If item id is already in queue, move it to front. Otherwise, just prepend it\n if item.id in AgentInventory.__idQueue__[item.type]:\n idx = AgentInventory.__idQueue__[item.type].index(item.id)\n del AgentInventory.__idQueue__[item.type][idx]\n AgentInventory.__idQueue__[item.type].insert(0, item.id)\n else:\n AgentInventory.__idQueue__[item.type].insert(0, item.id)", "def enqueue(self, item):\n # double size of array if necessary and recopy to front of array\n if self._N == len(self._q):\n self._resize(2*len(self._q)) # double size of array if necessary\n self._q[self._last] = item # add item\n self._last += 1\n if self._last == len(self._q):\n self._last = 0 # wrap-around\n self._N += 1", "def push(self, transition):\n # if we reached the capacity, delete oldest item\n if (self.size == self.capacity):\n del self.queue[0]\n self.queue.append(transition)", "def active_item(self, remove=True):\n self.sleeping.reveille() # wake items whose sleep timer has expired\n if not self.stack.empty():\n pass\n elif not self.backlog.empty():\n # feed the stack the top priority item from the queue\n self.stack.push(self.backlog.get())\n else: # both the stack & queue are empty\n raise queue.Empty\n\n assert not self.stack.empty(), \"BUG: empty stack\"\n\n if remove:\n return self.stack.pop()\n\n return self.stack.peek()", "def remove(self) -> T:\n if not self.is_empty():\n return self._queue.pop()", "def get_nowait(self):\r\n if self.empty():\r\n raise QueueEmpty\r\n item = self._get()\r\n self._wakeup_next(self._putters)\r\n return item", "def maybe_dequeue(self):\n if self._queue.enqueued:\n return self._queue.dequeue()\n else:\n return None", "def get_nowait(self) -> _T:\n self._consume_expired()\n if self._putters:\n assert self.full(), \"queue not full, why are putters waiting?\"\n item, putter = self._putters.popleft()\n self.__put_internal(item)\n future_set_result_unless_cancelled(putter, None)\n return self._get()\n elif self.qsize():\n return self._get()\n else:\n raise QueueEmpty", "def 
add_and_process(self, item, followers, users):\n mod = self.modder\n mod.set(item.seq)\n current = self.q[mod.val]\n if current:\n # cannot wait any longer\n current.dispatch(followers, users)\n\n self.q[mod.val] = item\n\n # try to dispatch all events that are in order\n mod.set(self.last_valid + 1)\n while self.q[mod.val]:\n ev = self.q[mod.val]\n ev.dispatch(followers, users)\n\n # clear place in queue\n self.q[mod.val] = None\n\n self.last_valid = ev.seq\n mod.inc()", "def on_item(self, ch, method, header, body):\n try:\n # Get the item from the playlist store\n item = self.playlist_store.find_one({'_id': ObjectId(body)})\n print \" [x] Received %r\" % (item['track']['track']['name'],)\n \n except:\n print \" [x] Not found: %r\" % (body,)\n \n else:\n # Add item to our list\n self.items.append(item)\n \n # Mark item as 'queued'\n item['status'] = 'queued'\n self.playlist_store.update({'_id': item['_id']}, item)\n \n # If no items 'sent' or 'playing', broadcast next item in queue\n self.send()\n \n # Acknowledge\n ch.basic_ack(delivery_tag=method.delivery_tag)", "def test_the_queue_dequeue(the_queue):\n the_queue.enqueue(2)\n assert the_queue.dequeue() == 2", "def dequeue(self):\r\n return self.queue.pop(0)", "def dequeue_message(self) -> MessageQueueItem:\n return heapq.heappop(self._message_queue)", "def add_to_queue(self, word):\n self.q.put(word)\n print(\"word \\'{}\\' added in clients queue\".format(word))", "def enqueue_message(self, item: MessageQueueItem):\n heapq.heappush(self._message_queue, item)", "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except queue.Empty:\n return None\n return item", "def enQueue(self, value):\n if not self.isFull():\n self.queue.append(value)\n return True\n else:\n return False", "def enQueue(self, value):\n if not self.isFull():\n self.queue.append(value)\n self.rear += 1\n return True\n else:\n return False", "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except Queue.Empty:\n return None\n return item", "def request_already_queued(self, request: str):\n try:\n self.create_request_queue_if_not_exists()\n queue = []\n db = self.get_db_safely()\n cursor = db.cursor()\n cursor.execute(\n \"\"\"SELECT rowid FROM queue WHERE request = ?\"\"\",\n (request,))\n for row in cursor:\n queue.append(row)\n if len(queue) == 0:\n return False\n else:\n return True\n except sqlite3.Error:\n # This is a lie, but we don't want to try and enqueue something if we got an error here.\n return True", "def pop(self, timeout=None):\n item = super(ExclusiveQueue, self).pop(timeout)\n try:\n self.remove(item)\n except ValueError:\n pass\n return item", "def _add_to_chat_queue(self, message):\n self.chat_message_queue.appendleft(message)", "def put_req(self, item):\n self.req_q.put(item)", "def add_request_to_queue(self,request):\n self.queue.append(request)", "def add(self, data):\n wasquiet = True if (self.tail == self.curr) else False\n\n # Assert the queue is clean\n qtail = self.base + \".\" + str(self.tail)\n print \"creating %s\" % qtail\n assert not os.path.exists(qtail)\n qt = open(qtail, \"w\")\n qt.write(data)\n qt.close()\n\n # Where does the next item go\n self.tail += 1\n self._settail(self.tail)\n\n return wasquiet", "def push(self, item) -> None:\n self.items.append(item)", "def enqueue(self, record):\r\n self.queue.put_nowait(record)", "def _dequeue(self):\n return self._queue.popleft()", "def push(self, item) -> None:\n self._items.append(item)", "def add(self, item):\n self._set(item, 
None)", "def remove(self):\n return self.queue.popleft()", "def push(self, item):\n heapq.heappush(self.heap, item)", "def process_queue(self, queue):\n\n while queue:\n deferred, data = queue.popleft()\n deferred.callback(data)", "def timeout_put(self, item, timeout):\n try:\n self.q.put(item, timeout=timeout)\n return True\n except queue.Full:\n return False", "def put(self, item):\n self.url_queue.put(item)", "def dequeue(self):\n return self.__queue.pop()", "def test_appended(self):\n genFn = Mock(return_value=None)\n expected = 123\n \n wrapper = KaoGenerator(genFn)\n wrapper.queue(expected)\n actual = wrapper._queue.pop()\n self.assertEqual(expected, actual)", "def evict_or_add (self, item):", "def register_queue(self, queue) -> None:\r\n if queue is None:\r\n raise ValueError('queue is None')\r\n if not hasattr(queue, 'empty'):\r\n raise ValueError(f'queue {queue} is missing empty member')\r\n if not hasattr(queue, 'get_nowait'):\r\n raise ValueError(f'queue {queue} is missing get_nowait member')\r\n self.receive_queues.append(queue)", "def dequeue(self):\n return self.the_queue.pop(0)", "def dequeue(self):\n return self.queue.pop(0)", "def send(self, item):\n self.input_queue.put(item)", "def push(self, item):\n self.items.append(item)", "def push(self, item):\n self.items.append(item)" ]
[ "0.809805", "0.66659725", "0.6658853", "0.6658853", "0.649351", "0.6432891", "0.6412939", "0.6350026", "0.6251586", "0.623737", "0.6233821", "0.6162664", "0.6149751", "0.6137725", "0.6091136", "0.60882497", "0.60764444", "0.6054781", "0.5985582", "0.5985582", "0.598518", "0.5972382", "0.5959985", "0.594677", "0.5944127", "0.59432197", "0.5928539", "0.5911735", "0.5863586", "0.58527267", "0.58521014", "0.5792889", "0.57886845", "0.57781714", "0.57729894", "0.57705414", "0.5765411", "0.57586163", "0.5730444", "0.57259667", "0.57148486", "0.5711285", "0.5698054", "0.5694351", "0.56763774", "0.56485", "0.564033", "0.56346565", "0.56319374", "0.56220984", "0.561701", "0.5609047", "0.56063515", "0.56063116", "0.5604719", "0.5604132", "0.5597111", "0.55937856", "0.5590126", "0.55730575", "0.55616546", "0.5548362", "0.5545486", "0.5542501", "0.5536824", "0.5525351", "0.5510965", "0.5493038", "0.5478092", "0.54703623", "0.5465571", "0.54600996", "0.5457797", "0.54523635", "0.5452208", "0.54438645", "0.5441057", "0.5440856", "0.5435963", "0.5433496", "0.54205495", "0.5403471", "0.5402086", "0.5398602", "0.5388546", "0.53846675", "0.5379767", "0.53776264", "0.5376609", "0.5375024", "0.53739196", "0.53698903", "0.5364151", "0.53617215", "0.535743", "0.53409535", "0.53388923", "0.5337402", "0.53285813", "0.53285813" ]
0.6929844
1
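The positive document above pairs a plain `Queue.put` with an event flag and a deferred-style return value. Its `makeDeferred`/`makeQueueDeferred` helpers are not shown anywhere in this dump, so the sketch below substitutes `concurrent.futures.Future` for the deferred — an assumed stand-in, not the library's actual API:

```python
import queue
import threading
from concurrent.futures import Future

class DeferredQueue:
    """Toy queue whose put() hands back a Future resolved by the consumer."""

    def __init__(self):
        self._queue = queue.Queue()
        self._item_queued = threading.Event()  # mirrors the original's _queueEvent flag

    def queue_item(self, item):
        done = Future()                  # stands in for the returned deferred
        self._queue.put((item, done))
        self._item_queued.set()          # wake a consumer waiting on the flag
        return done

    def get_queued_item(self):
        item, done = self._queue.get()   # blocks until an item arrives
        done.set_result("OK")            # fire the producer's deferred
        return item
```

A producer can then call `queue_item(x).result(timeout=5)` to block until some consumer has actually taken `x` off the queue, which mirrors the "fires when an item is removed" behaviour the query describes.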
Wait for an item to be queued, then return it.
def getQueuedItem(self): Trace("%s getQueuedItem ..."%(self.getUri()), context="EventLib.EventRelayHTTPC") item = self._queue.get() Trace("%s getQueuedItem (%s)"%(self.getUri(),item), context="EventLib.EventRelayHTTPC") self._event.set() return item
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except queue.Empty:\n return None\n return item", "def get_item_from_queue(Q, timeout=0.01):\n try:\n item = Q.get(True, 0.01)\n except Queue.Empty:\n return None\n return item", "def worker_take_item(self):\r\n worker = self.get_waiting_worker()\r\n if worker:\r\n worker.take_item(self.item)\r\n self.item = Item.E\r\n return worker", "def get_nowait(self):\r\n if self.empty():\r\n raise QueueEmpty\r\n item = self._get()\r\n self._wakeup_next(self._putters)\r\n return item", "def get(self):\n with self.__lock:\n while True:\n try:\n job = self.__queue.get(False)\n self.__lock.notify_all()\n return job\n except Queue.Empty:\n self.__lock.wait()", "def get_nowait(self) -> _T:\n self._consume_expired()\n if self._putters:\n assert self.full(), \"queue not full, why are putters waiting?\"\n item, putter = self._putters.popleft()\n self.__put_internal(item)\n future_set_result_unless_cancelled(putter, None)\n return self._get()\n elif self.qsize():\n return self._get()\n else:\n raise QueueEmpty", "def get_next_item(self, timeout=None):\n if self.current_item is not None:\n raise error_classes.UVMSequenceError(\"You must call item_done() before calling get_next_item again\")\n self.current_item = self.req_q.get(timeout=timeout)\n return self.current_item", "def _wait_for_event_in_queue(self):\n try:\n event = self._queue.get(timeout=SendTelemetryEventsHandler._MAX_TIMEOUT)\n self._queue.task_done()\n except Empty:\n # No elements in Queue, return None\n event = None\n\n return event", "def get(self):\n\t\ttry:\n\t\t\tself.logger.debug('Im trying to get item from queue')\n\t\t\titem = self.queue.get()\n\t\t\tself.logger.debug('Recevie item from queue %s'%(item))\n\t\t\treturn True, item\n\t\texcept Exception, e:\n\t\t\tself.logger.error('Error method get, error: %s'%(e),exc_info=True)\n\t\t\treturn False, None", "def non_blocking_get(self):\n try:\n return self.q.get(block=False)\n except queue.Empty:\n time.sleep(0)\n return None", "def _wait_queue(self):\n while True:\n time.sleep(0.1)\n if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():\n return", "def wait(self, timeout=None):\r\n with self._lock:\r\n if self._finished:\r\n raise RuntimeError(\"wait() does not support re-entry!\")\r\n if not self._started:\r\n for thread in self._wait_events:\r\n thread.timeout = timeout\r\n thread.start()\r\n self._started = True\r\n try:\r\n if self._queue.get(timeout=timeout):\r\n return True\r\n return False\r\n except Empty:\r\n return False\r\n finally:\r\n with self._lock:\r\n self._finished = True", "def dequeue(self, timeout=0):\n result = self.connection.dequeue_any([self], timeout)\n if result:\n job, queue = result\n return job\n else:\n return None", "def queueItem(self, item): \n Trace(\"%s queueItem (%s)\"%(self.getUri(),item), \"EventLib.EventRelayHTTPC\")\n if not self._closing:\n self._queue.put(item)\n self._queueEvent.set()\n return makeQueueDeferred(StatusVal.OK, self._queue, self._event)\n return makeDeferred(StatusVal.OK)", "def wait_for_queue(self, q, stop=True):\n return _loader.wait_for_queue(q, stop=stop)", "def get(self):\r\n try:\r\n # get with block=False returns an item if one\r\n # is immediately available, else raises the Empty exception\r\n return self._queue.get(block=False)\r\n except queue.Empty:\r\n return self._create_connection()", "def get(self, block=True, timeout=None):\n return self.queue.get(block, timeout)", "def wait(self):\n self.queue.join()", "def 
pollTillAvailable(self):\n item = self.getItem()\n while item is None:\n item = self.getItem()\n\n return item", "def wait_for_message(self, tag, timeout=None):\n def done_check():\n if self._message_queue.setdefault(tag,[]):\n value=heapq.heappop(self._message_queue[tag])[-1]\n return True,value\n return False,None\n return self._wait_in_process_loop(done_check,timeout=timeout)", "def q_get(self):\n while not self.stopped():\n try:\n return self.in_q.get(timeout=self.heart_beat)\n except queue.Empty:\n pass", "def _get_nowait(self):\n # Fulfills a waiting producer, returning its value, or raising Empty if\n # no fulfillable producers are waiting.\n def fulfill_waiting_producer():\n while True:\n if self._waiting_producers:\n produce_wish = self._waiting_producers.pop(0)\n with produce_wish.group.lock:\n if not produce_wish.group.fulfilled:\n return produce_wish.fulfill()\n else:\n raise Empty()\n\n if self._buf is not None and not self._buf.empty:\n value = self._buf.pop()\n try:\n # Cycles a producer's value onto the buffer\n produced = fulfill_waiting_producer()\n self._buf.push(produced)\n except Empty:\n pass\n return value\n else:\n return fulfill_waiting_producer()", "def put(self, item):\r\n while self.full():\r\n putter = self._loop.create_future()\r\n self._putters.append(putter)\r\n try:\r\n yield from putter\r\n except:\r\n putter.cancel() # Just in case putter is not done yet.\r\n if not self.full() and not putter.cancelled():\r\n # We were woken up by get_nowait(), but can't take\r\n # the call. Wake up the next in line.\r\n self._wakeup_next(self._putters)\r\n raise\r\n return self.put_nowait(item)", "def _wait_empty(self):\n while True:\n if self.queue.empty():\n # We still have to wait for the last queue item being processed\n # (queue.empty() returns True before queue.task_done() is\n # called).\n self.queue.join()\n return\n time.sleep(1)", "def put(\n self, item: _T, timeout: Optional[Union[float, datetime.timedelta]] = None\n ) -> \"Future[None]\":\n future = Future() # type: Future[None]\n try:\n self.put_nowait(item)\n except QueueFull:\n self._putters.append((item, future))\n _set_timeout(future, timeout)\n else:\n future.set_result(None)\n return future", "def worker_process(self, item):\n g_sleep()\n return item", "def put_nowait(self, item: _T) -> None:\n self._consume_expired()\n if self._getters:\n assert self.empty(), \"queue non-empty, why are getters waiting?\"\n getter = self._getters.popleft()\n self.__put_internal(item)\n future_set_result_unless_cancelled(getter, self._get())\n elif self.full():\n raise QueueFull\n else:\n self.__put_internal(item)", "def ztest_get_item(self):\n \n queue = NMSQueue()\n \n result_set = queue.get_items_with_priority(1,1,0,1)\n \n for item in result_set:\n print(\"\\nItem = %s\\n\" % (item) )\n newitem = queue.get_item(item.uuid)\n print(\"\\nRetrieve the same from queue Item = %s\\n\" % (newitem) )", "def timeout_get(self, timeout):\n try:\n return self.q.get(timeout=timeout)\n except queue.Empty:\n time.sleep(0)\n return None", "def get(self) -> Any:\n return self._queue.get()", "def active_item(self, remove=True):\n self.sleeping.reveille() # wake items whose sleep timer has expired\n if not self.stack.empty():\n pass\n elif not self.backlog.empty():\n # feed the stack the top priority item from the queue\n self.stack.push(self.backlog.get())\n else: # both the stack & queue are empty\n raise queue.Empty\n\n assert not self.stack.empty(), \"BUG: empty stack\"\n\n if remove:\n return self.stack.pop()\n\n return 
self.stack.peek()", "def non_blocking_put(self, item):\n try:\n self.q.put(item, block=False)\n return True\n except queue.Full:\n return False", "def __await__(self):\n return self.waiter.__await__()", "def __await__(self):\n return self.waiter.__await__()", "def wait(self):\n while self._worker is None:\n # wait() before self._run()\n time.sleep(0.1)\n self._worker.join()\n return self.poll()", "def get_waiting_worker(self):\r\n if self.item in (Item.A, Item.B):\r\n for worker in self.workers:\r\n if not worker.working and self.item not in worker.items:\r\n return worker", "def get(self, q_idx, data_id):\n\n while True:\n # Make sure no other get threads are pulling from the queue\n self.nsyncs[q_idx].done_lock.acquire()\n\n # Check every item currently in the queue\n done_queue = self.nsyncs[q_idx].done_queue\n size = done_queue.qsize() if self.os_supported else 1 # Workaround for non-UNIX systems\n for i in range(size):\n tup = done_queue.get()\n\n # Check if item is our item (ie. if the ids match)\n if tup[0] == data_id:\n self.nsyncs[q_idx].done_lock.release()\n return tup[1]\n\n # Nope, not our item, put it back on the done_queue \n # for someone else to grab\n done_queue.put(tup)\n\n # Didn't find our item, try again..\n self.nsyncs[q_idx].done_lock.release()", "def maybe_dequeue(self):\n if self._queue.enqueued:\n return self._queue.dequeue()\n else:\n return None", "def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block = True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.is_running():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None", "def next(self):\n try:\n return self.queue.get()\n except Empty:\n raise StopIteration", "def pop(self, timeout=None):\n item = super(ExclusiveQueue, self).pop(timeout)\n try:\n self.remove(item)\n except ValueError:\n pass\n return item", "def wait(timeout=None, flush=True):\n if timeout is not None:\n timeout = timeout + _time.clock() # timeout at this time\n while True:\n if _eventQueue:\n return _eventQueue.pop(0)\n if flush:\n # a full 'round' of events need to be processed before flushing\n _tdl.flush()\n if timeout and _time.clock() >= timeout:\n return None # return None on timeout\n _time.sleep(0.001) # sleep 1ms\n _processEvents()", "def await(self):\n self.simulation_queue.join()\n return self.result_queue, self.fail_queue", "def next_ele(self):\n\t\ttry:\n\t\t\tret = self._queue.get(block=True, timeout=0.5)\n\t\t\tself._queue.task_done()\n\t\t\treturn ret\n\t\texcept queue.Empty:\n\t\t\tif not self.isAlive():\n\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn None", "def deque_non_blocking_put(self, item):\n try:\n self.q.put(item, block=False)\n return True\n except queue.Full:\n try:\n _ = self.q.get(block=False)\n dropped = False\n except queue.Empty:\n dropped = True\n # TODO - could crash due to a race condition, could be solved with a lock\n self.q.put(item, block=False)\n return dropped", "def wait(self):\n self.drain_call_queue()", "def put_nowait(self, item):\r\n if self.full():\r\n raise QueueFull\r\n self._put(item)\r\n self._unfinished_tasks += 1\r\n self._finished.clear()\r\n self._wakeup_next(self._getters)", "def wait(self, timeout=None):\n self.ev_done.wait(timeout=timeout)\n\n if not self.ev_done.is_set():\n raise TaskTimeout(\"Task %s timed out.\" % self)\n\n # --> self.result is set\n # If it is an exception, then raise it in this waiter\n if isinstance(self.result, Exception):\n raise self.result\n\n # Release waiters and perform callbacks\n # done() 
has already been called, because of self.ev_done check\n # \"asynchronous\" tasks should could call done() here\n #self.done(self.result)\n\n return self.result", "def wait(self, **kwargs):\n return self.client.api.wait(self.id, **kwargs)", "def get_message_from_queue(self):\n message = None, None\n\n try:\n message = self.queue.get(block=True, timeout=3)\n except Empty:\n self.fail(msg='Queue get() failed empty')\n\n return message", "def wait(self, timeout=None):\n self._do_work_thread.join(timeout)\n return self.finished()", "def wait(self, timeout=600):\n s = datetime.datetime.now()\n status = json.loads(self.get())\n while status['status'] != 'COMPLETE':\n status = self.get()\n e = datetime.datetime.now()\n if (e - s).seconds > timeout:\n raise RuntimeError('timeout')\n return status", "def _getqueue(self):\n go = self.tickqueue.get()\n for index in range(len(self.outqueues)):\n if not self.outqueues[index].empty(): return self.outqueues[index]", "def Wait(self, request_id, timeout):\n end_time = time.time() + timeout\n while not self._IsComplete(request_id):\n if end_time < time.time():\n self.AbortRequest(request_id)\n raise WorkQueueTimeout(request_id, timeout)\n time.sleep(self._WAIT_POLL_INTERVAL)\n completion_file = self._GetRequestPathname(request_id, self._COMPLETE)\n with open(completion_file, 'r') as f:\n result = pickle.load(f)\n os.remove(completion_file)\n if isinstance(result, Exception):\n raise result\n assert not isinstance(result, BaseException)\n return result", "def pop(self):\n record = self.db.crawl_queue.find_and_modify(\n query={'status': self.WAITING},\n update={'$set': {'status': self.PROCESSING, 'timestamp': datetime.now()}}\n )\n if record:\n return record\n else:\n self.repair()\n raise KeyError()", "def get_task(self):\n return self.queue.get()", "def _getqueue(self):\n\n go = self.tickqueue.get()\n for index in range(len(self.outqueues)):\n if not self.outqueues[index].empty():\n return self.outqueues[index]", "def wait(self):\n self.drain_call_queue()\n DaskWrapper.wait(self._data)", "def get_msg(self, block=True, timeout=None):\n return self.in_queue.get(block, timeout)", "def get_msg(self, block=True, timeout=None):\n return self.in_queue.get(block, timeout)", "def get_result(self, wait=-1):\n\n if not self.is_done():\n\n if wait >= 0:\n self.thread.join(wait)\n\n else:\n raise Asynchronous.NotYetDoneException(\n 'the call has not yet completed its task'\n )\n\n if self.result is None:\n self.result = self.queue.get()\n\n return self.result", "def drainQueue(q):\n buf = []\n while True:\n # Get as much as possible without blocking\n try:\n while True:\n item = q.get_nowait()\n if item is None:\n return buf\n else:\n buf.append(item)\n except Queue.Empty:\n pass\n\n if buf:\n return buf\n\n # Nothing in the queue. 
Block for\n # one item, then go back and get any\n # that we can without blocking.\n item = q.get()\n if item is None:\n return buf\n else:\n buf.append(item)", "def wait(self, timeout):\n if not hasattr(self, '_value'):\n try:\n value = self.broker.pop_result(self, timeout=timeout)\n except KeyError:\n return False\n except TaskExpired as err:\n value = err\n self._value = value\n return hasattr(self, '_value')", "def get(self, block=True, timeout=None):\n if not self.connected:\n raise QueueNotConnectedError(\"Queue is not Connected\")\n\n if block:\n payload = self.__db.brpop(self._key, timeout=timeout)\n else:\n payload = self.__db.rpop(self._key)\n\n if not payload:\n return None\n\n task = self.task_class(payload[1])\n\n # if task was marked as unique then\n # remove the unique_hash from lock table\n if task.unique:\n self.__db.srem(self._lock_key, task.unique_hash())\n\n return task", "def wait_for_messages(self):\n msg = self.inbox.get()\n return msg", "def pop(self):\n if not self.empty():\n return self.queue.pop()\n return None", "def _wait_for_queue_space(self, timeout=DEFAULT_WAIT_TIME):\n wait_time = 5\n\n (DEBUG and len(self.queue) > 1 * 1000 * 1000) and Log.warning(\"Queue {{name}} has over a million items\")\n\n now = time()\n if timeout != None:\n time_to_stop_waiting = now + timeout\n else:\n time_to_stop_waiting = now + DEFAULT_WAIT_TIME\n\n if self.next_warning < now:\n self.next_warning = now + wait_time\n\n while not self.please_stop and len(self.queue) >= self.max:\n if now > time_to_stop_waiting:\n Log.error(THREAD_TIMEOUT)\n\n if self.silent:\n self.lock.wait(Till(till=time_to_stop_waiting))\n else:\n self.lock.wait(Till(seconds=wait_time))\n if len(self.queue) >= self.max:\n now = time()\n if self.next_warning < now:\n self.next_warning = now + wait_time\n Log.alert(\n \"Queue by name of {{name|quote}} is full with ({{num}} items), thread(s) have been waiting {{wait_time}} sec\",\n name=self.name,\n num=len(self.queue),\n wait_time=wait_time\n )", "def wait(self):\n with self.__lock:\n while not self.__complete:\n self.__lock.wait()", "def dequeue(self):\n return self.the_queue.pop(0)", "def _queue_thread(self):\n while self.running:\n try:\n msg = self.q.get(True, max(self.blocktime / 1000, 1))\n self.busy = True\n self.send(msg)\n self.update()\n except Empty:\n self.busy = False\n pass\n\n # Prune the events list of dead events\n self.events_lock.acquire()\n self.events = filter(lambda t: t.is_alive(), self.events)\n self.events_lock.release()", "def getitem(self, index):\n #FIXME: A better way to get item without removing it.\n priority,size,trace=self.queues[index].get()\n self.enqueue(index,trace,priority)\n return trace", "def wait(self, msecs = sys.maxint):\n\t\tcall_sdk_function('PrlJob_Wait', self.handle, msecs)\n\t\terr_obj = None\n\t\ttry:\n\t\t\terr_obj = self.get_error()\n\t\texcept:\n\t\t\tpass\n\t\tsdk_check_result(self.get_ret_code(), err_obj)\n\t\treturn self.get_result()", "def dequeue(self):\r\n return self.queue.pop(0)", "def get(self, block=True, timeout=None):\n return self.q.get(block, timeout)", "def get_data(self):\n try:\n data = self._queue.get(block=False)\n except Empty:\n data = None\n return data", "def waitForItem(self, itemName, timeout, index=1,containerObject=None, relatedAreaEnd=None):\r\n t = time.time()\r\n\r\n while t+timeout/1000.0>time.time():\r\n #self.getCurrentState(refresh=True)\r\n isScrollable = self.isItemScrollable(itemName,containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)\r\n itemVisibility = 
self.isItemVisible(itemName, index=index, containerObject=containerObject, relatedAreaEnd=relatedAreaEnd)[0]\r\n if (isScrollable and itemVisibility != self.NOT_FOUND) or (not isScrollable and itemVisibility == self.VISIBLE):\r\n return True\r\n self.phone.delay(1000, False)\r\n\r\n return False", "def get(self):\n if not self.finished():\n self.wait()\n return self._result", "def wait_request(self, req_id):\n self.infer_queue[req_id].wait()\n return self.outputs.pop(req_id, None)", "def __next__(self):\n waitable = queue.Queue(maxsize=1)\n getter = functools.partial(waitable.get, timeout=self._timeout)\n self._latest_item = self._async_wrapper_class(\n func=getter,\n concurrency_provider=self._provider\n )\n with self._lock:\n try:\n # get latest notification\n data = self._notifications.get_nowait()\n except queue.Empty:\n # store the consumer\n self._waitables.put(waitable)\n LOG.debug('no data for new consumer')\n else:\n # if we have a notification, pass it to the consumer immediately\n waitable.put_nowait(data)\n LOG.debug('new consumer taking next data immediately')\n return self._latest_item", "def wait_message(self):\n if self._state != states['open']:\n return False\n if len(self._read_queue) > 0:\n return True\n\n assert self._read_waiter is None or self._read_waiter.cancelled(), \\\n \"You may only use one wait_message() per connection.\"\n\n self._read_waiter = asyncio.Future(loop=self._loop)\n yield from self._read_waiter\n return self.wait_message()", "def get(self):\n if self.result_data.get(self.get_idx + 1) is not None:\n self.get_idx += 1\n res = self.result_data[self.get_idx]\n del self.result_data[self.get_idx]\n return res\n while True:\n res = self.result_queue.get(block=False)\n idx = res.id\n if idx == self.get_idx + 1:\n self.get_idx += 1\n return res\n self.result_data[idx] = res", "def queue_peek(queue_instance, timeout=60):\r\n while True:\r\n try:\r\n yield queue_instance.get(timeout=timeout)\r\n except Empty:\r\n break", "def pop(self, till=None):\n if till is not None and not isinstance(till, Signal):\n Log.error(\"expecting a signal\")\n\n with self.lock:\n while True:\n if self.queue:\n value = self.queue.popleft()\n return value\n if self.please_stop:\n break\n if not self.lock.wait(till=till | self.please_stop):\n if self.please_stop:\n break\n return None\n (DEBUG or not self.silent) and Log.note(self.name + \" queue stopped\")\n return THREAD_STOP", "def dequeue(self):\n if not self.is_empty():\n return self._queue_items.pop()\n else:\n raise QueueException('dequeue operation not supported on an empty queue')", "def wait(self):\n ident = get_ident()\n if ident not in self.events:\n # this is a new client\n # add an entry for it in the self.events dict\n # each entry has two elements, a threading.Event() and a timestamp\n self.events[ident] = [threading.Event(), time.time()]\n return self.events[ident][0].wait()", "def dequeue(self):\n return self.queue.pop(0)", "def popleft(self, timeout=None):\n item = super(ExclusiveQueue, self).popleft(timeout)\n try:\n self.remove(item)\n except ValueError:\n pass\n return item", "def deque_timeout_put(self, item, timeout):\n try:\n self.q.put(item, timeout=timeout)\n return True\n except queue.Full:\n try:\n _ = self.q.get(block=False)\n dropped = False\n except queue.Empty:\n dropped = True\n # TODO - could crash due to a race condition, could be solved with a lock\n self.q.put(item, block=False)\n return dropped", "def wait(self, timeout=None):\n with self.condition:\n if not self.ready:\n 
self.condition.wait(timeout)", "def dequeue(self):\n return self.__queue.pop()", "def get_queue_data():\n global grove_data\n try:\n grove_data = grove_queue.get_nowait()\n except Empty:\n # Just use old loopstate if queue is empty\n pass\n return grove_data", "def first(self, timeout=None):\n while True:\n with self._jobfinished:\n if self._results or not self._jobs.unfinished_tasks:\n break\n self._jobfinished.wait(timeout)\n return self._results[0] if self._results else None", "def dequeue(self):\n return self._queue.dequeue()", "async def wait_for(self, seqno, cmd, timeout=5):\n if seqno in self.listeners:\n raise Exception(f\"listener exists for {seqno}\")\n\n self.debug(\"Command %d waiting for seq. number %d\", cmd, seqno)\n self.listeners[seqno] = asyncio.Semaphore(0)\n try:\n await asyncio.wait_for(self.listeners[seqno].acquire(), timeout=timeout)\n except asyncio.TimeoutError:\n self.warning(\n \"Command %d timed out waiting for sequence number %d\", cmd, seqno\n )\n del self.listeners[seqno]\n raise\n\n return self.listeners.pop(seqno)", "def pop(self):\r\n return self.queue.pop(0)", "def get(self, timeout=None):\n if timeout is not None:\n timeout_deadline = time.time() + timeout\n\n with self._lock:\n try:\n return self._get_nowait()\n except Empty:\n pass\n\n if self._closed:\n raise ChanClosed(which=self)\n\n # Shortcut for if the operation shouldn't block.\n if timeout is not None and timeout <= 0:\n raise Timeout()\n\n group = WishGroup()\n wish = Wish(group, WISH_CONSUME, self)\n self._waiting_consumers.append(wish)\n\n with group.lock:\n while not group.fulfilled:\n if timeout is None:\n group.cond.wait()\n else:\n group.cond.wait(timeout_deadline - time.time())\n\n if time.time() >= timeout_deadline:\n # Only time out if the wish wasn't fulfilled\n if not group.fulfilled:\n self._waiting_consumers.remove(wish)\n raise Timeout()\n\n if wish.closed:\n raise ChanClosed(which=self)\n return wish.value", "def dequeue(self):\n return self.queue.popleft()", "def get_next_event(self, timeout=None):\n ret = self.inq.Wait(timeout)\n return ret", "def wait_send(self, timeout = None):\n\t\tself._send_queue_cleared.clear()\n\t\tself._send_queue_cleared.wait(timeout = timeout)", "def request(self, *args, **kwargs):\n self.work_request_queue.put((args, kwargs))\n return self.result_queue.get()" ]
[ "0.69198966", "0.69057333", "0.6884813", "0.6864645", "0.68376034", "0.6821141", "0.6660425", "0.6638609", "0.65500504", "0.6460967", "0.6446401", "0.63761413", "0.63736504", "0.6362188", "0.6324351", "0.62841207", "0.6243727", "0.61811554", "0.6120136", "0.61119175", "0.60853547", "0.60850537", "0.6048267", "0.6044677", "0.60400003", "0.6034453", "0.60216844", "0.59891033", "0.59822494", "0.5976261", "0.59724563", "0.59672153", "0.59362185", "0.59362185", "0.5926343", "0.5922113", "0.59214455", "0.5906842", "0.5900956", "0.5889584", "0.5883158", "0.5877458", "0.58427584", "0.58404225", "0.5831381", "0.5812643", "0.5800383", "0.57982075", "0.5797517", "0.5797063", "0.5790493", "0.5774333", "0.5764621", "0.5764605", "0.57583904", "0.57541966", "0.57530516", "0.5747142", "0.5742784", "0.5742784", "0.57374597", "0.5729477", "0.5719461", "0.5715755", "0.57067496", "0.5698188", "0.5697237", "0.5677781", "0.56709236", "0.56686866", "0.565926", "0.5652527", "0.56482303", "0.5642524", "0.56383854", "0.5626286", "0.5623751", "0.56225824", "0.56221914", "0.56217504", "0.56206065", "0.5611751", "0.5607913", "0.5602662", "0.55906636", "0.55847967", "0.5574999", "0.55687726", "0.5561468", "0.5537652", "0.5534225", "0.5530735", "0.55302745", "0.5523353", "0.55214787", "0.5520924", "0.5519951", "0.55187124", "0.5516488", "0.5500914" ]
0.734153
0
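Note how the top-scoring negatives for this record (the `get_item_from_queue` variants, at roughly 0.69 against the positive's 0.734) differ from the positive only in giving up after a short timeout instead of blocking indefinitely — a small behavioural gap the scores have to resolve. A side-by-side sketch of the two variants:

```python
import queue

def get_blocking(q):
    """Positive-style: wait indefinitely for the next item."""
    return q.get()

def get_with_timeout(q, timeout=0.01):
    """Negative-style: return None if nothing arrives within `timeout` seconds."""
    try:
        return q.get(True, timeout)
    except queue.Empty:
        return None
```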
This function is the body of the HTTP client worker thread.
def processEvent(self): # Note: break out of event dispatch loop when closedown event is received # and closing flag is set. This is to prevent DoS attack by faked closedown # event type, and to ensure that prior events received are all processed. delay_on_error_min = 0.125 # Back off retry interval on error.. delay_on_error_max = 20.0 # .. delay_on_error = delay_on_error_min # .. while True: if delay_on_error < delay_on_error_max: delay_on_error *= 2 try: # PLEASE NOTE: In the event that the HTTPC is run as duplex, not simplex # then the post methods will be delayed if nothing is sent down to the client # from the server. This timeout is controlled by QUEUE_WAIT_TIMEOUT in EventRouterHTTPS.py if self._simplex == True: self._queueEvent.wait() self._queueEvent.clear() if not self._queue.empty(): Trace("%s queue.get ..."%(self.getUri()), "EventLib.EventRelayHTTPC") ###msgbody = self._queue.get() ###Trace("%s get msgbody: %s"%(self.getUri(),msgbody), "EventLib.EventRelayHTTPC") ###self._event.set() msgbody = self.getQueuedItem() [typ,env] = msgbody if typ == "closedown": if self._closing: break else: # process request as an HTTP POST request data = makeEnvelopeData(env) headers = { "Content-type": "text/plain", "Accept": "text/plain", "Content-length": str(len(data)) } self._httpcon.request("POST", "/request_path_ignored", data, headers) response = self._httpcon.getresponse() delay_on_error = delay_on_error_min elif self._simplex == False: # Nothing in queue: # issue a GET for incoming events _log.info("%s HTTP get ..."%(self.getUri())) headers = { "Accept": "text/plain" } self._httpcon.request("GET", "/request_path_ignored", None, headers) response = self._httpcon.getresponse() if response.status == 200: delay_on_error = delay_on_error_min msgbody = response.read() Trace("%s get msgbody: %s"%(self.getUri(),msgbody), "EventLib.EventRelayHTTPC") # Parse message and act accordingly msgdata = parseMessageData(msgbody) Trace("%s get msgdata: %s"%(self.getUri(),str(msgdata)), "EventLib.EventRelayHTTPC") if msgdata == None: #TODO: Log "Request body malformed" pass elif msgdata[0] == "forward": # msgdata = ["forward", [['R1', 'R2', 'R3'], 'ev:typ', 'ev:src', 'payload']] event = makeEvent(evtype=msgdata[1][1],source=msgdata[1][2],payload=msgdata[1][3]) env = constructEnvelope(msgdata[1][0], event) self.forward(event, env) elif msgdata[0] == "idle": # Idle response gives client a chance to send if anything is queued pass else: #TODO: handle closedown message? Warn( "%s Request body unrecognized option: %s"%(self.getUri(),msgdata[0]), "EventRelayHTTPC") pass elif response.status == 503: Trace( "%s processEvent error response: %u, %s"%(self.getUri(),response.status,response.reason), "EventLib.EventRelayHTTPC") # Remote end closed down break else: # TODO: (log error response) Warn( "%s processEvent error response: %u, %s"%(self.getUri(),response.status,response.reason), "EventLib.EventRelayHTTPC") time.sleep(delay_on_error) except httplib.BadStatusLine, e: # This can happen at closedown Info( "%s processEvent bad response: %s"%(self.getUri(), str(e)), "EventLib.EventRelayHTTPC") time.sleep(delay_on_error) except httplib.CannotSendRequest, e: # This can happen at closedown Info( "%s Cannot send request: %s"%(self.getUri(), str(e)), "EventLib.EventRelayHTTPC") time.sleep(delay_on_error) except httplib.ResponseNotReady, e: # This can happen at startup and sometimes other times: # maybe multiple requests on a single HTTP connection object? 
Info( "%s Response not ready: (%s)"%(self.getUri(), str(e)), "EventLib.EventRelayHTTPC") time.sleep(delay_on_error) except socket.error, e: Warn( "%s Socket error: %s"%(self.getUri(), str(e)), "EventLib.EventRelayHTTPC") time.sleep(delay_on_error) return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __manage_client_thread(self, conn_socket: socket.socket, address: tuple):\n print(\"[THREAD] new thread started for client\")\n connected = True\n\n while connected:\n request_header = HttpServer.get_request_header(conn_socket)\n split_request_header = request_header.split()\n file = split_request_header[1]\n print(\"[RECV] header received from IPv4 address\", address[0], \":\", request_header)\n\n if not HttpServer.is_valid_http_request(split_request_header):\n http_message = HttpServer.create_400_response()\n pass\n else:\n if file == \"/\":\n file = \"/index.html\"\n\n put_or_post = HttpServer.is_put_or_post(split_request_header)\n try:\n if put_or_post:\n request_body = HttpServer.get_request_body(conn_socket, request_header)\n status_code = HttpServer.get_status_code_for_put_or_post(request_header, file)\n\n if status_code == 204:\n if split_request_header[0] == \"PUT\":\n f = open(file[1:], \"w\")\n else:\n f = open(file[1:], \"a\")\n\n f.write(request_body)\n f.close()\n http_message = HttpServer.create_204_response()\n elif status_code == 501:\n http_message = HttpServer.create_501_response()\n else: # status code is 201\n f = open(file[1:], \"x\")\n f.write(request_body)\n f.close()\n http_message = self.create_201_response(file)\n\n else:\n if split_request_header[0] == \"GET\":\n # GET needs filename\n status_code = HttpServer.get_status_code_for_get(request_header, file)\n else:\n status_code = HttpServer.get_status_code_for_head(file)\n\n if status_code == 404:\n http_message = HttpServer.create_404_response()\n elif status_code == 304:\n http_message = HttpServer.create_304_response()\n else: # status code is 200\n http_message = HttpServer.create_200_response(file)\n except Exception:\n http_message = HttpServer.create_500_response()\n\n conn_socket.send(http_message)\n\n # determine if connection has to be closed\n conn_str = \"Connection:\"\n conn_begin_ind = request_header.find(conn_str)\n\n if conn_begin_ind != -1:\n conn_end_ind = request_header[conn_begin_ind:].find(\"\\r\\n\") + conn_begin_ind\n conn_status = request_header[conn_begin_ind+len(conn_str):conn_end_ind].strip()\n if conn_status == \"close\":\n conn_socket.close()\n connected = False\n print(\"[THREAD] client thread ended\")", "def _client(self):\n while True:\n body = self.queue.get(True)\n print \"Sending %s bytes (%s/%s)\" % (len(body), self.queue.qsize(), self.queue.maxsize)\n\n try:\n req = urllib2.Request(self.endpoint, body)\n urllib2.urlopen(req).read()\n except:\n print \"Cannot send request. 
Retrying in 5 seconds\"\n print_exception(*sys.exc_info())\n print \"continuing...\"\n self.enqueue(body)\n sleep(5)", "def serveThread(self):\r\n while True:\r\n try:\r\n client = self.clients.get()\r\n self.serveClient(client)\r\n except Exception, x:\r\n logging.exception(x)", "def run(self):\n # sends download range from offset to offset + block_size - 1 (including) in the header\n headers = {'User-Agent': self.user_agent, 'Refferer': '{}://{}/'.format(self.url.protocol, self.url.host), \n 'Range': 'bytes={}-{}'.format(self.offset, self.offset + self.block_size - 1)}\n status = 0 # set status to 0 that means a connection error\n try:\n self.conn.request('GET', self.url.request, headers=headers)\n response = self.conn.getresponse()\n # the server does not support partial downloading - error\n if response.status != 206:\n status = response.status\n raise MirrorError\n part_size = int(response.getheader('Content-Length')) # actual count of bytes sent by the server\n data = b'' # data buffer\n # loop while all data will be received\n while part_size > len(data):\n if self.cancelled.is_set(): # if the thread has been cancelled\n # stop the thread, the TaskError would not be processed\n # because a loop in the main thread already broken\n raise Exception\n data_fragment = response.read(self.FRAGMENT_SIZE)\n data += data_fragment # add data to the buffer\n # put progress information into the queue\n info = TaskProgress(self.url.host, response.status, len(data))\n self.data_queue.put(info)\n # when the downloading loop finished, create TaskData object\n info = TaskData(self.url.host, response.status, self.offset, data)\n response.close()\n except:\n # if an error has occurred - create a TaskError object\n info = TaskError(self.url.host, status, self.offset)\n finally:\n self.data_queue.put(info) # put result TaskInfo object into the queue\n self.ready.set() # mark the thread as comleted", "def _http_thread_func(self):\r\n while not self._terminating:\r\n # pop queued request from the queue and process it\r\n (api_endpoint, params, reqid) = self.http_requests.get(True)\r\n translated = None\r\n try:\r\n answer = self.http_signed_call(api_endpoint, params)\r\n if answer[\"result\"] == \"success\":\r\n # the following will reformat the answer in such a way\r\n # that we can pass it directly to signal_recv()\r\n # as if it had come directly from the websocket\r\n translated = {\r\n \"op\": \"result\",\r\n \"result\": answer[\"data\"],\r\n \"id\": reqid\r\n }\r\n else:\r\n if \"error\" in answer:\r\n if answer[\"token\"] == \"unknown_error\":\r\n # enqueue it again, it will eventually succeed.\r\n self.enqueue_http_request(api_endpoint, params, reqid)\r\n else:\r\n\r\n # these are errors like \"Order amount is too low\"\r\n # or \"Order not found\" and the like, we send them\r\n # to signal_recv() as if they had come from the\r\n # streaming API beause Gox() can handle these errors.\r\n translated = {\r\n \"op\": \"remark\",\r\n \"success\": False,\r\n \"message\": answer[\"error\"],\r\n \"token\": answer[\"token\"],\r\n \"id\": reqid\r\n }\r\n\r\n else:\r\n self.debug(\"### unexpected http result:\", answer, reqid)\r\n\r\n except Exception as exc:\r\n # should this ever happen? HTTP 5xx wont trigger this,\r\n # something else must have gone wrong, a totally malformed\r\n # reply or something else.\r\n #\r\n # After some time of testing during times of heavy\r\n # volatility it appears that this happens mostly when\r\n # there is heavy load on their servers. 
Resubmitting\r\n # the API call will then eventally succeed.\r\n self.debug(\"### exception in _http_thread_func:\",\r\n exc, api_endpoint, params, reqid)\r\n\r\n # enqueue it again, it will eventually succeed.\r\n self.enqueue_http_request(api_endpoint, params, reqid)\r\n\r\n if translated:\r\n self.signal_recv(self, (json.dumps(translated)))\r\n\r\n self.http_requests.task_done()", "def serve(self):\n\t\timport thread\n\t\tthread.start_new_thread(self._server_thread, tuple())", "def run(self):\n\n\t\t#Begin running the clientHandler\n\t\tself.running = True\n\t\tself.rxThread.start()\n\n\t\twhile self.running:\n\t\t\ttime.sleep(0.1)\n\t\n\t\t\t#Keep a count of the number of missing Hello requests, over 5 kill client\n\t\t\tif self.missingCount >= 5:\n\t\t\t\tself.running = False", "def __thread(self):\n\n _LOGGER.debug(\"Webserver is starting.\")\n\n monitor = self\n\n # Embedding this because it's so trivial.\n class Handler(BaseHTTPServer.BaseHTTPRequestHandler):\n def do_GET(self):\n\n # We have the first line of the response with the authorization code\n # passed as a query argument.\n #\n # Example:\n #\n # GET /?code=4/clwm0rESq8sqeC-JxIcfiSdjh2593hLej9CZxAcbe1A HTTP/1.1\n #\n\n try:\n unicode\n except NameError:\n request = self.requestline + \"\\n\\n\"\n else:\n request = unicode(self.requestline + \"\\n\\n\")\n\n # Use Python to parse the request. We need to add one newline for the\n # line and another for a subsequent blank line to terminate the block\n # and conform with the RFC.\n hr = _HTTPRequest(request)\n u = urlparse.urlparse(hr.path)\n arguments = urlparse.parse_qs(u.query)\n\n # It's not an authorization response. Bail with the same error\n # the library would normally send for unhandled requests.\n if 'code' not in arguments:\n self.send_error(\n 501,\n \"Unsupported method ({}): {}\".format(\n self.command, hr.path))\n\n return\n\n token = arguments['code'][0]\n _LOGGER.debug(\"Received auth-code [{}]\".format(token))\n\n monitor._token = token\n\n monitor._request_state_e.set()\n\n self.send_response(200, message='OK')\n\n self.send_header(\"Content-type\", 'text/html')\n self.end_headers()\n\n self.wfile.write(\"\"\"\\\n<html>\n<head></head>\n<body>\nAuthorization recorded.\n</body>\n</html>\n\"\"\")\n\n def log_message(self, format, *args):\n pass\n\n\n class Server(SocketServer.TCPServer):\n def server_activate(self, *args, **kwargs):\n r = SocketServer.TCPServer.server_activate(self, *args, **kwargs)\n\n # Sniff the port, now that we're running.\n monitor._effective_port = self.server_address[1]\n\n return r\n\n # Our little webserver. 
(0) for the port will automatically assign it\n # to some unused port.\n binding = ('localhost', self._webserver_port)\n self.__s = Server(binding, Handler)\n\n _LOGGER.debug(\"Created server.\")\n\n # Signal the startup routine that we're starting.\n self.__server_state_e.set()\n\n _LOGGER.debug(\"Running server.\")\n self.__s.serve_forever()\n\n _LOGGER.debug(\"Webserver is stopping.\")\n\n # Signal the startup routine that we're stopping.\n self.__server_state_e.set()", "def run(self):\n try:\n # Initialize signal handler to be able to have a graceful shutdown.\n ServiceShutdownHandling.initServiceShutdownHandling()\n\n httpd = None\n # The HTTP server thread - our HTTP interface\n if self._port != None:\n httpd = RaspendHTTPServerThread(self._shutdownFlag, self._dataLock, self._sharedDict, self._cmdMap, self._port)\n # Start our threads.\n httpd.start()\n\n for worker in self._workers:\n worker.start()\n\n # Keep primary thread or main loop alive.\n while True:\n time.sleep(0.5)\n\n except ServiceShutdownHandling.ServiceShutdownException:\n # Signal the shutdown flag, so the threads can quit their work.\n self._shutdownFlag.set()\n\n # Wait for all threads to end.\n for worker in self._workers:\n worker.join()\n\n if httpd:\n httpd.join()\n\n except Exception as e:\n print (\"An unexpected error occured. Error: {}\".format(e))\n\n finally:\n pass\n\n return", "def run(self):\n server = TCPServer((self.host, self.port), TCPHandler)\n server.lymphocytes_getter = self.lymphocytes_getter\n\n #runs forever - so make this thread daemon\n server.serve_forever()", "def run(self):\n while self.running:\n self.handle_request()", "async def main(self, loop: asyncio.get_event_loop) -> None:\n queue = asyncio.Queue()\n\n for url in self.url_list:\n queue.put_nowait(url)\n\n async with aiohttp.ClientSession(loop=loop) as session:\n workers = [\n asyncio.create_task(self.worker(queue, session))\n for _ in range(self.max_treads)\n ]\n await queue.join()\n\n for worker in workers:\n worker.cancel()\n\n await asyncio.gather(*workers, return_exceptions=True)", "def initThread(self):\n thread1 = threading.Thread(target=self.processData)\n self.info(\"starting cod7http thread\")\n thread1.start()\n self.httpthreadinst = thread1", "def __init__(self):\n\n # Every WSGI application must have an application object - a callable\n # object that accepts two arguments. For that purpose, we're going to\n # use a function (note that you're not limited to a function, you can\n # use a class for example). The first argument passed to the function\n # is a dictionary containing CGI-style environment variables and the\n # second variable is the callable object (see PEP 333).\n def application(environ, start_response):\n \"\"\"\n WSGI application object. Returns request status.\n For specific endpoints (e.g. 
get_with_params), returns\n specific response bodies.\n \"\"\"\n\n response_text = 'Hello World!'\n endpoint = environ['PATH_INFO'][1:]\n\n if endpoint == 'get_with_params':\n #echo back uri parameters as dict...\n response_text = str(dict(urlparse.parse_qsl(environ['QUERY_STRING'])))\n\n #set status code for response based on request...\n requested_status = environ['PATH_INFO'][1:]\n\n status = self.codes.get(requested_status, '200 OK') # HTTP Status\n headers = [('Content-type', 'text/plain')] # HTTP Headers\n start_response(status, headers)\n #print(environ)\n #print('pathInfo: {0}'.format(environ.get('PATH_INFO')))\n #print('queryString: {0}'.format(environ.get('QUERY_STRING')))\n #print('requestMethod:{0}'.format(environ['REQUEST_METHOD']))\n # The returned object is going to be printed\n return response_text\n\n threading.Thread.__init__(self)\n self.httpd = make_server('', 0, application)\n self.address = self.httpd.server_address", "def handler(self):\n msg = self.create_http_request()\n self.send(msg)\n\n if self.file_name == \"/\":\n self.file_name = \"/index.html\"\n else:\n self.file_name = HttpClient.create_file_location(self.file_name)\n\n if self.http_command == \"HEAD\":\n recv_raw, _ = self.recv_header()\n recv = recv_raw.decode(self.format_body)\n recv_with_updated_imgs = self.update_images(recv)\n self.write_to_html_file(recv_with_updated_imgs)\n elif self.http_command == \"PUT\":\n recv_raw = self.recv_all_data()\n if recv_raw != b'':\n recv = recv_raw.decode(self.format_body)\n recv_with_updated_imgs = self.update_images(recv)\n self.write_to_html_file(recv_with_updated_imgs)\n elif self.http_command == \"POST\":\n recv_raw = self.recv_all_data()\n if recv_raw != b'':\n recv = recv_raw.decode(self.format_body)\n recv_with_updated_imgs = self.update_images(recv)\n self.write_to_html_file(recv_with_updated_imgs)\n else: # http_command == \"GET\" or it is a bad request\n recv_raw = self.recv_all_data()\n recv = recv_raw.decode(self.format_body)\n recv_with_updated_imgs = self.update_images(recv)\n self.write_to_html_file(recv_with_updated_imgs)\n\n self.disconnect()\n print(\"[CONNECTION] Client terminated\")", "def main():\n host = ''\n port = 8088\n HTTPServer((host, port), HandleRequests).serve_forever()", "async def main():\n # Create the queue of work\n work_queue = asyncio.Queue()\n\n # Put some work in the queue\n for url in [\n \"http://google.com\",\n \"http://yahoo.com\",\n \"http://linkedin.com\",\n \"http://apple.com\",\n \"http://microsoft.com\",\n \"http://facebook.com\",\n \"http://twitter.com\",\n ]:\n await work_queue.put(url)\n\n browser = HTTPBrowser(2,\n work_queue.qsize(),\n LoadBalancing()\n )\n\n await browser.browser_session(work_queue)", "def run(self):\n while True:\n req = self._requests.get()[1]\n req.start()\n logging.info('Running request %s', req)", "def request_handler(self):\n\n size = 1024\n while True:\n # accept message from client\n clientSock, addr = self.socket.accept()\n self.printLine()\n print('connect to {}'.format(addr))\n\n # print client message content\n msg = clientSock.recv(size).decode('utf-8')\n self.printLine()\n print(\"sent message :\")\n print(msg)\n\n self.printLine()\n self._set_fileName(msg)\n\n # check for existance of file in the server (with name of file.txt)\n data, isFileExist = self._send_file_handler()\n\n self.printLine()\n print('data of file :')\n print(data)\n\n # create header for response message\n if isFileExist:\n header = self._generate_headers(200)\n else:\n header = self._generate_headers(404)\n\n 
response = header.encode() + data.encode()\n\n # send response in http protocol\n clientSock.send(response)\n\n # close the signal\n clientSock.close()", "def server():", "def server():", "def image_server():\n yield from http_server_thread(ImageHandler)", "def client():", "def run(self):\n while self._num_workers > 0:\n self.server.handle_request()\n self._graph = None", "def run(self):\n try:\n # connect method implementation should return a TaskInfo object\n info = self.connect()\n except:\n # if an error has occurred create a TaskHeadError object\n info = TaskHeadError(self.url.host, 0)\n finally:\n self.data_queue.put(info) # put the result in the queue\n self.ready.set() # and mark the thread as completed", "def run(self):\n print('ClientThread[{}] is running!'.format(self.threadID))\n while True:\n request = self.receive()\n try:\n requestcode = request.split(',')[0]\n if requestcode == 'SYNCFROM':\n self.syncToClient()\n continue\n elif requestcode == 'SYNCTO':\n self.syncFromClient()\n continue\n elif requestcode == 'GETINDEX':\n self.sendIndex()\n continue\n elif requestcode == 'CLOSE':\n print('Connection to {}:{} closed'.format(self.ip,self.port))\n self.tcpsock.close()\n break\n elif not request:\n continue\n else:\n print(request, type(request))\n raise Exception('Unexpected bytes from client.')\n except KeyboardInterrupt:\n sys.exit()\n except Exception as err:\n traceback.print_exc()\n continue\n self.tcpsock.close()\n print('ClientThread[{}] exiting..'.format(self.threadID))", "def handle(self):\n self.request.recv(1024)\n self.request.sendall(pickle.dumps(self.server.lymphocytes_getter()))", "def process_thread(self):", "async def handle_client(self, reader: StreamReader, writer: StreamWriter):\n peername = writer.transport.get_extra_info(\"peername\")\n log.info(\"handle_client : %s\", peername)\n\n try:\n\n remote_host, remote_port, req = await parse_http_request_header(\n reader, writer\n )\n\n remote_reader, remote_writer = await asyncio.open_connection(\n remote_host, remote_port\n )\n if req:\n log.info(\"req: %s\", req)\n remote_writer.write(req)\n\n asyncio.create_task(http_channel(remote_reader, writer))\n\n asyncio.create_task(http_channel(reader, remote_writer))\n\n except Exception as ex:\n log.exception(ex)", "def make_request_thread(self, service, request):\n requestResponse = self.callbacks.makeHttpRequest(service, request)\n print self.helpers.analyzeRequest(requestResponse).getUrl().toString()", "def threadget(self, url, *args):\n\n self.logger.debug(\"Starting a thread to simulate a GET request to %s\" % url)\n api_get = threading.Thread(target=self.get, args=(url, *args,))\n api_get.start()", "def client_request(self, evt):\n threads.deferToThread(self.cli_db.accept, evt)", "def run(self):\n to_client_request_thread = threading.Thread(target=self._dispatch_to_client_request, daemon=True)\n to_client_request_thread.start()\n\n from_client_request_thread = threading.Thread(target=self._dispatch_from_client_request, daemon=True)\n from_client_request_thread.start()\n\n from_client_commands_thread = threading.Thread(target=self._from_client_commands, daemon=True)\n from_client_commands_thread.start()\n\n to_client_update_state_thread = threading.Thread(target=self._to_client_update_state, daemon=True)\n to_client_update_state_thread.start()\n\n server_control_thread = threading.Thread(target=self._server_control, daemon=True)\n server_control_thread.start()\n\n # Wait for threads to finish\n to_client_request_thread.join()\n 
from_client_request_thread.join()\n from_client_commands_thread.join()\n to_client_update_state_thread.join()\n server_control_thread.join()\n \n # Close server connection\n self._to_client_request.close()\n self._from_client_request.close()", "def run(self):\n self.initialize()\n\n # run the start callback\n tools.run_callback(\"start\", {'request': self._request})\n\n data = self._request.getData()\n pyhttp = self._request.getHttp()\n config = self._request.getConfiguration()\n\n # allow anyone else to handle the request at this point\n handled = tools.run_callback(\"handle\", \n {'request': self._request},\n mappingfunc=lambda x,y:x,\n donefunc=lambda x:x)\n\n if not handled == 1:\n blosxom_handler(self._request)\n\n # do end callback\n tools.run_callback(\"end\", {'request': self._request})", "async def worker(self) -> None:\n session = aiohttp.ClientSession()\n while 1:\n try:\n data = await self.task_queue.get()\n await self.add_page_views(url_sub=data, session=session)\n except Exception as e:\n print(e)\n await asyncio.sleep(delay=self.wait_frequency)", "def run(self):\n # bind to the port\n self.serversocket.bind(('0.0.0.0', PORT))\n print(\"Listening on: \" + Colors.BOLD + HOST + \":\" + str(PORT) + Colors.ENDC)\n print(\"... waiting for a connection\", file=sys.stderr)\n try:\n while True:\n # queue up to 5 requests\n self.serversocket.listen(5)\n clientsocket, addr = self.serversocket.accept()\n print(\"Got a connection from \" + Colors.WARNING + \"%s\" % str(addr) + Colors.ENDC)\n self.clientsocket.append(clientsocket)\n newthread = ClientThread(addr, self.clientsocket[-1])\n newthread.start()\n finally:\n for cskt in self.clientsocket:\n cskt.close()", "def threading_handler(client_connection, client_address):\r\n\r\n while True:\r\n request_method, request_path, request_proto, request_data = parse_request(client_connection)\r\n\r\n if not request_data:\r\n break\r\n\r\n collect_request_data(request_path, client_address)\r\n write_data(access_entries)\r\n\r\n if request_method == 'GET':\r\n if request_path in allowed_path:\r\n requested_file = open(os.getcwd() + request_path, 'rb')\r\n data = requested_file.read(os.path.getsize(os.getcwd() + request_path))\r\n requested_file.close()\r\n request_handler(client_connection, request_path, request_proto, '200', 'OK', data, True)\r\n break\r\n elif '/www' == request_path or '/' == request_path:\r\n request_handler(client_connection, request_path, request_proto, '403', 'Forbidden', '403 Unauthorized',\r\n False)\r\n break\r\n elif '/www/access_entries.html' == request_path:\r\n final_tag = construct_access_entries_data()\r\n request_handler(client_connection, '/www/access_list.txt', request_proto, '200', 'OK', final_tag, True)\r\n break\r\n else:\r\n request_handler(client_connection, request_path, request_proto, '404', 'Not found', '404 Not Found',\r\n False)\r\n break\r\n else:\r\n request_handler(client_connection, request_path, request_proto, '405', 'Method Not Allowed',\r\n '405 Method Not Allowed', False)\r\n\r\n client_connection.close()\r\n print (\" --- Connection Closed --- \", client_address)", "def handle(self):\n global log_th\n sent = 1\n msg_body = ''\n get_recv = True\n get_data = True\n empty_check = 0\n # Looping session requests\n while 1:\n try:\n # If enabled sleep feauture\n if self.sleep_between != 0:\n time.sleep(self.sleep_between)\n # If no answer feauture\n if self.no_answer != 0:\n time.sleep(1)\n continue\n # Changing receive size if receiving data part\n if sent == 3 or sent == 4:\n data = 
self.request.recv(self.data_recv_size)\n else:\n data = self.request.recv(self.std_recv_size)\n if sent != 5:\n self.command_w_th_inc.write_commands(\n data=bytes(data).decode().encode('ascii', 'ignore')\n .decode().rstrip(), qid=self.message_id)\n # To many empty line received, closed thread\n if self.func_empty_check(data):\n if empty_check >= 3:\n break\n else:\n empty_check += 1\n continue\n # Logging session requests if steps not equal to data section\n if sent != 5:\n log_th.log_info('{} - {} client executed : \"{}\"'.format(\n self.message_id, self.client_ip, bytes(data).decode().rstrip()))\n # Break the loop\n if self.func_quit(data):\n break\n except Exception as ae:\n log_th.log_warning('{} encounter an error from {} thread : {}'.format(\n self.client_ip, threading.current_thread().name, str(ae)))\n break\n else:\n try:\n # Checking the all steps\n if self.func_rset(data):\n sent = 2\n continue\n if self.func_auth(data):\n continue\n if self.func_auth_plain(data):\n continue\n if self.func_starttls(data):\n continue\n # Starting the sent steps\n # Ehlo/hello\n if sent == 1:\n if self.func_ehlo(data) or self.func_helo(data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('command not found'))\n # Mail from, rcpt to, data\n elif sent == 2:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 2:\n get_data = False\n get_recv = False\n elif bytes(data).decode().encode('ascii',\n 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_recv = False\n if self.func_from(data, get_recv):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n if not get_recv:\n if self.func_to(data, get_recv, get_data):\n sent += 1\n get_recv = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # rcpt to and data\n elif sent == 3:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_data = False\n if self.func_to(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # data\n elif sent == 4:\n if self.func_to(data, get_recv, get_data):\n continue\n if self.func_data(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # content writing to file (if enabled) and quit statement\n elif sent == 5:\n data_list = bytes(data).decode().split('\\r\\n')\n for line in data_list:\n if str(line) == '.':\n if self.mail_save_enable != 0:\n out_file = open(self.mail_save_path + '/'\n + self.message_id + '.eml', 'w')\n out_file.write(msg_body)\n out_file.close()\n self.func_data_ok()\n sent = 1\n break\n else:\n msg_body += str(line) + '\\r\\n'\n except IndexError:\n if sent == 2:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n elif sent == 3:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))", "def thread(self, request, *args, **kwargs):\n thread = self.get_thread()\n protocol = request.is_secure() and 'wss://' 
or 'ws://'\n url = protocol + request.get_host() + settings.WEBSOCKET_URL + 'thread-%i' % thread.id\n return Response(url)", "def run(self):\n httpd = HTTPServer((self.host, self.port), self._Handler)\n sa = httpd.socket.getsockname()\n serve_message = \"Serving HTTP on {host} port {port} (http://{host}:{port}/) ...\"\n print(serve_message.format(host=sa[0], port=sa[1]))\n try:\n httpd.serve_forever()\n except KeyboardInterrupt:\n print(\"\\nKeyboard interrupt received, exiting.\")\n httpd.shutdown()", "def run(self):\n self.log_id = 0\n\n # all active websockets and their state\n self.websocks = {}\n\n # all active python interpreter sessions\n self.pysessions = {}\n\n if self.DISABLE_REQUESTS_DEBUG_LOGS:\n disable_requests_debug_logs()\n\n self.threadpool = ThreadPool(self.THREADPOOL_WORKERS)\n\n self.api = None\n\n # tornado app object\n base_handlers = self.prepare_base_handlers()\n handlers = self.prepare_handlers()\n self.template_loader = TemplateLoader([resolve_path(self.TEMPLATE_PATH)])\n _ = self.prepare_template_loader(self.template_loader)\n if _ is not None:\n self.template_loader = _\n\n shclass = CustomStaticFileHandler\n shclass.PATHS.append(resolve_path(self.STATIC_PATH))\n _ = self.prepare_static_paths(shclass.PATHS)\n if _ is not None:\n shclass.PATHS = _\n\n self.static_handler_class = shclass\n\n self.nav_tabs = [(\"Home\", \"/\")]\n if self.args.debug:\n self.nav_tabs += [(\"Console\", \"/console\"), (\"Logs\", \"/logs\")]\n self.nav_tabs = self.prepare_nav_tabs(self.nav_tabs)\n\n settings = {\n \"static_path\": \"<DUMMY-INEXISTENT-PATH>\",\n \"static_handler_class\": self.static_handler_class,\n \"template_loader\": self.template_loader,\n \"compress_response\": True,\n \"debug\": self.args.debug,\n }\n\n all_handlers = handlers + base_handlers\n self.app = self.APP_CLASS(**settings)\n self.app.add_handlers(self.VIRTUAL_HOST, all_handlers)\n\n sys.funcserver = self.app.funcserver = self\n\n self.api = self.prepare_api()\n if self.api is not None and not hasattr(self.api, \"log\"):\n self.api.log = self.log\n\n if self.args.port != 0:\n self.app.listen(self.args.port)\n\n tornado.ioloop.IOLoop.instance().start()", "def start(self):\n threading.Thread(target=self.serve_forever).start()", "def main():\n server = ThreadedServer(MasterControllerService, port=5000)\n server.start()", "def getServerInThread(data, onlyOnce=False):\n\tclass Handler(BaseHTTPServer.BaseHTTPRequestHandler):\n\t\tdef do_GET(self):\n\t\t\tself.wfile.write(data)\n\t\tdo_POST = do_GET\n\t\n\tport = 34000\n\thttpd = BaseHTTPServer.HTTPServer(('', port), Handler)\n\n\tif onlyOnce:\n\t\tserve = httpd.handle_request\n\telse:\n\t\tserve = httpd.serve_forever\n\n\tt = threading.Thread(target=serve)\n\tt.setDaemon(True)\n\tt.start()\n\treturn httpd, t, \"http://localhost:%s\"%port", "def run(self, host):\n\n # Trying to connect to given host.\n while self.csrftoken == None:\n try:\n self.client.get(self.url)\n self.csrftoken = self.client.cookies['csrftoken']\n self.cookies = dict(self.client.cookies)\n except:\n pass\n\n while True:\n if self.queue.empty():\n self.sending = False\n time.sleep(1)\n else:\n self.sending = True\n\n # sending operations when queue reached batching size\n # after about 5 seconds send anyway...\n qsize = self.queue.qsize()\n if qsize < BATCH_SIZE and qsize != self.old_qsize:\n time.sleep(1)\n self.old_qsize = qsize\n elif qsize < BATCH_SIZE and qsize == self.old_qsize and self.count < 5:\n time.sleep(1)\n self.count += 1\n else:\n data_list = []\n while not 
self.queue.empty():\n op = self.queue.get()\n data_list.append(op)\n # just for testing\n # shuffle(data_list)\n data_dict = dict(list=json.dumps(data_list)) \n\n while True:\n try:\n self.send_post(data_dict)\n break\n except requests.exceptions.RequestException:\n time.sleep(2)\n print \"[THREAD \" + str(host['id']) + \"] Can't reach host \" + str(host['port'])\n continue\n\n self.count = 0", "def do_GET(self):\r\n self.logger.debug(\"SseHTTPRequestHandler(Thread-%s): do_GET, path=%s\",\r\n threading.current_thread().ident, self.path)\r\n threading.current_thread().name = (self.path + \"_thread_\" +\r\n str(threading.current_thread().ident))\r\n if self.path == SseHTTPRequestHandler.eventsource_path:\r\n self._start_event_stream()\r\n else:\r\n http.server.SimpleHTTPRequestHandler.do_GET(self)", "def __image_request_handler(self):\n self.__logger.info(\"Image Request Handling Thread started\")\n ticker = Ticker(2)\n while self._running:\n timeout = ticker.end_tick(False)\n try:\n task = self.__image_queue.get(True, timeout)\n except Queue.Empty:\n ticker.start_tick()\n continue\n\n # There is a task to process\n ticker.start_tick()\n source, connection = task\n\n # Check if the connection has been closed. If it was,\n # do not bother processing the request.\n if not connection.connected():\n self.__logger.info(\"Skipping request for image of source %s\" \\\n \" because requesting client disconnected\" \\\n % source)\n self.__image_queue.task_done()\n continue \n\n # Obtain new image\n error = \"No image available\"\n image = None\n mtime = time.time()\n if source in self.__video_modules:\n try:\n mtime, image = self.__get_image(source)\n except Exception as err:\n error = \"Obtaining image failed: %s\" % repr(err)\n else:\n error = \"Video source %s has not been started\" % source\n\n if connection.connected():\n if image:\n # Valid image was obtained\n img_str = image.tostring()\n data = {'name': 'image',\n 'source': source,\n 'time': mtime,\n 'shape': (image.width, image.height),\n 'depth': image.depth,\n 'nChannels': image.nChannels}\n else:\n # An error occured, notify the vision module\n self.__logger.info(\"Failed to obtain image for source %s. \"\\\n \" Error message: %s\" % (source, error))\n img_str = \"\"\n data = {'name': 'image',\n 'source': source,\n 'time': mtime,\n 'error': error}\n # Send the data to the vision module.\n if not connection.sendall(data, img_str):\n self.__logger.warning(\"Failed to send data to client. 
\" \\\n \"Probably disconnected\")\n else:\n self.__logger.info(\"Image of source %s obtained but not \" \\\n \"sending because requesting client \" \\\n \"disconnected\" % source)\n self.__image_queue.task_done()\n self.__logger.info(\"Image Request Handling Thread ended\")", "def _worker(self, args):\n pass", "def run(self):\n self._connection = self.open_connection()\n self._connection.ioloop.start()", "def handle_client(self):\n e = threading.Event()\n reg_t = threading.Thread(target=self.handle_reg_client, args=(e,))\n stream_t = threading.Thread(target=self.handle_stream_client,\n args=(e,))\n reg_t.start()\n stream_t.start()", "def start(self):\n if self._params.arq:\n s = udpsocket.UDPSocket()\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n try:\n s.bind(('', self._params.port))\n s.listen(5)\n self.check_and_print_debug_message(\n 'HTTP Server is running on port: ' + str(self._params.port))\n while True:\n c, addr = s.accept()\n Thread(\n target=self.process_http_request,\n args=(c, addr)\n ).start()\n except Exception as e:\n self.check_and_print_debug_message(str(e))\n s.close()\n finally:\n self.check_and_print_debug_message(\n \"HTTP Server connection is closed.\")\n s.close()", "def serve_forever(self):\n\t\tself.serverActive = 1\n\t\twhile self.serverActive:\n\t\t\trfds = self.procHelper.ReadFDs\n\t\t\tsockFile = self.socket.fileno()\n\t\t\trfds.append(sockFile)\n\t\t\ttry_select = 1\n\t\t\t#print \"HTTP RFDS:\", rfds\n\t\t\tif len(rfds) == 1:\n\t\t\t\tprint \"RPC-HTTP: We are lost our parents or socket closed.. Exiting Normally !\"\n\t\t\t\tself.procHelper.exit()\n\t\t\t\tprint \"Error.. We can't exit :(\"\n\n\t\t\twhile try_select:\n\t\t\t\ttry:\n\t\t\t\t\trds = select.select(rfds, [], [], 3)\n\t\t\t\t\ttry_select = 0\n\t\t\t\t\t#print \"Connection from:\", rds, rfds\n\t\t\t\texcept:\n\t\t\t\t\tprint \"Select Exception. Can be a SIGCHLD ?\"\n\n\t\t\tif rds != None and len(rds) == 1:\n\t\t\t\tprint \"RPC-HTTP: We are lost our parents or socket closed.. 
Exiting Normally !\"\n\t\t\t\tself.procHelper.exit()\n\t\t\txsock = 0\n\t\t\tif rds:\n\t\t\t\tfor rfd in rds[0]:\n\t\t\t\t\tif rfd == sockFile:\n\t\t\t\t\t\txsock = rfd\n\t\t\t\t\t\t#print \"HTTP Main Loop: Socket I/O\"\n\t\t\t\t\telse:\n\t\t\t\t\t\tio = self.procHelper.readFD2io(rfd)\n\t\t\t\t\t\tsrcpid = self.procHelper.rfd2PID(rfd)\n\t\t\t\t\t\tppid = self.procHelper.parentRFD()\n\t\t\t\t\t\tcmd = io.getCommand()\n\t\t\t\t\t\t#print self.procHelper.myPID, os.getpid(), \"HTTP Main Loop: Read a command:\", str(cmd)[:20], ppid, srcpid\n\t\t\t\t\t\tif io.cmdrpoll.poll(1)[0][1] & select.POLLHUP:\n\t\t\t\t\t\t\tself.procHelper.removeChild(srcpid)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tpkPid = int(cmd[0])\n\t\t\t\t\t\t\tpkTid = int(cmd[1])\n\t\t\t\t\t\t\tpkData = cmd[3]\n\t\t\t\t\t\t\tcommand = cmd[2]\n\t\t\t\t\t\t\t#print \"ACCEPT READ FROM PIPE:\",rfd, ppid, str(cmd)[:20]\n\t\t\t\t\t\t\tif rfd != ppid:\n\t\t\t\t\t\t\t\t#print \"Send To Parent:\", pkPid, command\n\t\t\t\t\t\t\t\tself.procHelper.checkUpCmd(dir=\"P\", srcpid=srcpid, cmd=command, pktpid=pkPid, tid=pkTid, data=pkData)\n\t\t\t\t\t\t\t\tif cmd:\n\t\t\t\t\t\t\t\t\tself.procHelper.sendParentCommand(command, pkPid, pkTid, pkData)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif cmd:\n\t\t\t\t\t\t\t\t\tself.procHelper.checkUpCmd(dir=\"C\", srcpid=srcpid, cmd=command, pktpid=pkPid, tid=pkTid, data=pkData)\n\t\t\t\t\t\t\t\t\t#print \"Send To Child:\", pkPid, command\n\t\t\t\t\t\t\t\t\tself.procHelper.sendCommand(pkPid, command, pkPid, pkTid, pkData)\n\t\t\t\tif xsock > 0:\n\t\t\t\t\tself.handle_request()\n\t\t\telse:\n\t\t\t\tfor i in l:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tx = os.waitpid(i, os.WNOHANG)\n\t\t\t\t\t\tprint \"Child info:\", i, x\n\t\t\t\t\t\tif os.WIFEXITED(x[1]):\n\t\t\t\t\t\t\tprint \"Child Deleted:\", i, x\n\t\t\t\t\t\t\tdel l[p]\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass", "def process_request(self, request, client_address):\n\t\t#traceback.print_stack()\n\t\tself.collect_children()\n\t\tchldPID = self.procHelper.makeChild()\n\t\tprint \"HTTP New Connection: Go Fork for accept: \", self.procHelper.PID2io(chldPID), chldPID\n\t\tparentrpid = os.getpid()\n\t\tpid = os.fork()\n\t\tif pid:\n\t\t\t# Parent process\n\t\t\tprint \"HTTPD Conn Parent:\", self.procHelper.myPID ,chldPID\n\t\t\tself.procHelper.setIODebug(chldPID, 0, \"HTTP->Connection\")\n\t\t\tself.procHelper.initForParent(chldPID)\n\t\t\tself.procHelper.registerChild(chldPID, self.procHelper.myPID)\n\t\t\tsigset = self.procHelper\n\t\t\tself.procHelper.sendCommand(child = chldPID, command = \"INTU_MCL\", PID = chldPID, TID = 0, data = None)\n\t\t\ttry:\n\t\t\t\tselect.select([self.procHelper.PID2rfd(chldPID)], [],[], 0.1)\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\ttp = self.procHelper.readConn(chldPID)\n\t\t\tpid = int(tp[3])\n\t\t\tl = sigset.getUserData()\n\t\t\tif l == None:\n\t\t\t\tl = []\n\t\t\tl.append(pid)\n\t\t\tsigset.setUserData(l[:])\n\t\t\tsigchldhandler(0, sigset)\n\t\t\tif self.active_children is None:\n\t\t\t\tself.active_children = []\n\t\t\tself.active_children.append(pid)\n\t\t\tself.close_request(request)\n\t\t\t#print \"PID\", chldPID, \"created ppid:\", pid, sigset.getUserData()\n\t\t\treturn\n\t\telse:\n\t\t\t# Child process.\n\t\t\t# This must never return, hence os._exit()!\n\t\t\tself.procHelper.setIODebug(chldPID, 0, \"Connection->HTTP\")\n\t\t\tgloPIO = self.procHelper.PID2io(chldPID)\n\t\t\tgloPPid = self.procHelper.myPID + 0\n\t\t\tnew_ph = CHLDHELPER.childHelper(gloPIO, gloPPid, chldPID)\n\t\t\tnew_ph.setIODebug(chldPID, 0, 
\"Connection->HTTP\")\n\t\t\tnew_ph.initForChild(gloPPid)\n\t\t\tself.procHelper = new_ph\n\t\t\tself.procHelper.parentppid = parentrpid\n\t\t\tprint \"HTTPD Conn Child:\", chldPID, os.getpid(), self.procHelper.parentppid, self.procHelper.myPID, self.procHelper.gloPPid\n\t\t\ttry:\n\t\t\t\t#print \"I am new child:\", os.getpid()\n\t\t\t\tself.procHelper.waitForParentCmd(timeout = 1)\n\t\t\t\tpcmd = self.procHelper.getParentCommand()\n\t\t\t\tself.procHelper.sendParentCommand(cmd = \"IRSU_PPID\", pid = chldPID, tid = 0, data=str(os.getpid()))\n\t\t\t\tself.finish_request(request, client_address)\n\t\t\t\tos._exit(0)\n\t\t\texcept:\n\t\t\t\ttry:\n\t\t\t\t\tself.handle_error(request, client_address)\n\t\t\t\tfinally:\n\t\t\t\t\tos._exit(1)", "def __ServiceClient(self,Client):\n\t\twhile True:\n\t\t\tDataClient = Client.recv(1024)\n\t\t\tprint(DataClient)\n\t\t\t# your source code here\n\t\t\tmessage = DataClient\n\t\t\t# data to be sent to api\n\t\t\tdata = {'message': message}\n\t\t\t# sending post request and saving response as response object\n\t\t\tr = requests.post(url = self.API_ENDPOINT, data = data)\n\t\t\t# extracting response text\n\t\t\t#pastebin_url = r.text\n\t\t\t#print(\"The pastebin URL is:%s\"%pastebin_url)", "def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)", "def run(self):\n print(\"Client: Started\", flush=True)\n ack = Packet()\n ack_data = b''\n\n request = \"download\"\n req_pkt = Packet(0, request)\n req_packed = req_pkt.pkt_pack()\n\n self.client_socket.sendto(req_packed, self.server_addr)\n\n ack_data = self.client_socket.recv(self.pkt_size)\n ack.pkt_unpack(ack_data)\n\n self.recv_img(self.img_save_to)\n\n ack = Packet()\n ack_data = b''\n request = \"upload\"\n req_pkt = Packet(0, request)\n req_packed = req_pkt.pkt_pack()\n\n self.client_socket.sendto(req_packed, self.server_addr)\n\n ack_data = self.client_socket.recv(self.pkt_size)\n ack.pkt_unpack(ack_data)\n\n self.send_img(self.img_to_send)\n\n sleep(5)\n\n ack = Packet()\n ack_data = b''\n request = \"exit\"\n req_pkt = Packet(0, request)\n req_packed = req_pkt.pkt_pack()\n\n self.client_socket.sendto(req_packed, self.server_addr)\n\n ack_data = self.client_socket.recv(self.pkt_size)\n ack.pkt_unpack(ack_data)\n\n print(\"Client: Exiting...\")\n # close socket when finished\n self.client_socket.close()", "async def worker(\n self, queue: asyncio.Queue, session: aiohttp.ClientSession\n ) -> None:\n while True:\n url = await queue.get()\n await self.fetch(url, session)\n queue.task_done()", "def serve(self):\n\t\tself.keep_running=1\n\t\tif self.debug:\n\t\t\tprint \"server started\"\n\t\ttry:\n\t\t\twhile self.keep_running:\n\t\t\t\tself.handle_request()\n\t\tfinally:\n\t\t\tif self.debug:\n\t\t\t\tprint \"server finished\"\n\t\t\tself.keep_running=0\n\t\t\tself.close()", "def start(self):\n #Starting the thread able to handle the administrator request\n t2 = threading.Thread(target=self.handleServer)\n t2.start()\n self.ActiveThreads.append(t2)\n #Listen continously\n while True:\n self.server.listen(50)\n self.Log.log(\"Waiting for connections...\")\n #Obtaining the parameters like the socket and the address/port of the incoming connection\n (conn, (ip,port)) = self.server.accept()\n #Creating a new thread able to handle the new connection with the client\n newClient = ClientHandler(conn,ip,port,self.DB,self.Users,self.Log,self.XML);\n #Starting the new thread\n newClient.start()\n #Appending the thread to the list of active threads\n self.ActiveThreads.append(newClient)", "def 
run(self):\n self.debug(__name__ + \".run(): self.threadName=\" + str(self.threadName) + \"\\n\")\n self.debug(__name__ + \".run(): self.statusFile=\" + str(self.statusFile) + \"\\n\")\n self.debug(__name__ + \".run(): self.recvData=\" + str(self.recvData) + \"\\n\")\n self.debug(__name__ + \".run(): self.socketConn=\" + str(self.socketConn) + \"\\n\")\n\n status = True\n data = self.getFileData()\n self.mySocketObj.serverSend(self.socketConn, data)\n if self.socketConn: self.socketConn.close()\n # self.updateCounts()\n self.status = status\n if status:\n self.appendMsg(__name__ + \".run(): Completed successfully for \" + str(self.threadName) + \"\\n\")\n else:\n self.appendMsg(__name__ + \".run(): Failed for \" + str(self.threadName) + \"\\n\")\n # Endif", "def _make_thread(self):\r\n pass", "def _process_request(self):\n if not self._requests:\n if self._stream:\n self._stream.close()\n self._stream = None\n if self._processing:\n self._processing = False\n Engine.instance().stop()\n return\n\n request = self._requests[0]\n\n request.append(\n Engine.instance().defer(request[5], self._request_timeout, request))\n\n port = request[2].port\n if not port:\n if request[2].scheme.lower() == 'https':\n port = 443\n else:\n port = 80\n\n host = \"%s:%d\" % (request[2].hostname, port)\n\n if self._stream:\n if not self._server == host.lower() or not \\\n self._is_secure == (request[2].scheme.lower() == 'https'):\n self._stream.end()\n return\n\n if not self._stream:\n # Store the current server.\n self._server = host.lower()\n\n # Create a Stream, hook into it, and connect.\n self._stream = Stream()\n\n self._stream.on_close = self._on_close\n self._stream.on_connect = self._on_connect\n\n self._is_secure = request[2].scheme.lower() == 'https'\n if self._is_secure:\n raise Exception(\"SSL has not yet been implemented in this version of Pants.\")\n self._stream.startTLS()\n\n self._stream.connect((request[2].hostname, port))\n return\n\n # If we got here, we're connected, and to the right server. Do stuff.\n self._stream.write('%s %s HTTP/1.1%s' % (request[0], request[8], CRLF))\n for k, v in request[3].iteritems():\n self._stream.write('%s: %s%s' % (k, v, CRLF))\n\n if request[4]:\n self._stream.write('%s%s' % (CRLF, request[4]))\n else:\n self._stream.write(CRLF)\n\n # Now, wait for a response.\n self._stream.on_read = self._read_headers\n self._stream.read_delimiter = DOUBLE_CRLF", "def run(self):\n self.logger.debug(\"Resource manager main thread started\")\n\n while not self._stop_flag:\n try:\n self._handle_requests()\n self._accept_requests()\n\n except Exception as ex:\n self.logger.exception(\"Resource manager failed. \"\n \"Reason: %s\", ex)\n\n self.logger.debug(\"Resource manager thread is down\")", "def done(self, *arg, **kw):\r\n\r\n # ----------------------------------------\r\n # persistent connection management\r\n # ----------------------------------------\r\n\r\n # --- BUCKLE UP! 
----\r\n\r\n connection = http_server.get_header(http_server.CONNECTION,self.header)\r\n connection = connection.lower()\r\n\r\n close_it = 0\r\n wrap_in_chunking = 0\r\n globbing = 1\r\n\r\n if self.version == '1.0':\r\n if connection == 'keep-alive':\r\n if not 'Content-Length' in self:\r\n close_it = 1\r\n else:\r\n self['Connection'] = 'Keep-Alive'\r\n else:\r\n close_it = 1\r\n elif self.version == '1.1':\r\n if connection == 'close':\r\n close_it = 1\r\n elif not 'Content-Length' in self:\r\n if 'Transfer-Encoding' in self:\r\n if not self['Transfer-Encoding'] == 'chunked':\r\n close_it = 1\r\n elif self.use_chunked:\r\n self['Transfer-Encoding'] = 'chunked'\r\n wrap_in_chunking = 1\r\n # globbing slows down tail -f output, so only use it if\r\n # we're not in chunked mode\r\n globbing = 0\r\n else:\r\n close_it = 1\r\n elif self.version is None:\r\n # Although we don't *really* support http/0.9 (because\r\n # we'd have to use \\r\\n as a terminator, and it would just\r\n # yuck up a lot of stuff) it's very common for developers\r\n # to not want to type a version number when using telnet\r\n # to debug a server.\r\n close_it = 1\r\n\r\n outgoing_header = producers.simple_producer(self.build_reply_header())\r\n\r\n if close_it:\r\n self['Connection'] = 'close'\r\n\r\n if wrap_in_chunking:\r\n outgoing_producer = deferring_chunked_producer(\r\n deferring_composite_producer(self.outgoing)\r\n )\r\n # prepend the header\r\n outgoing_producer = deferring_composite_producer(\r\n [outgoing_header, outgoing_producer]\r\n )\r\n else:\r\n # prepend the header\r\n self.outgoing.insert(0, outgoing_header)\r\n outgoing_producer = deferring_composite_producer(self.outgoing)\r\n\r\n # hook logging into the output\r\n outgoing_producer = deferring_hooked_producer(outgoing_producer,\r\n self.log)\r\n\r\n if globbing:\r\n outgoing_producer = deferring_globbing_producer(outgoing_producer)\r\n\r\n self.channel.push_with_producer(outgoing_producer)\r\n\r\n self.channel.current_request = None\r\n\r\n if close_it:\r\n self.channel.close_when_done()", "def run(self):\n ioloop.IOLoop.current().start()", "def _run(self) -> None:\n\n log.debug(\"Volumio 2 Web Service client starting ...\")\n log.debug(f\"Connecting to Volumio 2 Web Service on {self._server}:{self._port}\")\n\n with SocketIO(self._server, self._port) as socketIO:\n log.debug(\"Connected to Volumio 2 Web Service\")\n socketIO.on(\"pushState\", self._on_state_response)\n socketIO.emit(\"GetState\", on_GetState_response)\n\n # Request initial values\n socketIO.emit(\"getState\", \"\")\n\n while not self._stop_event.is_set():\n # rely on Volumio to push states mostly, but request an update\n # at a low frequency to get some lacy update.\n socketIO.wait_for_callbacks(seconds=10)\n socketIO.emit(\"getState\", \"\")", "def run(self):\n \n try:\n logging.info('Thread:{} starting'.format(self.thread_id))\n\n self.crawl_url()\n self.parse_html()\n except IOError as e:\n self.thread_post_processing()\n logging.error('CrawlUrlError url:{} msg:{}'.format(self.url, e))\n\n self.thread_post_processing()", "def process_request_thread(self):\n while True:\n try:\n request, client_address = self._request_queue.get(\n timeout=self.timeout_on_get,\n )\n except Queue.Empty:\n # You wouldn't believe how much crap this can end up leaking,\n # so we clear the exception.\n sys.exc_clear()\n if self._shutdown_event.isSet():\n return\n continue\n try:\n self.finish_request(request, client_address)\n self.shutdown_request(request)\n except:\n self.handle_error(request, 
client_address)\n self.shutdown_request(request)\n self._request_queue.task_done()", "def proxy_thread(self, conn, client_addr):\n req = conn.recv(self.config['MAX_REQUEST_LENGTH'])\n line1 = req.split(b'\\n')[0]\n x = line1.split(b' ')\n if len(x) > 1:\n url = x[1]\n else:\n return\n \n # Check if the file requested is css/js file\n if bool(self.regex.findall(url.decode())):\n # send cached version if present\n if self.cache.retrieve(url.decode()):\n time_elapsed = time()\n conn.send(self.cache.retrieve(url.decode()).data)\n conn.close()\n time_elapsed = time() - time_elapsed\n print('Retrieving from cache: ' + url.decode())\n print('Time taken: {}'.format(time_elapsed))\n else:\n self.log(\"INFO\", client_addr, \"Request: \" + str(line1))\n time_elapsed = time()\n http_pos = url.find(b'://')\n if http_pos == -1:\n temp = url\n else:\n temp = url[(http_pos + 3):]\n port_pos = temp.find(b':')\n webserver_pos = temp.find(b'/')\n if webserver_pos == -1:\n webserver_pos = len(temp)\n\n webserver = \"\"\n port = -1\n if port_pos == -1 or webserver_pos < port_pos:\n port = 80\n webserver = temp[:webserver_pos]\n else:\n port = int((temp[port_pos + 1:])[:webserver_pos-port_pos-1])\n webserver = temp[:port_pos]\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(self.config['CONNECTION_TIMEOUT'])\n s.connect((webserver, port))\n s.sendall(req)\n while True:\n data = s.recv(self.config['MAX_REQUEST_LENGTH'])\n if len(data) > 0:\n self.cache.add(url.decode(), data)\n conn.send(data)\n else:\n break\n s.close()\n conn.close()\n\n except socket.error as error_msg:\n self.log('ERROR', client_addr, error_msg)\n if s:\n s.close()\n if conn:\n conn.close()\n self.log(\"WARNING\", client_addr, \"Peer Reset \" + str(line1))\n print('Recieving from website: ' + url.decode())\n time_elapsed = time() - time_elapsed\n print('Time Elapsed: {}'.format(time_elapsed))\n else:\n http_pos = url.find(b'://')\n if http_pos == -1:\n temp = url\n else:\n temp = url[(http_pos + 3):]\n port_pos = temp.find(b':')\n webserver_pos = temp.find(b'/')\n if webserver_pos == -1:\n webserver_pos = len(temp)\n\n webserver = \"\"\n port = -1\n if port_pos == -1 or webserver_pos < port_pos:\n port = 80\n webserver = temp[:webserver_pos]\n else:\n port = int((temp[port_pos + 1:])[:webserver_pos - port_pos - 1])\n webserver = temp[:port_pos]\n\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(self.config['CONNECTION_TIMEOUT'])\n s.connect((webserver, port))\n s.sendall(req)\n while True:\n data = s.recv(self.config['MAX_REQUEST_LENGTH'])\n if len(data) > 0:\n conn.send(data)\n else:\n break\n s.close()\n conn.close()\n except socket.error:\n if s:\n s.close()\n if conn:\n conn.close()\n self.log(\"WARNING\", client_addr, \"Peer Reset \" + str(line1))", "def __init__(self, client_ident):\n\t\tthreading.Thread.__init__(self, None)\n\t\tself.client_ident\t\t= client_ident\n\t\tself.start()", "def handle(self):\n\n global batch_no\n\n if self.client_address[0] in workers: # Established worker\n # self.request is a TCP socket connected to the client\n print('Worker connected... 
handling')\n self.data = recv_all(self.request, bytes_expected)\n\n try:\n network_weight_updates = deque(struct.unpack('{}f'.format(num_weights), self.data))\n print('\\tReceived data unpacked')\n except:\n print('\\tError unpacking data')\n else:\n print('\\tTraining master network')\n nn.train_master(network_weight_updates)\n batch_no += 1\n print('\\tCompleted batch #{} of {}'.format(batch_no, total_batches))\n\n print('\\tGetting network weights')\n network_weights = nn.get_weights()\n print('\\tPacking response')\n response = struct.pack('{}f'.format(num_weights), *network_weights)\n print('\\tSending back weights')\n self.request.sendall(response)\n\n elif self.client_address[0] in validators: # Established validator\n print('Validator connected... handling')\n print('\\tGetting network weights')\n network_weights = nn.get_weights()\n print('\\tPacking response')\n response = struct.pack('{}f'.format(num_weights), *network_weights)\n print('\\tSending back weights')\n self.request.sendall(response)\n\n else: # New node connected\n # Initialize client into set of workers\n print('Contacted by new node...')\n self.data = self.request.recv(1024).strip()\n print('\\tReceived init string')\n\n # New nodes should send b'w' to become a worker, or b'v' to become a validator\n if self.data == b'w':\n print('\\tNew WORKER node... handling')\n workers.add(self.client_address[0]) # Add IP to list of workers\n elif self.data == b'v':\n print('\\tNew VALIDATOR node... handling')\n validators.add(self.client_address[0]) # Add IP to list of validators\n else:\n print('\\tUnknown node request! Moving on...')\n return\n\n # Gather current network weights, pack as bytes, and send back to client\n network_weights = nn.get_weights()\n response = struct.pack('{}f'.format(num_weights), *network_weights)\n print('\\tSending network weights')\n self.request.sendall(response)\n\n print('\\tDONE with node')\n\n if batch_no > total_batches:\n # Done with desired number of batches! Finalize network training\n save_pkl(nn, 'saved_nn/iris_nn_relu_16n8n_100e_16b_l2.pkl') # Save trained network\n\n print('Eval training...')\n eval_tr = nn.eval_classification(train)\n print('Eval validation...')\n eval_v = nn.eval_classification(validation)\n print('Eval testing...')\n eval_t = nn.eval_classification(test)\n\n print('Batches: {}\\tTrain: {:.3f}\\tValidation: {:.3f}\\tTest: {:.3f}'\n .format(batch_no, eval_tr, eval_v, eval_t))\n exit()", "def run(self):\r\n self.rpc_server.serve_forever(0.5)", "def doctest_BackgroundWorkerThread():", "def workerProcess(self):\r\n\r\n if self.postForkCallback:\r\n self.postForkCallback()\r\n\r\n while self.isRunning.value == True:\r\n try:\r\n client = self.serverTransport.accept()\r\n self.serveClient(client)\r\n except (KeyboardInterrupt, SystemExit):\r\n return 0\r\n except Exception, x:\r\n logging.exception(x)", "def main():\n\n httpd = BaseHTTPServer.HTTPServer(\n ('localhost', int(ADDON.getSetting(\"port\"))),\n StupidHTTPRequestHandler)\n httpd_thread = threading.Thread(target=httpd.serve_forever)\n httpd_thread.start()\n\n monitor = xbmc.Monitor()\n \n while not monitor.abortRequested():\n # Sleep/wait for abort for 10 seconds\n if monitor.waitForAbort(10):\n # Abort was requested while waiting. 
We should exit\n break\n\n httpd.shutdown()\n httpd.server_close()", "def http_server():\n #Set up the server socket.\n server_socket = socket.socket(\n socket.AF_INET,\n socket.SOCK_STREAM,\n socket.IPPROTO_IP)\n\n try:\n #Connect the server socket.\n server_socket.bind(('127.0.0.1', 50000))\n server_socket.listen(1)\n\n #Loop indefinitely while waiting for connections.\n\n while True:\n #Accept outside the try block so that conn is always\n #bound when the finally clause below runs.\n conn, addr = server_socket.accept()\n try:\n msg = receive_message(conn)\n uri = parse_request(msg)\n resource, mimetype = map_uri(uri)\n\n except (Error404, ParseException) as err:\n response = build_response(err.message, 'text/plain', err.code)\n\n except Exception:\n response = build_response(\"500 Internal Server Error\",\n 'text/plain', '500')\n\n else:\n response = build_response(resource, mimetype)\n\n finally:\n conn.sendall(response)\n # conn.shutdown(socket.SHUT_WR)\n conn.close()\n\n finally:\n #Make sure the socket is closed if we can't continue.\n server_socket.close()", "def do_GET(self):\n server_ip = Setup.parse_options()['ip_address']\n uri = \"http://\" + server_ip + self.path\n response = urllib.urlopen(uri)\n self.copyfile(response, self.wfile)\n headers = self.generate_header_dic(self.headers.headers)\n ip_address = self.client_address[0] # get client IP address\n if Setup.system_status != 'green':\n self.process_request(ip_address, headers, self.path)\n self.process_response(ip_address, response.headers)", "def main(self):\n while True:\n if not self.data_server_command.empty():\n command_data_server = self.data_server_command.get()\n if command_data_server[0] == 4:\n thread.start_new_thread(self.get_file, (command_data_server[1],))\n else:\n self.data_server_command_def[command_data_server[0]](command_data_server[1])", "def do_work(self):", "def server(conn, address):\n print(\"Client Connection Open\")\n while True:\n request = server_read(conn)\n if request:\n print(request)\n manage_client(request, conn)", "def handle_one_request(self):\n \n try:\n \n self.raw_requestline = self.rfile.readline(65537)\n \n if len(self.raw_requestline) > 65536:\n \n self.requestline = ''\n \n self.request_version = ''\n \n self.command = ''\n \n self.send_error(414)\n \n return\n \n if not self.raw_requestline:\n \n self.close_connection = 1\n \n return\n \n if not self.parse_request():\n \n # An error code has been sent, just exit\n \n return\n \n mname = 'do_' + self.command\n \n if not hasattr(self, mname):\n \n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n \n return\n \n method = getattr(self, mname)\n \n print \"before call do_Get\"\n \n method()\n \n # added debug info and a check that wfile has not already been closed\n \n print \"after call do_Get\"\n \n if not self.wfile.closed:\n self.wfile.flush() #actually send the response if not already done.\n \n print \"after wfile.flush()\"\n \n except socket.timeout, e:\n \n #a read or a write timed out. 
Discard this connection\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = 1\n return", "async def _handler(self, reader, writer):\n gc.collect()\n\n try:\n req = request(reader)\n resp = response(writer)\n # Read HTTP Request with timeout\n await asyncio.wait_for(self._handle_request(req, resp),\n self.request_timeout)\n\n # OPTIONS method is handled automatically\n if req.method == b'OPTIONS':\n resp.add_access_control_headers()\n # Since we support only HTTP 1.0 - it is important\n # to tell browser that there is no payload expected\n # otherwise some webkit based browsers (Chrome)\n # treat this behavior as an error\n resp.add_header('Content-Length', '0')\n await resp._send_headers()\n return\n\n # Ensure that HTTP method is allowed for this path\n if req.method not in req.params['methods']:\n raise HTTPException(405)\n\n # Handle URL\n gc.collect()\n if hasattr(req, '_param'):\n await req.handler(req, resp, req._param)\n else:\n await req.handler(req, resp)\n # Done here\n except (asyncio.CancelledError, asyncio.TimeoutError):\n pass\n except OSError as e:\n # Do not send response for connection related errors - too late :)\n # P.S. code 32 - is possible BROKEN PIPE error (TODO: is it true?)\n if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, 32):\n try:\n await resp.error(500)\n except Exception as e:\n log.exc(e, \"\")\n except HTTPException as e:\n try:\n await resp.error(e.code)\n except Exception as e:\n log.exc(e)\n except Exception as e:\n # Unhandled expection in user's method\n log.error(req.path.decode())\n log.exc(e, \"\")\n try:\n await resp.error(500)\n # Send exception info if desired\n if self.debug:\n sys.print_exception(e, resp.writer.s)\n except Exception:\n pass\n finally:\n await writer.aclose()\n # Max concurrency support -\n # if queue is full schedule resume of TCP server task\n if len(self.conns) == self.max_concurrency:\n self.loop.create_task(self._server_coro)\n # Delete connection, using socket as a key\n del self.conns[id(writer.s)]", "def main(self):\n addr = (self.uri, self.port)\n try:\n self.client.connect(addr)\n except socket.gaierror:\n print(\"[ERROR] not a valid URI. 
Try again please...\")\n else:\n print(\"[SETUP] client connected to IPv4 address\", self.uri, \"on port\", self.port)\n self.handler()", "def run(self):\n self._server = self._get_server()\n self._server.serve_forever()", "def run(self):\n self.submit()\n self.start()", "async def run(self) -> None:", "async def run(self) -> None:", "def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n self.server.Start(threaded=srv_run_in_thread, stackSize=8192)", "def _bg_thread_main(self) -> None:\n while not self._done:\n self._run_server_cycle()", "async def main():\n await serve_websocket(handle_server, SERVER, PORT, ssl_context=None)", "def main_thread_http_entry(self, msg):\n # V/nsHttp HttpBaseChannel::Init [this=c30d000]\n if msg['message'].startswith('HttpBaseChannel::Init'):\n match = re.search(r'^HttpBaseChannel::Init \\[this=(?P<channel>[\\w\\d]+)]',\n msg['message'])\n if match:\n self.http['current_channel'] = match.groupdict().get('channel')\n # D/nsHttp nsHttpChannel::Init [this=c30d000]\n elif 'current_channel' in self.http and msg['message'].startswith('nsHttpChannel::Init'):\n del self.http['current_channel']\n # V/nsHttp uri=http://www.webpagetest.org/?bare=1\n elif 'current_channel' in self.http and msg['message'].startswith('uri='):\n match = re.search(r'^uri=(?P<url>[^ \\r\\n]+)', msg['message'])\n if match:\n self.http['channels'][self.http['current_channel']] = \\\n match.groupdict().get('url')\n # V/nsHttp Creating nsHttpTransaction @0x7f88bb130400\n elif msg['message'].startswith('Creating nsHttpTransaction '):\n match = re.search(r'^Creating nsHttpTransaction @(?P<id>[\\w\\d]+)', msg['message'])\n if match:\n self.http['creating_trans_id'] = match.groupdict().get('id')\n # D/nsHttp nsHttpChannel c30d000 created nsHttpTransaction c138c00\n elif msg['message'].startswith('nsHttpChannel') and \\\n msg['message'].find(' created nsHttpTransaction ') > -1:\n match = re.search(r'^nsHttpChannel (?P<channel>[\\w\\d]+) created '\\\n r'nsHttpTransaction (?P<id>[\\w\\d]+)', msg['message'])\n if match:\n channel = match.groupdict().get('channel')\n if channel in self.http['channels']:\n url = self.http['channels'][channel]\n del self.http['channels'][channel]\n if 'creating_trans_id' in self.http:\n trans_id = self.http['creating_trans_id']\n del self.http['creating_trans_id']\n else:\n trans_id = match.groupdict().get('id')\n # If there is already an existing transaction with the same ID,\n # move it to a unique ID.\n if trans_id in self.http['requests']:\n tmp_request = self.http['requests'][trans_id]\n del self.http['requests'][trans_id]\n self.unique_id += 1\n new_id = '{0}.{1:d}'.format(trans_id, self.unique_id)\n self.http['requests'][new_id] = tmp_request\n self.http['requests'][trans_id] = {'url': url,\n 'request_headers': [],\n 'response_headers': [],\n 'status': None,\n 'bytes_in': 0,\n 'chunks': []}\n # D/nsHttp nsHttpTransaction::Init [this=c138c00 caps=21]\n elif 
msg['message'].startswith('nsHttpTransaction::Init '):\n match = re.search(r'^nsHttpTransaction::Init \\[this=(?P<id>[\\w\\d]+)', msg['message'])\n if match:\n trans_id = match.groupdict().get('id')\n self.http['current_transaction'] = trans_id\n # D/nsHttp nsHttpTransaction c138c00 SetRequestContext c15ba00\n elif 'current_transaction' in self.http and \\\n msg['message'].startswith('nsHttpTransaction ') and \\\n msg['message'].find(' SetRequestContext ') > -1:\n del self.http['current_transaction']\n # I/nsHttp http request [\n elif 'current_transaction' in self.http and msg['message'] == 'http request [':\n self.http['request_headers'] = self.http['current_transaction']\n elif 'request_headers' in self.http and msg['message'] == ']':\n del self.http['request_headers']\n # Individual request headers\n elif 'request_headers' in self.http and msg['message'][0:2] == ' ':\n trans_id = self.http['request_headers']\n if trans_id in self.http['requests']:\n self.http['requests'][trans_id]['request_headers'].append(msg['message'][2:])", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client", "async def _main(self):\n while True:\n time.sleep(1)", "def serve_requests(self):\n while True:\n self.server_socket.listen(self.request_queue_size)\n client_connection, client_address = self.server_socket.accept()\n self.request_handler(client_connection)", "def run(self):\n try:\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.bind(self.address)\n self.server.listen(5) # Allows up to 5 waiting clients\n\n while True:\n self.myView.updateStatus('Waiting for connection ...')\n client, address = self.server.accept()\n self.myView.updateStatus('... 
connected from ' + str(address))\n handler = ClientHandler(client, self.bank, self.myView)\n handler.start()\n\n except Exception as message:\n self.myView.updateStatus(message)\n self.server.close()\n self.myView.updateStatus(\"Server shutting down.\")", "def background_stuff():\n print \"BACKGROUND\"\n\n\n\n # # global socketio\n\n # # print(wsClient.url, wsClient.products)\n # while (wsClient.MessageCount < 30):\n # print(\"\\nMessageCount =\", \"%i \\n\" % wsClient.MessageCount)\n # # time.sleep(1)\n # # socketio.emit('my response', {'data': [\"TEST\"]}, namespace=\"/test\", broadcast=True)\n # #\n # wsClient.close()\n #\n # while True:\n # time.sleep(1)\n # t = str(time.clock())\n # print t\n\n def minute_passed(oldepoch):\n return time.time() - oldepoch >= .1\n\n global wsClient\n\n # t = time.time()\n # for i in range(3000):\n # # while True:\n # # print time.clock(), t\n # # if time.clock() > ( t + .1 ):\n # # show = True #minute_passed(t)\n # # if show:\n # # print show, time.time(), t, time.time() - t\n # t = time.time()\n # cur_time = str(t)\n # socketio.emit('message', {'data': cur_time, \"msg\": wsClient.message['price'] }, namespace=\"/test\", broadcast=True)\n\n # global thread\n # thread = None", "def handle(self):\n thread_name = get_threadname()\n\n style_args = {}\n style_args['label'] = thread_name\n\n axis_args = {}\n axis_args['name'] = 'default'\n axis_args['x_label'] = ''\n axis_args['y_label'] = ''\n\n # client_init blocks until client sends 'BEGIN'\n first_value = self._handle_client_init(style_args, axis_args)\n if type(first_value) != float:\n if first_value == \"RETURN\":\n # done with client\n return\n else:\n print \"Unknown status\"\n return\n\n self._handle_setup_axis(axis_args)\n\n print \"Assigning to axis:\", axis_args['name']\n axes = self.server.axes[axis_args['name']]\n line_name = self._handle_create_line(axes, style_args)\n self._handle_update_legend(axes)\n\n # NOTE: client_read_data will block until client disconnects.\n self._handle_client_read_data(first_value, line_name)\n print \"Exiting:\", thread_name", "def run(self):\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(self.address)\n\n #send dummy data\n sock.sendall(bytes(\"Give me\", \"utf-8\"))\n received = sock.recv(1024)\n while True:\n data = sock.recv(1024)\n if not data: break\n received += data\n lymphocytes = pickle.loads(received)\n self.lymphocytes_setter(lymphocytes)\n except ConnectionRefusedError:\n #Don't bother. 
May be it's better to add more logic to determine\n #permanent connection errors.\n pass\n finally:\n sock.close()", "def main():\n try:\n # persistent('infected', os.path.abspath(__file__)) # DON'T REMOVE\n\n # rename me to client.py\n # os.rename(__file__, os.getcwd() + \"/client1.py\")\n # print os.path.abspath(__file__)\n global my_socket\n my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n my_socket.connect((SERVER_ADDRESS, PORT))\n\n handle = Handle()\n handle.say_hello()\n print my_socket.getsockname()\n msg = ''\n while True:\n # ----------send data----------\n if msvcrt.kbhit():\n char = msvcrt.getch()\n if char == chr(MY_CHAR):\n break\n if msg == '':\n sys.stdout.write('[Me] ')\n msg += char\n sys.stdout.write(char)\n sys.stdout.flush()\n if char == chr(CHAR_ENTER):\n my_socket.send(msg)\n msg = ''\n sys.stdout.write('\\n')\n\n # ----------receive data-------------\n rlist, wlist, xlist = select.select([my_socket], [my_socket], [], 1)\n if my_socket in rlist:\n data = my_socket.recv(MAX_SIZE_RESPONSE)\n if not data or data == \"\":\n my_socket.close()\n print \"Connection with server closed.\"\n return\n handle.handle_request(data)\n if msg != '':\n sys.stdout.write('[Me] ' + msg)\n sys.stdout.flush()\n\n except Exception as e:\n print e.message\n my_socket.close()\n print 'connection closed'\n return\n\n my_socket.close()\n print 'connection closed'", "def _worker(self):\n while True:\n request = self.queue.get()\n self.worker(request)\n self.queue.task_done()", "def process_request(self, request, client_address):\r\n t = self.thread_class(target = self.process_request_thread,\r\n args = (request, client_address))\r\n if self.daemon_threads:\r\n t.setDaemon (1)\r\n t.start()" ]
[ "0.6994863", "0.68837047", "0.6724171", "0.65650207", "0.6561778", "0.64669704", "0.63919336", "0.63876474", "0.63811946", "0.63524044", "0.6348768", "0.63193417", "0.6318059", "0.62665766", "0.6266232", "0.6265155", "0.6248112", "0.6233416", "0.6225023", "0.61834395", "0.61834395", "0.6169332", "0.6168503", "0.6135096", "0.61297274", "0.60723007", "0.6069672", "0.60695297", "0.6059198", "0.6048037", "0.60373145", "0.60232526", "0.60130686", "0.6008499", "0.6003302", "0.6001352", "0.59938157", "0.5978662", "0.5965577", "0.5951893", "0.59441096", "0.59320986", "0.59305847", "0.5928787", "0.59268147", "0.5911027", "0.5905872", "0.58825266", "0.5879967", "0.5869474", "0.5857926", "0.5855451", "0.5825319", "0.582088", "0.58168495", "0.58137697", "0.5811733", "0.5782301", "0.5763887", "0.57631385", "0.57605606", "0.57407117", "0.57394683", "0.57355434", "0.5732799", "0.5710418", "0.5707434", "0.57032603", "0.5700601", "0.569782", "0.56840533", "0.56836206", "0.5683258", "0.5682616", "0.56819683", "0.5681463", "0.5677108", "0.5659664", "0.56479955", "0.56425446", "0.56407624", "0.5601558", "0.5600113", "0.5590967", "0.5588472", "0.55857646", "0.55857646", "0.5585009", "0.55765456", "0.5574871", "0.5571103", "0.55665356", "0.5558183", "0.55521184", "0.55502343", "0.5550072", "0.5548582", "0.55438894", "0.55416256", "0.5539582", "0.55382574" ]
0.0
-1
Performs a single iteration of the algorithm. This includes collecting the data, updating the parameters, and adding the metrics of interest to the logger. Does not update the `curr_iter` attribute.
def step(self, snapshot_mode: str, meta_info: dict = None):
    # Save snapshot to save the correct iteration count
    self.save_snapshot()

    if self.curr_checkpoint == -1:
        self.train_teachers(snapshot_mode, None)
        self.reached_checkpoint()  # setting counter to 0

    if self.curr_checkpoint == 0:
        # Sample observations
        ros, rets, all_lengths = self.sample()

        # Log current progress
        self.logger.add_value("max return", np.max(rets), 4)
        self.logger.add_value("median return", np.median(rets), 4)
        self.logger.add_value("avg return", np.mean(rets), 4)
        self.logger.add_value("min return", np.min(rets), 4)
        self.logger.add_value("std return", np.std(rets), 4)
        self.logger.add_value("std var", self.expl_strat.std.item(), 4)
        self.logger.add_value("avg rollout len", np.mean(all_lengths), 4)
        self.logger.add_value("num total samples", np.sum(all_lengths))

        # Save snapshot data
        self.make_snapshot(snapshot_mode, np.mean(rets), meta_info)

        # Update policy and value function
        self.update(rollouts=ros)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterate(self, data):\n \n # Append data to self.data\n self.data = np.append(self.data, data)\n \n for i, d in enumerate(data):\n update = self.current*self.likelihood(d)\n self.current = self._normalize(update)\n self.posterior = np.concatenate((self.posterior,[self.current]))\n \n print(str(len(data)) + \" iterations completed!\")\n \n return None", "def update():\n global iteration, result\n iteration += 1\n # Stop iterating after max_iterations\n if iteration >= max_iterations:\n timer.stop()\n print \"Output is\", result\n else:\n result = get_next(result)", "def _optimization_loop(self, iteration=0):\n self.logger.print_optimization_header()\n\n while iteration < self.iterations:\n try:\n self._execute_experiment()\n except RepeatedExperimentError:\n # G.debug_(F'Skipping repeated Experiment: {_ex!s}\\n')\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n self.skipped_iterations += 1\n continue\n except StopIteration:\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n # G.debug_(f'Re-initializing hyperparameter grid after testing {len(self.tested_keys)} keys')\n self._set_hyperparameter_space()\n continue\n\n self.logger.print_result(\n self.current_hyperparameters_list,\n self.current_score,\n experiment_id=self.current_experiment.experiment_id,\n )\n\n if (\n (self.best_experiment is None) # First evaluation\n or (self.do_maximize and (self.best_score < self.current_score)) # New best max\n or (not self.do_maximize and (self.best_score > self.current_score)) # New best min\n ):\n self.best_experiment = self.current_experiment.experiment_id\n self.best_score = self.current_score\n\n iteration += 1", "def run(self):\n if not self._no_progress and self._verbose:\n from progressbar import ProgressBar\n progress = ProgressBar()\n iter_range = progress(range(self._iters))\n else:\n iter_range = range(self._iters)\n\n if self._no_progress and self._time_iters:\n from time import time\n\n i = 0\n try:\n for i in iter_range:\n if self._verbose and self._no_progress:\n print(\"Iteration \" + repr(i))\n\n if self._no_progress and self._time_iters:\n start = time()\n\n self.iteration += 1\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n if self._double:\n update_m_double(self._m, alpha_k, self._p_k)\n sub_scaled_vector_double(self._residual_k,\n self._residual_k,\n alpha_k, self._v_k)\n else:\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k,\n alpha_k, self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n print(\"Converged.\")\n self.converged = True\n break\n\n if self._double:\n add_scaled_vector_double(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n else:\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k, self._p_k)\n\n self._rho_k = rho_k_plus_1\n\n if self._noisy:\n print(\" Residual=\" + str(rho_k_t))\n\n if self._no_progress and self._time_iters:\n print(\"Elapsed time for iteration \" + str(i) + \": \" +\n str(time() - start) + \" seconds\")\n\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n 
self._data.nX2)),\n self._out_dir, i, self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, i)\n except KeyboardInterrupt:\n print(\"Reconstruction aborted (CTRL-C) at iteration \" + str(i))\n finally:\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, \"result\", self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, \"result\")\n self.iteration = i+1\n return (self._m.get().reshape(self._data.nX1, self._data.nX2),\n self.iteration)", "def process(self, data_itr):\n for data in data_itr:\n self.update(data)\n while True:\n try:\n out = self.next()\n yield out\n except StopIteration:\n break", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def _iteration(self, data_loader):\n\n iter_rho = AverageMeter('Iter rho')\n iter_nmse = AverageMeter('Iter nmse')\n iter_loss = AverageMeter('Iter loss')\n iter_time = AverageMeter('Iter time')\n time_tmp = time.time()\n\n for batch_idx, (sparse_gt, raw_gt) in enumerate(data_loader):\n sparse_gt = sparse_gt.to(self.device)\n sparse_pred = self.model(sparse_gt)\n loss = self.criterion(sparse_pred, sparse_gt)\n rho, nmse = evaluator(sparse_pred, sparse_gt, raw_gt)\n\n # Log and visdom update\n iter_loss.update(loss)\n iter_rho.update(rho)\n iter_nmse.update(nmse)\n iter_time.update(time.time() - time_tmp)\n time_tmp = time.time()\n\n # plot progress\n if (batch_idx + 1) % self.print_freq == 0:\n logger.info(f'[{batch_idx + 1}/{len(data_loader)}] '\n f'loss: {iter_loss.avg:.3e} | rho: {iter_rho.avg:.3e} | '\n f'NMSE: {iter_nmse.avg:.3e} | time: {iter_time.avg:.3f}')\n\n logger.info(f'=> Test rho:{iter_rho.avg:.3e} NMSE: {iter_nmse.avg:.3e}\\n')\n\n return iter_loss.avg, iter_rho.avg, iter_nmse.avg", "def run_all_iterations(self):\n self.start_time = time.time()\n for _ in xrange(self.iterations):\n self.run_iteration()\n self.elapsed_time = time.time() - self.start_time\n\n self.print_statistics()", "def iterate(self, update_func):\n self.pre_loop()\n logger.info('%s: start iterations', self.__class__.__name__)\n with recording_exit_reason(self.datastore):\n for gen_step in range(self.iterations):\n self.post_update(gen_step, update_func(gen_step))\n logger.info('%s: maximum iterations reached', self.__class__.__name__)", "def _run_one_iteration(self, iteration):\n statistics = iteration_statistics.IterationStatistics()\n logging.info('Starting iteration %d', iteration)\n num_episodes_train, average_reward_train, average_steps_per_second = (\n self._run_train_phase(statistics))\n active_num_episodes_eval, active_average_reward_eval = self._run_eval_phase(\n statistics, 'active')\n passive_num_episodes_eval, passive_average_reward_eval = (\n self._run_eval_phase(statistics, 'passive'))\n\n self._save_tensorboard_summaries(iteration, num_episodes_train,\n average_reward_train,\n active_num_episodes_eval,\n active_average_reward_eval,\n passive_num_episodes_eval,\n passive_average_reward_eval,\n average_steps_per_second)\n return statistics.data_lists", "def iterate(self, update_func):\n if self.disc_param_save_on_error:\n update_func = param_file.wrap_with_save_on_error(\n self.gan.discriminator,\n self.datastore.path('disc_param', 'pre_error.npz'),\n self.datastore.path('disc_param', 'post_error.npz'),\n )(update_func)\n\n self.pre_loop()\n logger.info('%s: start iterations', self.__class__.__name__)\n with 
recording_exit_reason(self.datastore):\n for gen_step in range(self.iterations):\n self.post_update(gen_step, update_func(gen_step))\n logger.info('%s: maximum iterations reached', self.__class__.__name__)", "def _run_iter(self, data):\n\n global_cost = 0\n\n # We want to iterate over data randomly so as not to unintentionally\n # bias the word vector contents\n shuffle(data)\n\n for (v_main, v_context, b_main, b_context, gradsq_W_main, gradsq_W_context,\n gradsq_b_main, gradsq_b_context, cooccurrence) in data:\n weight = (cooccurrence / self._x_max) ** self._alpha if cooccurrence < self._x_max else 1\n\n # Compute inner component of cost function, which is used in\n # both overall cost calculation and in gradient calculation\n #\n # $$ J' = w_i^Tw_j + b_i + b_j - log(X_{ij}) $$\n cost_inner = (v_main.dot(v_context)\n + b_main[0] + b_context[0]\n - log(cooccurrence))\n\n # Compute cost\n #\n # $$ J = f(X_{ij}) (J')^2 $$\n cost = weight * (cost_inner ** 2)\n\n # Add weighted cost to the global cost tracker\n global_cost += 0.5 * cost\n\n # Compute gradients for word vector terms.\n #\n # NB: `main_word` is only a view into `W` (not a copy), so our\n # modifications here will affect the global weight matrix;\n # likewise for context_word, biases, etc.\n grad_main = weight * cost_inner * v_context\n grad_context = weight * cost_inner * v_main\n\n # Compute gradients for bias terms\n grad_bias_main = weight * cost_inner\n grad_bias_context = weight * cost_inner\n\n # Now perform adaptive updates\n v_main -= (self._learning_rate * grad_main / np.sqrt(gradsq_W_main))\n v_context -= (self._learning_rate * grad_context / np.sqrt(gradsq_W_context))\n\n b_main -= (self._learning_rate * grad_bias_main / np.sqrt(gradsq_b_main))\n b_context -= (self._learning_rate * grad_bias_context / np.sqrt(\n gradsq_b_context))\n\n # Update squared gradient sums\n gradsq_W_main += np.square(grad_main)\n gradsq_W_context += np.square(grad_context)\n gradsq_b_main += grad_bias_main ** 2\n gradsq_b_context += grad_bias_context ** 2\n\n return global_cost", "def __call__(self, param):\n count = param.nbatch\n if self.last_count > count:\n self.init = False\n self.last_count = count\n\n if self.init:\n if count % self.frequent == 0:\n speed = self.frequent * self.batch_size / (time.time() - self.tic)\n s = ''\n if param.eval_metric is not None:\n name, value = param.eval_metric.get()\n s = \"Epoch[%d] Batch [%d]\\tSpeed: %.2f samples/sec\\tTrain-\" % (param.epoch, count, speed)\n for n, v in zip(name, value):\n s += \"%s=%f,\\t\" % (n, v)\n else:\n s = \"Iter[%d] Batch [%d]\\tSpeed: %.2f samples/sec\" % (param.epoch, count, speed)\n\n logging.info(s)\n print(s)\n self.tic = time.time()\n else:\n self.init = True\n self.tic = time.time()", "def evaluate(self):\n iterator = self._iterators['main']\n\n if self.eval_hook:\n self.eval_hook(self)\n\n summary = reporting.DictSummary()\n\n progress = IterationStatus(len(iterator))\n if self._progress_bar:\n pbar = _IteratorProgressBar(iterator=progress)\n\n last_iter = len(iterator) - 1\n with _in_eval_mode(self._targets.values()):\n for idx, batch in enumerate(iterator):\n last_batch = idx == last_iter\n progress.current_position = idx\n observation = {}\n with reporting.report_scope(observation):\n if isinstance(batch, (tuple, list)):\n outs = self.eval_func(*batch)\n elif isinstance(batch, dict):\n outs = self.eval_func(**batch)\n else:\n outs = self.eval_func(batch)\n for metric in self._metrics:\n metric(batch, outs, last_batch)\n summary.add(observation)\n\n if 
self._progress_bar:\n pbar.update()\n\n if self._progress_bar:\n pbar.close()\n\n return summary.compute_mean()", "def report(self, individuals):\n print(f'Iteration: {self.current}')\n self.current += 1", "def step(self):\n for p, grad, v, square_grad_avg, delta_x_acc in self.params:\n # Compute the running average of the squared gradients \n square_grad_avg.mul_(self.rho)\n square_grad_avg.addcmul_(grad, grad, value = 1 - self.rho)\n # Compute the RMS of the previous squared gradients (eps to avoid numerical issues later for division)\n std = (square_grad_avg.add_(self.eps)).sqrt_()\n # Compute the accumulated update\n delta_x = ((delta_x_acc.add_(self.eps)).sqrt_()) * grad / std\n # Accumulate the updates\n delta_x_acc.mul_(self.rho)\n delta_x_acc.addcmul_(delta_x, delta_x, value = 1 - self.rho) \n # Update the parameters\n p.add_(delta_x, alpha = - self.lr)", "def __call__(self, param):\n count = param.nbatch\n if self.last_count > count:\n self.init = False\n self.last_count = count\n\n if self.init:\n if count % self.frequent == 0:\n # #11504\n try:\n speed = self.frequent * self.batch_size / (time.time() - self.tic)\n except ZeroDivisionError:\n speed = float('inf')\n if param.eval_metric is not None:\n name_value = param.eval_metric.get_name_value()\n if self.auto_reset:\n param.eval_metric.reset()\n msg = 'Epoch[%d] Batch [%d-%d]\\tSpeed: %.2f samples/sec'\n msg += '\\t%s=%f'*len(name_value)\n logging.info(msg, param.epoch, count-self.frequent, count, speed, *sum(name_value, ()))\n else:\n msg = 'Epoch[%d] Batch [0-%d]\\tSpeed: %.2f samples/sec'\n msg += '\\t%s=%f'*len(name_value)\n logging.info(msg, param.epoch, count, speed, *sum(name_value, ()))\n else:\n logging.info(\"Iter[%d] Batch [%d]\\tSpeed: %.2f samples/sec\",\n param.epoch, count, speed)\n self.tic = time.time()\n else:\n self.init = True\n self.tic = time.time()", "def __iter__(self):\n self.current_epoch = self.current_epoch + 1\n self.num_samples_yield = -1\n return self", "def __iter__(self):\n self.current_epoch = self.current_epoch + 1\n self.num_samples_yield = -1\n return self", "def _run(self):\n self._algorithm(self._list, self)", "def step(self) -> ResultDict:\n # Do we have to run `self.evaluate()` this iteration?\n # `self.iteration` gets incremented after this function returns,\n # meaning that e. g. 
the first time this function is called,\n # self.iteration will be 0.\n evaluate_this_iter = (\n self.config.evaluation_interval is not None\n and (self.iteration + 1) % self.config.evaluation_interval == 0\n )\n\n # Results dict for training (and if appolicable: evaluation).\n results: ResultDict = {}\n\n # Parallel eval + training: Kick off evaluation-loop and parallel train() call.\n if evaluate_this_iter and self.config.evaluation_parallel_to_training:\n (\n results,\n train_iter_ctx,\n ) = self._run_one_training_iteration_and_evaluation_in_parallel()\n # - No evaluation necessary, just run the next training iteration.\n # - We have to evaluate in this training iteration, but no parallelism ->\n # evaluate after the training iteration is entirely done.\n else:\n results, train_iter_ctx = self._run_one_training_iteration()\n\n # Sequential: Train (already done above), then evaluate.\n if evaluate_this_iter and not self.config.evaluation_parallel_to_training:\n results.update(self._run_one_evaluation(train_future=None))\n\n # Attach latest available evaluation results to train results,\n # if necessary.\n if not evaluate_this_iter and self.config.always_attach_evaluation_results:\n assert isinstance(\n self.evaluation_metrics, dict\n ), \"Algorithm.evaluate() needs to return a dict.\"\n results.update(self.evaluation_metrics)\n\n if hasattr(self, \"workers\") and isinstance(self.workers, WorkerSet):\n # Sync filters on workers.\n self._sync_filters_if_needed(\n central_worker=self.workers.local_worker(),\n workers=self.workers,\n config=self.config,\n )\n # TODO (avnishn): Remove the execution plan API by q1 2023\n # Collect worker metrics and add combine them with `results`.\n if self.config._disable_execution_plan_api:\n episodes_this_iter = collect_episodes(\n self.workers,\n self._remote_worker_ids_for_metrics(),\n timeout_seconds=self.config.metrics_episode_collection_timeout_s,\n )\n results = self._compile_iteration_results(\n episodes_this_iter=episodes_this_iter,\n step_ctx=train_iter_ctx,\n iteration_results=results,\n )\n\n # Check `env_task_fn` for possible update of the env's task.\n if self.config.env_task_fn is not None:\n if not callable(self.config.env_task_fn):\n raise ValueError(\n \"`env_task_fn` must be None or a callable taking \"\n \"[train_results, env, env_ctx] as args!\"\n )\n\n def fn(env, env_context, task_fn):\n new_task = task_fn(results, env, env_context)\n cur_task = env.get_task()\n if cur_task != new_task:\n env.set_task(new_task)\n\n fn = functools.partial(fn, task_fn=self.config.env_task_fn)\n self.workers.foreach_env_with_context(fn)\n\n return results", "def update(self, iteration):\n pass", "def iterate_data(dataset,iter_no=5,pixel_mask=None,plot_clear=True,algo=\"FordRollett\",unit_weights=False):\n import overlap\n start_gain = array.ones(len(dataset))\n if unit_weights is True:\n weights = array.ones_like(dataset)\n else:\n weights = 1.0/dataset.var\n # Use weights as the mask\n if pixel_mask is not None:\n weights = weights*pixel_mask\n if algo == \"FordRollett\":\n gain,first_ave,ar,esds,k = overlap.find_gain_fr(dataset,weights,start_gain,pixel_mask=pixel_mask)\n else:\n raise ValueError(\"No such algorithm: %s\" % algo)\n chisquared,residual_map = overlap.get_statistics_fr(gain,first_ave,dataset,dataset.var,pixel_mask)\n old_result = first_ave #store for later\n chisq_history = [chisquared]\n k_history = [k]\n if iter_no > 0: \n no_iters = iter_no\n else:\n no_iters = abs(iter_no)\n for cycle_no in range(no_iters+1):\n esdflag = (cycle_no == 
no_iters) # need esds as well, and flags the last cycle\n print 'Cycle %d' % cycle_no\n if cycle_no > 3 and iter_no < 0:\n esdflag = (esdflag or (abs(chisq_history[-2]-chisq_history[-1]))<0.005)\n if algo == \"FordRollett\":\n gain,interim_result,ar,esds,k = overlap.find_gain_fr(dataset,weights,gain,arminus1=ar,pixel_mask=pixel_mask,errors=esdflag)\n chisquared,residual_map = overlap.get_statistics_fr(gain,interim_result,dataset,dataset.var,pixel_mask)\n chisq_history.append(chisquared)\n k_history.append(k)\n if esdflag is True:\n break\n print 'Chisquared: ' + `chisq_history`\n print 'K: ' + `k_history`\n print 'Total cycles: %d' % cycle_no\n print 'Maximum shift/error: %f' % max(ar/esds)\n return gain,dataset,interim_result,residual_map,chisq_history,esds,first_ave,weights", "def do_iteration(self, mutation_fn, aggression):\n raise NotImplementedError()", "def iterate(self):\n raise NotImplementedError()", "def __iter__(self):\n self._loop_idx = 0\n self._target_time = time.time()\n return self", "def run(self):\n for i,p in enumerate(self.pairs):\n self.forPointPair(i)\n if i % 100000 == 0:\n print('Percentage Processed: ' + str(round(i * 100 / len(self.pairs), 3)) + '. Existing Cluster Labels: ', len(np.unique(self.labels)))", "def Optimize(self):\n self._OpenOutputFiles()\n while self.n_iter < self.n_maxiter and not self.is_converged:\n self.n_iter += 1\n self._ChooseStepDirection(self.opt_type)\n self._LineSearch(-1.0 * self.step_dir)\n self._UpdateEnergy()\n self._UpdateGradient()\n self.traj.AppendStep(self.mol)\n self._UpdateCriteria()\n self._CheckConvergence()\n self._PrintStatus()\n self._CloseOutputFiles()", "def _report_iteration(self):\n # Call report_iteration_items for a subclass-friendly function\n self._report_iteration_items()\n self._reporter.write_timestamp(self._iteration)\n self._reporter.write_last_iteration(self._iteration)", "def oneIteration(self):\n\t\traise NotImplementedError", "def log_stats(self):\n while True:\n for stats in self.stats.values():\n stats.log_stats()\n\n yield from asyncio.sleep(stats_delay)", "def update(self, itr, algorithm, agent, traj_sample_lists, pol_sample_lists):\n if self._first_update:\n self._output_column_titles(algorithm)\n self._first_update = False\n\n costs = [np.mean(np.sum(algorithm.prev[m].cs, axis=1)) for m in range(algorithm.M)]\n self._update_iteration_data(itr, algorithm, costs, pol_sample_lists)\n self._cost_plotter.update(costs, t=itr)\n if END_EFFECTOR_POINTS in agent.x_data_types:\n self._update_trajectory_visualizations(algorithm, agent,\n traj_sample_lists, pol_sample_lists)\n\n self._fig.canvas.draw()\n self._fig.canvas.flush_events() # Fixes bug in Qt4Agg backend", "def run_one_iteration(self, current_state, current_log_pdf):\n # Start the loop over nsamples - this code uses the parallel version of the stretch algorithm\n all_inds = np.arange(self.nchains)\n inds = all_inds % 2\n accept_vec = np.zeros((self.nchains, ))\n # Separate the full ensemble into two sets, use one as a complementary ensemble to the other and vice-versa\n for split in range(2):\n set1 = (inds == split)\n\n # Get current and complementary sets\n sets = [current_state[inds == j01, :] for j01 in range(2)]\n curr_set, comp_set = sets[split], sets[1 - split] # current and complementary sets respectively\n ns, nc = len(curr_set), len(comp_set)\n\n # Sample new state for S1 based on S0\n unif_rvs = Uniform().rvs(nsamples=ns, random_state=self.random_state)\n zz = ((self.scale - 1.) * unif_rvs + 1.) ** 2. 
/ self.scale # sample Z\n factors = (self.dimension - 1.) * np.log(zz) # compute log(Z ** (d - 1))\n multi_rvs = Multinomial(n=1, p=[1. / nc, ] * nc).rvs(nsamples=ns, random_state=self.random_state)\n rint = np.nonzero(multi_rvs)[1] # sample X_{j} from complementary set\n candidates = comp_set[rint, :] - (comp_set[rint, :] - curr_set) * np.tile(\n zz, [1, self.dimension]) # new candidates\n\n # Compute new likelihood, can be done in parallel :)\n logp_candidates = self.evaluate_log_target(candidates)\n\n # Compute acceptance rate\n unif_rvs = Uniform().rvs(nsamples=len(all_inds[set1]), random_state=self.random_state).reshape((-1,))\n for j, f, lpc, candidate, u_rv in zip(\n all_inds[set1], factors, logp_candidates, candidates, unif_rvs):\n accept = np.log(u_rv) < f + lpc - current_log_pdf[j]\n if accept:\n current_state[j] = candidate\n current_log_pdf[j] = lpc\n accept_vec[j] += 1.\n\n # Update the acceptance rate\n self._update_acceptance_rate(accept_vec)\n return current_state, current_log_pdf", "def run(self) -> Dict[str, Union[float, str]]:\n try:\n self.is_run = True\n deque(self, maxlen=0) # feed the entire iterator into a zero-length deque\n info = gather_info(\n self.start_time, self.train_collector, self.test_collector,\n self.best_reward, self.best_reward_std\n )\n finally:\n self.is_run = False\n\n return info", "def __call__(self, d):\n try:\n with self.statistics_lock:\n self.statistics_data['input']+=1\n if self.batch_size<=1:\n try:\n self.output(self._work(d))\n except Discard:\n with self.statistics_lock:\n self.statistics_data['discarded']+=1\n except Exception, e:\n logging.exception(\"Caught exception, Data discarded\")\n with self.statistics_lock:\n self.statistics_data['discarded']+=1\n finally:\n self.task_done()\n else:\n self.batch_repo.append(d)\n if len(self.batch_repo)>=self.batch_size:\n self._batch_work()\n except:\n pass", "def __iter__(self):\n while True:\n if self.stop:\n return\n for item in self.get_next_batch():\n yield item", "def __iter__(self):\n return iter((self.train_stats, self.preprocessed_data, self.output_directory))", "def callback(self, parameters):\n print(\"\\nFinsished Iteration: {}\".format(self.current_iteration), flush=True)\n print(\"Current Parameters: {}\".format(parameters), flush=True)\n\n # If getting the value history, perform an evaluation with current parameters\n\n print(\"Starting Next Iteration...\", flush=True)\n\n # Update currrent_iteration index and add new blank history\n self.current_iteration += 1", "def run(self, data):\n for idx, block in enumerate(data):\n current = float(np.mean(block['current']['value']))\n print(f'{idx}: {current}')", "def run(self):\n iteration = 0\n\n while not self._is_converged():\n # (1) Compute current operating point and update last one.\n xs, us, costs = self._compute_operating_point()\n self._last_operating_point = self._current_operating_point\n self._current_operating_point = (xs, us, costs)\n\n # If this is the first time through, then set up reference deviation\n # costs and add to player costs. 
Otherwise, just update those costs.\n if self._reference_deviation_weight is not None and iteration == 0:\n self._x_reference_cost = ReferenceDeviationCost(xs)\n self._u_reference_costs = [\n ReferenceDeviationCost(ui) for ui in us]\n\n for ii in range(self._num_players):\n self._player_costs[ii].add_cost(\n self._x_reference_cost, \"x\",\n self._reference_deviation_weight)\n self._player_costs[ii].add_cost(\n self._u_reference_costs[ii], ii,\n self._reference_deviation_weight)\n elif self._reference_deviation_weight is not None:\n self._x_reference_cost.reference = self._last_operating_point[0]\n for ii in range(self._num_players):\n self._u_reference_costs[ii].reference = \\\n self._last_operating_point[1][ii]\n\n # Visualization.\n if self._visualizer is not None:\n traj = {\"xs\" : xs}\n for ii in range(self._num_players):\n traj[\"u%ds\" % (ii + 1)] = us[ii]\n\n self._visualizer.add_trajectory(iteration, traj)\n# self._visualizer.plot_controls(1)\n# plt.pause(0.01)\n# plt.clf()\n# self._visualizer.plot_controls(2)\n# plt.pause(0.01)\n# plt.clf()\n self._visualizer.plot()\n plt.pause(0.01)\n plt.clf()\n\n # (2) Linearize about this operating point. Make sure to\n # stack appropriately since we will concatenate state vectors\n # but not control vectors, so that\n # ``` x_{k+1} - xs_k = A_k (x_k - xs_k) +\n # sum_i Bi_k (ui_k - uis_k) ```\n As = []\n Bs = [[] for ii in range(self._num_players)]\n for k in range(self._horizon):\n A, B = self._dynamics.linearize_discrete(\n xs[k], [uis[k] for uis in us])\n As.append(A)\n\n for ii in range(self._num_players):\n Bs[ii].append(B[ii])\n\n # (3) Quadraticize costs.\n Qs = [[] for ii in range(self._num_players)]\n ls = [[] for ii in range(self._num_players)]\n Rs = [[[] for jj in range(self._num_players)]\n for ii in range(self._num_players)]\n for ii in range(self._num_players):\n for k in range(self._horizon):\n _, l, Q, R = self._player_costs[ii].quadraticize(\n xs[k], [uis[k] for uis in us], k)\n\n Qs[ii].append(Q)\n ls[ii].append(l)\n\n for jj in range(self._num_players):\n Rs[ii][jj].append(R[jj])\n\n # (4) Compute feedback Nash equilibrium of the resulting LQ game.\n Ps, alphas = solve_lq_game(As, Bs, Qs, ls, Rs)\n\n # Accumulate total costs for both players.\n total_costs = [sum(costis).item() for costis in costs]\n print(\"Total cost for all players: \", total_costs)\n\n # Log everything.\n if self._logger is not None:\n self._logger.log(\"xs\", xs)\n self._logger.log(\"us\", us)\n self._logger.log(\"total_costs\", total_costs)\n self._logger.dump()\n\n # Update the member variables.\n self._Ps = Ps\n self._alphas = alphas\n\n # (5) Linesearch.\n self._linesearch()\n iteration += 1", "def iterate(self):\n for i in range(self.generations):\n sorted_polulation = sorted(self.data, key=lambda item: - item.fitness_function(item))\n print(\n [item.to_string() for item in sorted_polulation[:8]],\n [round(item.fitness_function(item),2) for item in sorted_polulation]\n )\n\n # print([item.to_string() for item in self.data])\n\n self.step()\n print(\"result\")\n sorted_polulation = sorted(self.data, key=lambda item: - item.fitness_function(item))\n print([str(item) for item in sorted_polulation])", "def run(self):\n for msr in self.msrs:\n # find state transition matrix\n phi_p, state_prop = self._compute_stm(msr.time)\n\n # use stm to propagate perturbation and covariance\n cov_m = np.matmul(phi_p, np.matmul(self.cov_list[-1],\n np.transpose(phi_p)))\n\n # compute observation deviation, obs_state matrix\n y_i, h_tilde = self._msr_resid(msr, 
state_prop)\n\n # calculate kalman gain\n k_gain = self._calc_k_gain(cov_m, h_tilde, msr.cov)\n\n # measurement update\n cov_p, state_est = self._measurement_update(y_i,\n h_tilde,\n k_gain,\n cov_m,\n state_prop)\n\n # update the state lists\n self.residuals.append(y_i)\n self.prop_state_list.append(state_est)\n self.estimates.append(state_est)\n self.cov_list.append(cov_p)\n self.times.append(msr.time)", "def logging_loop(self, num_gpus):\n # Launch the test worker to get performance metrics\n self.test_worker = self_play.SelfPlay.options(\n num_cpus=0, num_gpus=num_gpus,\n ).remote(\n self.checkpoint,\n self.Game,\n self.config,\n self.config.seed + self.config.num_workers,\n )\n self.test_worker.continuous_self_play.remote(\n self.shared_storage_worker, None, True\n )\n\n # Write everything in TensorBoard\n writer = SummaryWriter(self.config.results_path)\n\n print(\n \"\\nTraining...\\nRun tensorboard --logdir ./results and go to http://localhost:6006/ to see in real time the training performance.\\n\"\n )\n\n # Save hyperparameters to TensorBoard\n hp_table = [\n f\"| {key} | {value} |\" for key, value in self.config.__dict__.items()\n ]\n writer.add_text(\n \"Hyperparameters\",\n \"| Parameter | Value |\\n|-------|-------|\\n\" + \"\\n\".join(hp_table),\n )\n # Save model representation\n writer.add_text(\n \"Model summary\", self.summary,\n )\n # Loop for updating the training performance\n counter = 0\n keys = [\n \"total_reward\",\n \"wormzero_reward\",\n \"opponent_reward\",\n \"episode_length\",\n \"mean_value\",\n \"training_step\",\n \"lr\",\n \"total_loss\",\n \"value_loss\",\n \"policy_loss\",\n \"num_played_games\",\n \"num_played_steps\",\n \"num_reanalysed_games\",\n ]\n info = ray.get(self.shared_storage_worker.get_info.remote(keys))\n try:\n while info[\"training_step\"] < self.config.training_steps:\n info = ray.get(self.shared_storage_worker.get_info.remote(keys))\n writer.add_scalar(\n \"1.Total_reward/1.Total_reward\", info[\"total_reward\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/2.Mean_value\", info[\"mean_value\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/3.Episode_length\", info[\"episode_length\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/4.WormZero_reward\", info[\"wormzero_reward\"], counter,\n )\n writer.add_scalar(\n \"1.Total_reward/5.Opponent_reward\",\n info[\"opponent_reward\"],\n counter,\n )\n writer.add_scalar(\n \"2.Workers/1.Self_played_games\", info[\"num_played_games\"], counter,\n )\n writer.add_scalar(\n \"2.Workers/2.Training_steps\", info[\"training_step\"], counter\n )\n writer.add_scalar(\n \"2.Workers/3.Self_played_steps\", info[\"num_played_steps\"], counter\n )\n writer.add_scalar(\n \"2.Workers/4.Reanalysed_games\",\n info[\"num_reanalysed_games\"],\n counter,\n )\n writer.add_scalar(\n \"2.Workers/5.Training_steps_per_self_played_step_ratio\",\n info[\"training_step\"] / max(1, info[\"num_played_steps\"]),\n counter,\n )\n writer.add_scalar(\"2.Workers/6.Learning_rate\", info[\"lr\"], counter)\n writer.add_scalar(\n \"3.Loss/1.Total_weighted_loss\", info[\"total_loss\"], counter\n )\n writer.add_scalar(\"3.Loss/Value_loss\", info[\"value_loss\"], counter)\n writer.add_scalar(\"3.Loss/Policy_loss\", info[\"policy_loss\"], counter)\n print(\n f'Last test reward: {info[\"total_reward\"]:.2f}. Training step: {info[\"training_step\"]}/{self.config.training_steps}. Played games: {info[\"num_played_games\"]}. 
Loss: {info[\"total_loss\"]:.2f}',\n end=\"\\r\",\n )\n counter += 1\n time.sleep(0.5)\n except KeyboardInterrupt:\n pass\n\n self.terminate_workers()\n\n if self.config.save_model:\n # Persist replay buffer to disk\n print(\"\\n\\nPersisting replay buffer games to disk...\")\n pickle.dump(\n {\n \"buffer\": self.replay_buffer,\n \"num_played_games\": self.checkpoint[\"num_played_games\"],\n \"num_played_steps\": self.checkpoint[\"num_played_steps\"],\n \"num_reanalysed_games\": self.checkpoint[\"num_reanalysed_games\"],\n },\n open(os.path.join(self.config.results_path, \"replay_buffer.pkl\"), \"wb\"),\n )", "def on_iterate(self, data: Any = None):\n raise NotImplementedError", "def _run(self, **params):\n# if softEvidence is None:\n# self.softEvidence = self.mln.softEvidence\n# else:\n# self.softEvidence = softEvidence\n # initialize chains\n chains = MCMCInference.ChainGroup(self)\n for i in range(self.chains):\n chain = GibbsSampler.Chain(self, self.queries)\n chains.chain(chain)\n# if self.softEvidence is not None:\n# chain.setSoftEvidence(self.softEvidence)\n # do Gibbs sampling\n# if verbose and details: print \"sampling...\"\n converged = 0\n steps = 0\n if self.verbose:\n bar = ProgressBar(color='green', steps=self.maxsteps)\n while converged != self.chains and steps < self.maxsteps:\n converged = 0\n steps += 1\n print('STEP {} / {}'.format(steps, self.maxsteps))\n for chain in chains.chains:\n chain.step()\n if self.verbose:\n bar.inc()\n bar.label('%d / %d' % (steps, self.maxsteps))\n# if self.useConvergenceTest:\n# if chain.converged and numSteps >= minSteps:\n# converged += 1\n# if verbose and details:\n# if numSteps % infoInterval == 0:\n# print \"step %d (fraction converged: %.2f)\" % (numSteps, float(converged) / numChains)\n# if numSteps % resultsInterval == 0:\n# chainGroup.getResults()\n# chainGroup.printResults(shortOutput=True)\n # get the results\n return chains.results()[0]", "def run(self, iteration_key):\n record_provider = SqlDocumentProvider(iteration_key, self.case_accessor())\n logger = SQLBasedProgressLogger(iteration_key)\n processor = BulkDocProcessor(record_provider, self.doc_processor(self.domain),\n progress_logger=logger)\n processed, skipped = processor.run()\n return processed, skipped, logger.logs", "def __call__(self, stage, itr):\n if not self.logWeights:\n weightNames = list(stage.logWeights.keys())\n getLogger(__name__).info(\"Start collecting log weights. 
\"\n \"Found %s at trajectory %d.\", weightNames, itr)\n self.logWeights = {name: [] for name in weightNames}\n for key, val in stage.logWeights.items():\n self.logWeights[key].append(complex(val))", "def run_one_learning_iteration(\n current_beta, current_mu, batch_size, true_data_mean, max_k_updates,\n max_mu_updates, global_iteration, optimal_vals_logliks_mus,\n within_run_vals_logliks):\n count = 0\n while count < max_k_updates:\n count += 1\n updated_beta, value_delta = update_discriminator(\n current_beta, current_mu, batch_size, true_data_mean)\n current_beta = updated_beta\n\n count = 0\n while count < max_mu_updates:\n count += 1\n updated_mu, value_delta = update_generator(current_beta, current_mu,\n batch_size, true_data_mean,\n global_iteration)\n current_mu = updated_mu\n\n val_loglik = compute_val_loglik(current_beta, current_mu, true_data_mean,\n batch_size)\n val_loglik_mu = val_loglik + (current_mu,)\n optimal_vals_logliks_mus.append(val_loglik_mu)\n\n num_within_run_graphs = 30\n within_run_vals_logliks.append([\n compute_val_loglik(current_beta, m, true_data_mean, batch_size) for m\n in np.linspace(-3, true_data_mean + 3, num_within_run_graphs)])\n\n\n print 'UPDATING BETA...\\n beta PRE: {}'.format(current_beta)\n print ' beta POST: {}'.format(updated_beta)\n print 'UPDATING MU...\\n mu PRE: {}'.format(current_mu)\n print ' mu POST: {}'.format(updated_mu)\n return current_beta, current_mu, optimal_vals_logliks_mus, \\\n within_run_vals_logliks", "def run(self):\n import time\n\n\n for msr in self.msrs:\n # find state transition matrix\n phi_p, state_prop = self._compute_stm(msr.time)\n\n # use stm to propagate perturbation and covariance\n pert_m = np.matmul(phi_p, self.pert_vec[-1])\n cov_m = np.matmul(phi_p, np.matmul(self.cov_list[-1],\n np.transpose(phi_p)))\n\n\n # compute observation deviation, obs_state matrix\n y_i, h_tilde = self._msr_resid(msr, state_prop)\n\n # calculate kalman gain\n k_gain = self._calc_k_gain(cov_m, h_tilde, msr.cov)\n\n # measurement update\n cov_p, pert_p = self._measurement_update(y_i,\n h_tilde,\n pert_m,\n k_gain,\n cov_m)\n\n # update the state lists\n self.residuals.append(y_i)\n self.prop_state_list.append(state_prop)\n self.estimates.append(np.add(state_prop, np.transpose(pert_p))[0])\n self.cov_list.append(cov_p)\n self.pert_vec.append(pert_p)\n self.times.append(msr.time)", "def run_optimization(self):\n # Get batch\n (obs, action, old_logp, old_value, return_, advantage) = self.buffer.eject()\n\n # Train pi\n print(\"-\" * 20 + \"\\nPi Update\" + \"\\n\" + \"-\" * 20)\n (policy_loss, entropy,\n kl_divergence, clipping_fraction, steps) = self.update_actor(obs, action, old_logp, advantage)\n\n # Train value function\n print(\"-\" * 20 + \"\\nValue Function Update\" + \"\\n\" + \"-\" * 20)\n (value_loss,\n explained_variance) = self.update_critic(obs, old_value, return_)\n\n # Logging\n self.update_counter += 1\n self.log_update(policy_loss, entropy, kl_divergence, clipping_fraction,\n value_loss, explained_variance, steps)\n\n # Update learning rate\n self.decay_lr()\n\n # Save current weights (overwrites previous weights)\n self.save_weights()\n\n # Empty scenario counter\n self.scenario_counter = dict.fromkeys(self.scenario_counter, 0)", "def eval(self, algorithm: RLAlgorithm, step_metric_values: Dict[str, int]):\n with alf.summary.push_summary_writer(self._summary_writer):\n logging.info(\"Start evaluation\")\n metrics = evaluate(self._env, algorithm,\n self._config.num_eval_episodes)\n common.log_metrics(metrics)\n for metric 
in metrics:\n metric.gen_summaries(\n train_step=alf.summary.get_global_counter(),\n other_steps=step_metric_values)", "def iterate(self, datapoint, reset=False):\n val = self.compute_local_statistic(datapoint)\n return super().iterate(datapoint=val, reset=reset)", "def nextIter(self):\n\t\tpass", "def __next__(self) -> Union[None, Tuple[int, Dict[str, Any], Dict[str, Any]]]:\n self.epoch += 1\n self.iter_num += 1\n\n if self.iter_num > 1:\n\n # iterator exhaustion check\n if self.epoch > self.max_epoch:\n raise StopIteration\n\n # exit flag 1, when stop_fn succeeds in train_step or test_step\n if self.stop_fn_flag:\n raise StopIteration\n\n # set policy in train mode\n self.policy.train()\n\n epoch_stat: Dict[str, Any] = dict()\n\n if self.show_progress:\n progress = tqdm.tqdm\n else:\n progress = DummyTqdm\n\n # perform n step_per_epoch\n with progress(\n total=self.step_per_epoch, desc=f\"Epoch #{self.epoch}\", **tqdm_config\n ) as t:\n while t.n < t.total and not self.stop_fn_flag:\n data: Dict[str, Any] = dict()\n result: Dict[str, Any] = dict()\n if self.train_collector is not None:\n data, result, self.stop_fn_flag = self.train_step()\n t.update(result[\"n/st\"])\n if self.stop_fn_flag:\n t.set_postfix(**data)\n break\n else:\n assert self.buffer, \"No train_collector or buffer specified\"\n result[\"n/ep\"] = len(self.buffer)\n result[\"n/st\"] = int(self.gradient_step)\n t.update()\n\n self.policy_update_fn(data, result)\n t.set_postfix(**data)\n\n if t.n <= t.total and not self.stop_fn_flag:\n t.update()\n\n # for offline RL\n if self.train_collector is None:\n self.env_step = self.gradient_step * self.batch_size\n\n if not self.stop_fn_flag:\n self.logger.save_data(\n self.epoch, self.env_step, self.gradient_step, self.save_checkpoint_fn\n )\n # test\n if self.test_collector is not None:\n test_stat, self.stop_fn_flag = self.test_step()\n if not self.is_run:\n epoch_stat.update(test_stat)\n\n if not self.is_run:\n epoch_stat.update({k: v.get() for k, v in self.stat.items()})\n epoch_stat[\"gradient_step\"] = self.gradient_step\n epoch_stat.update(\n {\n \"env_step\": self.env_step,\n \"rew\": self.last_rew,\n \"len\": int(self.last_len),\n \"n/ep\": int(result[\"n/ep\"]),\n \"n/st\": int(result[\"n/st\"]),\n }\n )\n info = gather_info(\n self.start_time, self.train_collector, self.test_collector,\n self.best_reward, self.best_reward_std\n )\n return self.epoch, epoch_stat, info\n else:\n return None", "def _step(self):\n # Make a minibatch of training data\n num_train = self.X_train.shape[0]\n # random choose the samples\n batch_mask = np.random.choice(num_train, self.batch_size)\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.loss_history.append(loss)\n\n # Perform a parameter update\n for p, w in self.model.params.items():\n dw = grads[p]\n config = self.optim_configs[p]\n next_w, next_config = self.update_rule(w, dw, config)\n self.model.params[p] = next_w\n self.optim_configs[p] = next_config", "async def process_reports(self):\n features = [features for (__, features) in self.updates]\n\n # Faster way to deep flatten a list of lists compared to list comprehension\n feature_dataset = list(chain.from_iterable(features))\n\n # Training the model using all the features received from the client\n sampler = all_inclusive.Sampler(feature_dataset)\n self.algorithm.train(feature_dataset, sampler,\n Config().algorithm.cut_layer)\n\n # Test the updated model\n 
self.accuracy = self.trainer.test(self.testset)\n logging.info('[Server #{:d}] Global model accuracy: {:.2f}%\\n'.format(\n os.getpid(), 100 * self.accuracy))\n\n await self.wrap_up_processing_reports()", "def iterate(self):", "def _evaluate(self, training_state, val_iter, val_metric):\n val_iter.reset()\n val_metric.reset()\n\n for nbatch, eval_batch in enumerate(val_iter):\n self.module.forward(eval_batch, is_train=False)\n self.module.update_metric(val_metric, eval_batch.label)\n\n for name, val in val_metric.get_name_value():\n logger.info('Checkpoint [%d]\\tValidation-%s=%f', training_state.checkpoint, name, val)\n\n return self.training_monitor.eval_end_callback(training_state.checkpoint, val_metric)", "def __iter__(self):\n if not self.loading:\n self.reset_loading()\n self.current_batch_index = 0\n return self", "def process(self, accumulator: [FLContext], fl_ctx: FLContext):\n # The model data is in model.params as a dict.\n model = fl_ctx.get_model()\n vars_to_aggregate = [set(item.get_model().params) for item in accumulator]\n vars_to_aggregate = set.union(*vars_to_aggregate)\n\n for v_name in vars_to_aggregate:\n n_local_iters, np_vars = [], []\n for item in accumulator:\n data = item.get_model()\n if v_name not in data.params:\n continue # this item doesn't have the variable from client\n\n # contribution is a protobuf msg\n # it has `n_iter` which represents number of local iterations \n # used to compute this contribution \n acc = item.get_prop('_contribution')\n float_n_iter = np.float(acc.n_iter)\n n_local_iters.append(float_n_iter)\n\n # weighted using local iterations\n weighted_value = proto_to_ndarray(data.params[v_name]) * float_n_iter\n np_vars.append(weighted_value)\n if not n_local_iters:\n continue # didn't receive this variable from any clients\n new_val = np.sum(np_vars, axis=0) / np.sum(n_local_iters)\n new_val += proto_to_ndarray(model.params[v_name])\n\n # Update the params in model using CopyFrom because it is a ProtoBuf structure\n model.params[v_name].CopyFrom(ndarray_to_proto(new_val))\n return False", "def apply(self, simulation):\n t = simulation.time\n dt = simulation.timeStep\n if main_rank == 0:\n simulation.printState()\n # OpenCL update\n self.numMethod(self.gpu_field.gpu_data[self.component],\n self.color)\n self.window.widget.updateGL()\n if simulation.currentIteration > 1:\n self.window.label.setText(\n self.labelText + \"t={0:6.2f}, fps={1:6.2f}\".format(\n t + dt,\n 1. 
/ (self.timer.f_timers.values()[0].t - self.ctime)))\n self.ctime = self.timer.f_timers.values()[0].t", "def thread_main(self, sess):\n for x_h, x_m, y in self.data_iterator():\n sess.run(self.enqueue_op, feed_dict={self.x_h:x_h, self.x_m:x_m})", "def single_step(self):\n # Make a minibatch of training data by choosing \"batch_size\" elements with replacement\n num_train = self.X_train.shape[0]\n batch_mask = np.random.choice(num_train, self.batch_size) # random choice with replacement\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.latest_loss = loss\n\n # Perform a parameter update based on chosen optimiser\n for p, w in self.model.params.items():\n dw = grads[p] # current gradients\n config = self.optim_configs[p] # moments of gradients and learning rate till previous accuracy() call\n next_w, next_config = optimiser_type(self.optim_type, w, dw, config) # sent to choice of optimising technique\n self.model.params[p] = next_w # model params updated\n self.optim_configs[p] = next_config # # moments of gradients updated", "def deepmd_single_process_continue_iter(deepmd_graph_dir: str,\n deepmd_data: Dict,\n iter_index: int,\n need_continue: bool):\n # Training and freezing the model\n deepmd_run(iter_index, deepmd_graph_dir, deepmd_data, need_continue)", "def go(self):\n if self.model_initializer is None:\n raise ValueError(\"Experiment guidelines must be set before starting optimization\")\n\n _reporter_params = dict(dict(do_maximize=self.do_maximize), **self.reporter_parameters)\n self.logger = OptimizationReporter([_.name for _ in self.dimensions], **_reporter_params)\n\n self.tested_keys = []\n self._set_hyperparameter_space()\n self._find_similar_experiments()\n\n loop_start_time = datetime.now()\n self._optimization_loop()\n loop_end_time = datetime.now()\n G.log_(f\"Optimization loop completed in {loop_end_time - loop_start_time}\")\n G.log_(f'Best score was {self.best_score} from Experiment \"{self.best_experiment}\"')\n self._clean_up_optimization()", "def run(self):\n print('PSO start running...')\n self.init_population()\n self.iterator()\n print(\"Iteration completed.\")\n self.plot_curve()\n print_params(self.GlobalBest_Pos, self.candidate, net=self.net)", "def intermediate(self, alg_mod, iter_count, obj_value,\n inf_pr, inf_du, mu, d_norm, regularization_size,\n alpha_du, alpha_pr, ls_trials):\n # TODO: Document the arguments\n pass", "def _process(self):\n while True:\n with Timer() as data_timer:\n frame = self._frames_q.get()\n\n with Timer() as agent_timer:\n s, frame_metadata = self._unwrap_frame(frame)\n s = np.expand_dims(s, 0) # batch\n act = self.pred(s)[0][0].argmax()\n put_overwrite(self._actions_q, self._wrap_action(act, frame_metadata))\n\n print('.', end='', flush=True)\n if self.verbose:\n print('Avg data wait time: %.3f' % data_timer.time())\n print('Avg agent neural net eval time: %.3f' % agent_timer.time())", "def thread_main(self, sess):\n for x_h, x_m, y in self.data_iterator():\n sess.run(self.enqueue_op, feed_dict={self.x_h:x_h, self.x_m:x_m, self.y:y})", "def __iter__(self):\n while True:\n results = self.poll()\n for x in results:\n yield x\n if not results:\n time.sleep(self.poll_delay)", "def run(self, obs_data, eps, step, n_samples, logger=sys.stdout, info=False, rng=np.random):\n\n ps = []\n n_accepted = 0\n cur_dist = None\n n_dim = self.cur_ps.size\n\n logger = open(os.devnull, 'w') if logger is None else logger\n\n for i in 
range(n_samples):\n\n prop_ps = self.cur_ps + step * rng.randn(n_dim)\n prop_data = self.sim_model(prop_ps, rng=rng)\n prop_dist = calc_dist(prop_data, obs_data)\n\n # acceptance / rejection step\n if prop_dist < eps:\n\n prop_log_prior = self.prior.eval(prop_ps, log=True)\n\n if rng.rand() < np.exp(prop_log_prior - self.cur_log_prior):\n\n self.cur_ps = prop_ps\n self.cur_log_prior = prop_log_prior\n cur_dist = prop_dist\n n_accepted += 1\n\n ps.append(self.cur_ps.copy())\n\n logger.write('iter = {0}, dist = {1:.3}, acc rate = {2:.2%}\\n'.format(i, cur_dist, float(n_accepted) / (i+1)))\n\n ps = np.array(ps)\n acc_rate = float(n_accepted) / n_samples\n\n if info:\n return ps, acc_rate\n else:\n return ps", "def epoch_logs( self, progress_bar, iteration:int, output: SimpleNamespace, prev_mechanism_weights: List[float], next_mechanism_weights: List[float] ):\n self_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.public_key )\n stake = self.metagraph.S[ self_uid ].item()\n rank = self.metagraph.R[ self_uid ].item()\n incentive = self.metagraph.I[ self_uid ].item()\n info = {\n 'GS': colored('{}'.format(self.global_step), 'red'),\n 'LS': colored('{}'.format(iteration), 'blue'),\n 'Epoch': colored('{}'.format(self.epoch+1), 'green'),\n 'Loss': colored('{:.4f}'.format(self.epoch_loss), 'yellow'),\n 'Best': colored('{:.4f}'.format(self.best_epoch_loss), 'red'),\n 'L-loss': colored('{:.4f}'.format(output.local_target_loss.item()), 'blue'),\n 'R-loss': colored('{:.4f}'.format(output.remote_target_loss.item()), 'green'),\n 'D-loss': colored('{:.4f}'.format(output.distillation_loss.item()), 'yellow'),\n 'nPeers': colored(self.metagraph.n.item(), 'red'),\n 'Stake(\\u03C4)': colored('{:.3f}'.format(stake), 'green'),\n 'Rank(\\u03C4)': colored('{:.3f}'.format(rank), 'blue'),\n 'Incentive(\\u03C4/block)': colored('{:.6f}'.format(incentive), 'yellow'),\n }\n for uid in self.metagraph.uids.tolist():\n if next_mechanism_weights[uid] != 0:\n weight_dif = next_mechanism_weights[uid] - prev_mechanism_weights[uid]\n if weight_dif > 0:\n info[colored(str(uid), 'green')] = colored('{:.4f}'.format(next_mechanism_weights[uid]), 'green')\n elif weight_dif == 0:\n info[str(uid)] = colored('{:.4f}'.format(next_mechanism_weights[uid]), 'white')\n else:\n info[colored(str(uid), 'red')] = colored('{:.4f}'.format(next_mechanism_weights[uid]), 'red')\n\n progress_bar.set_infos( info )\n\n if self.config.neuron.use_tensorboard:\n self.tensorboard.add_scalar('R-loss', output.remote_target_loss.item(), self.global_step)\n self.tensorboard.add_scalar('L-loss', output.local_target_loss.item(), self.global_step)\n self.tensorboard.add_scalar('D-loss', output.distillation_loss.item(), self.global_step)", "def loop_nonThreaded():\n nonlocal index, total\n nonlocal d_tree\n nonlocal fn_inputReadCallback\n nonlocal fn_analysisCallback\n nonlocal fn_outputWriteCallback\n nonlocal dret_inputSet\n nonlocal dret_analyze\n nonlocal dret_outputSet\n nonlocal str_desc\n\n b_analyzeStatusHist: bool = False\n b_inputStatusHist: bool = False\n b_outputStatusHist: bool = False\n\n if int(self.verbosityLevel) and self.toConsole():\n iterator = tqdm( self.d_inputTree.items(),\n desc = str_desc)\n else:\n iterator = self.d_inputTree.items()\n\n for path, data in iterator:\n dret_inputSet = {}\n dret_analyze = {}\n dret_outputSet = {}\n # Read (is sometimes skipped) / Analyze / Write (also sometimes skipped)\n if fn_inputReadCallback:\n dret_inputSet = inputSet_read(path, data)\n try:\n b_inputStatusHist = b_inputStatusHist or 
dret_inputSet['status']\n except:\n pass\n if fn_analysisCallback:\n try:\n dret_analyze = analysis_do(path, d_tree[path], index)\n except:\n dret_analyze['status'] = False\n self.dp.qprint(\"Analysis failed\", comms = 'error')\n try:\n b_analyzeStatusHist = b_analyzeStatusHist or dret_analyze['status']\n except:\n pass\n if fn_outputWriteCallback:\n if 'status' in dret_analyze.keys():\n if dret_analyze['status']:\n dret_outputSet = outputSet_write(path, d_tree[path])\n try:\n b_outputStatusHist = b_outputStatusHist or dret_outputSet['status']\n except:\n pass\n index += 1\n dret_inputSet['status'] = b_inputStatusHist\n dret_analyze['status'] = b_analyzeStatusHist\n dret_outputSet['status'] = b_outputStatusHist\n tree_removeDeadBranches()", "def _train_loop(self, X, update_counter, context_mask):\n\n epoch = 0\n prev_activation = np.zeros((self.map_dim, self.data_dim))\n influences, learning_rates = self._param_update(0, len(update_counter))\n\n for idx, x in enumerate(progressbar(X)):\n\n prev_activation = self._example(x, influences, prev_activation=prev_activation)\n\n if idx in update_counter:\n\n epoch += 1\n influences, learning_rate = self._param_update(epoch, len(update_counter))\n\n if idx in context_mask:\n\n prev_activation = np.zeros((self.map_dim, self.data_dim))", "def __iter__(self):\n yield from self.calls", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 
0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.full_neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def run(self, verbose=True, max_total_iterations=50000):\n self.verbose = verbose\n\n # Upper bounds on number of evaluations\n self.max_total_iterations = max_total_iterations\n\n self.initialise_mean_and_count()\n self.directed_edges = []\n self.active_strategy_profiles = []\n self.initialise_queue()\n\n # Forced initial exploration\n self.forced_exploration()\n\n # Keep evaluating nodes until check method declares that we're finished\n iterations = 0\n edges_resolved_this_round = []\n while self.total_interactions < max_total_iterations:\n # Add nodes to queue\n self.add_to_queue(removed=edges_resolved_this_round)\n\n # Evaluate the nodes and log results\n for v, _ in self.evaluate_strategy_profile():\n if verbose:\n print(v)\n\n # Recompute confidence bounds, eliminate, stop etc.\n edges_resolved_this_round = self.check_confidence()\n\n if not self.edges_remaining:\n break\n iterations += 1\n\n # Fill in missing edges if max iters reached without resolving all edges\n self.compute_graph()\n\n # Compute objects to be returned\n if verbose:\n total_steps = self.compute_total_steps()\n print('\\nTotal steps taken = {}'.format(total_steps))\n results = {}\n results['interactions'] = int(np.sum(self.count[0]))\n graph = self._construct_digraph(self.directed_edges)\n results['graph'] = graph\n return results", "def _on_step(self) -> None:\n self._n_calls += 1\n # Account for multiple environments\n # each call to step() corresponds to n_envs transitions\n if self._n_calls % max(self.target_update_interval // self.n_envs, 1) == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n # Copy running stats, see GH 
issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n self.logger.record(\"rollout/exploration_rate\", self.exploration_rate)", "def batch_and_learn(i, lock=threading.Lock()):\n nonlocal step, stats\n timings = prof.Timings()\n while step < flags.total_steps:\n timings.reset()\n batch, agent_state = get_batch(\n flags,\n free_queue,\n full_queue,\n buffers,\n initial_agent_state_buffers,\n timings,\n )\n stats = learn(\n flags, model, learner_model, batch, agent_state, optimizer, scheduler\n )\n timings.time(\"learn\")\n with lock:\n to_log = dict(step=step)\n to_log.update({k: stats[k] for k in stat_keys})\n plogger.log(to_log)\n step += T * B\n\n if i == 0:\n logging.info(\"Batch and learn: %s\", timings.summary())", "def iterator(self, *args, **kwargs):\n kwargs['logger'] = self\n return MonitorIterator(*args, **kwargs)", "def update_learning_rate(self, it):\n self.scheduler.step()\n for param_group in self.optimizer.param_groups:\n v = param_group['lr']\n self.tb_logger.add_scalar('train/lr', v, it)", "def __call__(self):\n if self.numbatches is None:\n pool = self.pooler()\n if self.batchsize is None:\n self.batchsize = self.pooler.nInPool()\n self.numbatches = self.pooler.nInPool()//self.batchsize\n for i in xrange(self.numbatches):\n pool = self.pooler()\n self._reset_batch()\n if self.samplemethod == 'balance' and len(self.keysamplers)>0:\n batchinds,keyids = self._samplebalanced(pool)\n elif self.samplemethod == 'uniform':\n batchinds,keyids = self._sampleuniform(pool)\n else:\n batchinds,keyids = self._samplesequential(i)\n batch = self._extractInds(pool,batchinds,keyids)\n for k in batch:\n batch[k][np.isnan(batch[k])] = self.nanreplacement\n yield batch", "def iteration(self):\n T = self.generate_T()\n R = self.reproduce(T)\n self.P = self.choose_mi_best(R)\n #print(self.P)", "def run(self):\n self._stats.reset_results()\n for row in self._rows:\n self._process_row(row)\n self.__worker.wait()\n self._handle_if_errors()\n return self._stats._results", "def batch_and_learn(i, lock=threading.Lock()):\n nonlocal step, stats\n timings = prof.Timings()\n while step < flags.total_steps:\n timings.reset()\n batch, agent_state = get_batch(\n flags,\n free_queue,\n full_queue,\n buffers,\n initial_agent_state_buffers,\n timings,\n )\n stats = learn(\n flags, model, learner_model, batch, agent_state, optimizer, scheduler\n )\n timings.time(\"learn\")\n with lock:\n to_log = dict(step=step)\n to_log.update({k: stats[k] for k in stat_keys})\n plogger.log(to_log)\n step += T #* B # just count the parallel steps \n # end batch_and_learn\n \n if i == 0:\n logging.info(\"Batch and learn: %s\", timings.summary())", "def train(self, current_hyper_params):\n train_loss = 0\n train_n_iter = 0\n # Set model to train mode\n self.model.train()\n # Iterate over train data\n print(\"Iterating over training data...\")\n for i, batch in enumerate(tqdm(self.train_loader)):\n loss = self._train_batch(batch)\n # Statistics\n train_loss += loss.item()\n train_n_iter += 1\n self.stats.train_loss_history.append(train_loss / train_n_iter)", "def __call__(self, y, *args, **kwargs):\n\t\tself.ylast = y.copy()\n\n\t\tfor k in range(self.params['maxit']):\n\t\t\ty = self.F(y,*args,**kwargs)\n\t\t\tif self.stopit(k, y, self.ylast, self) and k >= self.params['minit']:\n\t\t\t\tbreak\n\t\t\tnp.copyto(self.ylast,y)\n\t\telse:\n\t\t\tException(\"No convergence after %i 
iterations.\"%k)\n\n\t\tself.stats['niterations'] += k\n\t\tself.stats['ncalls'] += 1\n\t\tself.stats['niterations/call'] = self.stats['niterations']/self.stats['ncalls']\n\t\treturn y", "def algorithm_loop(self):", "def run(self):\n\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient, 2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n x = self.draw_normal()\n gradient = self.cv_gradient(x)\n gradient[np.isnan(gradient)] = 0\n self.change_parameters(self.optim.update(gradient))\n\n if self.printer is True:\n self.print_progress(i, self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, elbo_records", "def _batch_iter(self, source, target, i: int):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n # the result and loss\n result = self.model(source)\n loss = self.criterion(result, target)\n\n # optimization and backward\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n # update the loss\n self.epoch_loss.update(loss.item(), source.size(0))\n\n # print the information\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Batch: { i } | loss: { self.epoch_loss.avg }\", end=\"\")\n\n # clean the data\n del source, target\n\n return result", "def run(self):\n\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient, 2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n x = self.draw_normal()\n gradient = self.cv_gradient(x)\n gradient[np.isnan(gradient)] = 0\n self.change_parameters(self.optim.update(gradient))\n\n if self.printer is True:\n self.print_progress(i, self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 
1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.full_neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, elbo_records", "def step(self):\r\n self.datacollector.collect(self)\r\n self.datacollector2.collect(self)\r\n self.datacollector3.collect(self)\r\n self.datacollector4.collect(self)\r\n self.datacollector5.collect(self)\r\n self.datacollector6.collect(self)\r\n self.datacollector7.collect(self)\r\n self.datacollector8.collect(self)\r\n self.datacollector9.collect(self)\r\n self.datacollector10.collect(self)\r\n self.datacollector11.collect(self)\r\n self.datacollector12.collect(self)\r\n self.datacollector13.collect(self)\r\n\r\n self.datacollector14.collect(self)\r\n self.datacollector15.collect(self)\r\n self.datacollector16.collect(self)\r\n self.datacollector17.collect(self)\r\n self.datacollector18.collect(self)\r\n self.datacollector19.collect(self)\r\n self.datacollector20.collect(self)\r\n self.datacollector21.collect(self)\r\n self.datacollector22.collect(self)\r\n self.datacollector23.collect(self)\r\n self.datacollector24.collect(self)\r\n self.datacollector25.collect(self)\r\n self.datacollector26.collect(self)\r\n self.schedule.step()", "def run(self, context):\n self.start()\n while not context.is_finished():\n self.update_predictions(context)\n new_results, instances = self.evaluate()\n if new_results:\n self.write_results(new_results, instances)\n self.update_plot(new_results, instances)\n\n if self.plot_file:\n self.visualizer.savefig(self.plot_file)\n if self.show_plot:\n self.visualizer.show()\n self.end()", "def _fit(self,\n train_iter: data_io.ParallelBucketSentenceIter,\n val_iter: data_io.ParallelBucketSentenceIter,\n output_folder: str,\n max_params_files_to_keep: int,\n metrics: List[AnyStr],\n max_updates: int,\n checkpoint_frequency: int,\n max_num_not_improved: int,\n min_num_epochs: Optional[int] = None,\n mxmonitor: Optional[mx.monitor.Monitor] = None):\n metric_train = self._create_eval_metric(metrics)\n metric_val = self._create_eval_metric(metrics)\n\n tic = time.time()\n\n training_state_dir = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)\n if os.path.exists(training_state_dir):\n train_state = self.load_checkpoint(training_state_dir, train_iter)\n else:\n train_state = _TrainingState(\n num_not_improved=0,\n epoch=0,\n checkpoint=0,\n updates=0,\n samples=0\n )\n\n next_data_batch = train_iter.next()\n\n while max_updates == -1 or train_state.updates < max_updates:\n\n # <EcoSys> Added the profiler start and end point.\n import numba.cuda as cuda\n\n if train_state.updates == 501:\n cuda.profile_start()\n if train_state.updates == 511:\n cuda.profile_stop()\n # </EcoSys>\n\n if not train_iter.iter_next():\n train_state.epoch += 1\n train_iter.reset()\n\n # process batch\n batch = next_data_batch\n\n if mxmonitor is not None:\n mxmonitor.tic()\n\n self.module.forward_backward(batch)\n self.module.update()\n\n if mxmonitor is not None:\n results = mxmonitor.toc()\n if results:\n for _, k, v in 
results:\n logger.info('Monitor: Batch [{:d}] {:s} {:s}'.format(train_state.updates, k, v))\n\n if train_iter.iter_next():\n # pre-fetch next batch\n next_data_batch = train_iter.next()\n self.module.prepare(next_data_batch)\n\n self.module.update_metric(metric_train, batch.label)\n\n self.training_monitor.batch_end_callback(train_state.epoch, train_state.updates, metric_train)\n train_state.updates += 1\n train_state.samples += train_iter.batch_size\n\n if train_state.updates > 0 and train_state.updates % checkpoint_frequency == 0:\n train_state.checkpoint += 1\n self._save_params(output_folder, train_state.checkpoint)\n cleanup_params_files(output_folder, max_params_files_to_keep,\n train_state.checkpoint, self.training_monitor.get_best_checkpoint())\n self.training_monitor.checkpoint_callback(train_state.checkpoint, metric_train)\n\n toc = time.time()\n logger.info(\"Checkpoint [%d]\\tUpdates=%d Epoch=%d Samples=%d Time-cost=%.3f\",\n train_state.checkpoint, train_state.updates, train_state.epoch,\n train_state.samples, (toc - tic))\n tic = time.time()\n\n for name, val in metric_train.get_name_value():\n logger.info('Checkpoint [%d]\\tTrain-%s=%f', train_state.checkpoint, name, val)\n metric_train.reset()\n\n # evaluation on validation set\n has_improved, best_checkpoint = self._evaluate(train_state, val_iter, metric_val)\n if self.lr_scheduler is not None:\n self.lr_scheduler.new_evaluation_result(has_improved)\n\n if has_improved:\n best_path = os.path.join(output_folder, C.PARAMS_BEST_NAME)\n if os.path.lexists(best_path):\n os.remove(best_path)\n actual_best_fname = C.PARAMS_NAME % best_checkpoint\n os.symlink(actual_best_fname, best_path)\n train_state.num_not_improved = 0\n else:\n train_state.num_not_improved += 1\n logger.info(\"Model has not improved for %d checkpoints\", train_state.num_not_improved)\n\n if max_num_not_improved >= 0 and train_state.num_not_improved >= max_num_not_improved:\n logger.info(\"Maximum number of not improved checkpoints (%d) reached: %d\",\n max_num_not_improved, train_state.num_not_improved)\n stop_fit = True\n\n if min_num_epochs is not None and train_state.epoch < min_num_epochs:\n logger.info(\"Minimum number of epochs (%d) not reached yet: %d\",\n min_num_epochs,\n train_state.epoch)\n stop_fit = False\n\n if stop_fit:\n logger.info(\"Stopping fit\")\n self.training_monitor.stop_fit_callback()\n final_training_state_dirname = os.path.join(output_folder, C.TRAINING_STATE_DIRNAME)\n if os.path.exists(final_training_state_dirname):\n shutil.rmtree(final_training_state_dirname)\n break\n\n self._checkpoint(train_state, output_folder, train_iter)\n cleanup_params_files(output_folder, max_params_files_to_keep,\n train_state.checkpoint, self.training_monitor.get_best_checkpoint())", "def __iter__(self):\n index = len(self._logrecords)\n while index > 0:\n index -= 1\n yield self._logrecords[index]", "def run(self, num_iter: Union[int, None] =None) -> np.ndarray:\n # update iterations\n if num_iter is not None:\n self.num_iter = num_iter\n\n # run once to get the output shape\n out = np.asarray(self.func(**self.get_next_params(), **self._kwargs))\n\n # output container\n result = np.ones((self.num_iter, out.shape[0])) * np.nan\n\n # build input parameter\n params = (({**self.get_next_params(), **self._kwargs},i) for i in range(self.num_iter))\n\n # build the wrapper \n def wrap(tup):\n # unwrap the tuple\n par, i = tup\n \n # save result\n result[i] = self.func(**par)\n\n # create the parallel worker\n parallel_worker = Parallel(n_jobs=self.n_jobs, 
require='sharedmem')\n\n # create verbose or non verbose parameter generator\n if self._kwargs.get('verbose', False):\n func_gen = (delayed(wrap)(param) for param in params)\n else:\n func_gen = (delayed(wrap)(param) for param in tqdm(params, total=self.num_iter))\n \n # run the job in parallel\n parallel_worker(func_gen)\n\n # finished - return the result\n return result", "def step(self, estim: EstimBase) -> None:\n _, results = estim.get_last_results()\n results = [self.to_metrics(res) for res in results]\n self.metrics.extend(results)\n if len(self.metrics) >= len(self.population):\n self.population = self._mating(self.population)\n self.metrics = []", "def run(self):\n i = 0\n try:\n for i in range(0, self._iters):\n if self._verbose:\n print(\" Inner CG Iteration \" + repr(i))\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k, alpha_k,\n self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n if self._verbose:\n print(\"Converged at Iteration \" + str(i) + \".\")\n self.converged = True\n self.iteration = i+1\n return\n\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n self._rho_k = rho_k_plus_1\n\n if self._verbose >= 3:\n print(\" Residual=\" + repr(rho_k_t))\n except KeyboardInterrupt:\n raise\n finally:\n self.iteration = i+1", "def iter_epoch(self):\n\n # set to train mode\n self._set_train()\n\n # start epoch\n for i, (source, target) in enumerate(self.train_dataset):\n self._batch_iter(source, target, i)\n\n if self.info:\n print(f\"\\rEpoch: { self.epoch } | Average loss: { self.epoch_loss.avg }\")\n\n # update epoch and reset the epoch_loss\n self.epoch_loss.reset()\n self.epoch += 1", "def after_train_iter(self,\n runner,\n batch_idx: int,\n data_batch: DATA_BATCH = None,\n outputs: Optional[dict] = None) -> None:\n # Print experiment name every n iterations.\n if self.every_n_train_iters(\n runner, self.interval_exp_name) or (self.end_of_epoch(\n runner.train_dataloader, batch_idx)):\n exp_info = f'Exp name: {runner.experiment_name}'\n runner.logger.info(exp_info)\n if self.every_n_inner_iters(batch_idx, self.interval):\n tag, log_str = runner.log_processor.get_log_after_iter(\n runner, batch_idx, 'train')\n elif (self.end_of_epoch(runner.train_dataloader, batch_idx)\n and (not self.ignore_last\n or len(runner.train_dataloader) <= self.interval)):\n # `runner.max_iters` may not be divisible by `self.interval`. if\n # `self.ignore_last==True`, the log of remaining iterations will\n # be recorded (Epoch [4][1000/1007], the logs of 998-1007\n # iterations will be recorded).\n tag, log_str = runner.log_processor.get_log_after_iter(\n runner, batch_idx, 'train')\n else:\n return\n runner.logger.info(log_str)\n runner.visualizer.add_scalars(\n tag, step=runner.iter + 1, file_path=self.json_log_path)" ]
[ "0.67205477", "0.6347677", "0.634362", "0.61698365", "0.61429197", "0.6141512", "0.60966563", "0.599339", "0.5989898", "0.59228843", "0.59223664", "0.5900693", "0.5892551", "0.58662176", "0.58467567", "0.584372", "0.582183", "0.58217317", "0.58217317", "0.5800747", "0.5751619", "0.57142967", "0.56881905", "0.56487346", "0.5622366", "0.5618461", "0.5608925", "0.5597325", "0.55813277", "0.5576631", "0.5558902", "0.55454737", "0.5533819", "0.55317396", "0.5528168", "0.55112815", "0.5511232", "0.5506743", "0.55048734", "0.5502616", "0.5493754", "0.5489712", "0.5488822", "0.5488179", "0.5478177", "0.54706573", "0.54671204", "0.54570675", "0.5456217", "0.5453276", "0.54389894", "0.5436288", "0.5424637", "0.5419588", "0.541848", "0.5416497", "0.5416142", "0.5413724", "0.5411625", "0.5402948", "0.53970325", "0.5396287", "0.53938156", "0.5390968", "0.5388772", "0.5384174", "0.5374141", "0.5370007", "0.53694546", "0.5362715", "0.5361494", "0.53478336", "0.53444016", "0.53345597", "0.5325954", "0.53240687", "0.53240687", "0.5309258", "0.52990717", "0.5294978", "0.52925766", "0.5289717", "0.52856946", "0.52817446", "0.5278964", "0.5269137", "0.5253607", "0.5244458", "0.52414775", "0.52402353", "0.5238802", "0.5238032", "0.5235822", "0.5234841", "0.5234079", "0.5233999", "0.5225841", "0.52242386", "0.52235025", "0.5223041", "0.5218089" ]
0.0
-1
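The accept/reject loop at the top of the negatives above follows the ABC-MCMC pattern: propose by a Gaussian random walk, simulate data from the proposal, and accept only if the simulated data lands within tolerance eps of the observations, with a Metropolis correction on the prior. A minimal sketch of one such transition; sim_model, calc_dist, and the prior.eval API are assumptions mirroring the snippet, not a fixed library interface:

import numpy as np

def abc_mcmc_step(cur_ps, cur_dist, cur_log_prior, eps, step, prior,
                  sim_model, calc_dist, obs_data, rng):
    # Gaussian random-walk proposal in parameter space
    prop_ps = cur_ps + step * rng.randn(cur_ps.size)
    prop_dist = calc_dist(sim_model(prop_ps, rng=rng), obs_data)
    if prop_dist < eps:  # ABC tolerance check stands in for the likelihood
        prop_log_prior = prior.eval(prop_ps, log=True)  # assumed prior API
        if rng.rand() < np.exp(prop_log_prior - cur_log_prior):  # prior ratio
            return prop_ps, prop_dist, prop_log_prior, True  # accepted
    return cur_ps, cur_dist, cur_log_prior, False  # rejected, keep current state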
Samples observations from several samplers.
def sample(self) -> Tuple[List[List[StepSequence]], np.array, np.array]: ros = [] rets = [] all_lengths = [] for sampler in self.samplers: samples = sampler.sample() ros.append(samples) rets.extend([sample.undiscounted_return() for sample in samples]) all_lengths.extend([sample.length for sample in samples]) return ros, np.array(rets), np.array(all_lengths)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def samples(self):\n pass", "def load_samplers(self):\n for sampler in self.gltf.samplers:\n # Use a sane default sampler if the sampler data is empty\n # Samplers can simply just be json data: \"{}\"\n if sampler.minFilter is sampler.magFilter is None:\n self.samplers.append(\n self.ctx.sampler(\n filter=(moderngl.LINEAR_MIPMAP_LINEAR, moderngl.LINEAR),\n repeat_x=False,\n repeat_y=False,\n anisotropy=16.0,\n )\n )\n else:\n self.samplers.append(\n self.ctx.sampler(\n filter=(sampler.minFilter, sampler.magFilter),\n repeat_x=sampler.wrapS in [REPEAT, MIRRORED_REPEAT],\n repeat_y=sampler.wrapT in [REPEAT, MIRRORED_REPEAT],\n anisotropy=16.0,\n )\n )", "def draw_samplers(self):\n raise NotImplementedError(\" The draw_samplers() method has not been implemented \")", "def set_samplers(self, samplers):\n self._derived_properties[\"samplers\"] = samplers\n self.nsamplers = len(self.samplers)", "def sample(self, num_samples, **kwargs):\n pass", "def build_sampler(self, params):\n\n def sampler(logits_1, logits_2, key):\n q_kwargs = dict(transpose=True) if self.use_transpose else {}\n x = self.model.bind(params).sample(logits_1, key)\n y = self.model.bind(params).sample(logits_2, key, **q_kwargs)\n return jnp.zeros([10, 10]).at[x, y].set(1.)\n\n return sampler", "def sampler(self, *args, **kwargs):\n\n return (samples_subgraphs ** 2).tolist()", "def create_samples(self, skills_sample_fraction=1.0, users_sample_fraction=1.0):\n # Sampling\n self.sample_skills_to_be_covered(skills_sample_fraction)\n self.sample_users(users_sample_fraction)", "def __call__(self, samples_number):\n self.sampler.sample(samples_number)", "def run(self, repetitions, **kwargs):\n\t\tself.sampler.sample(repetitions, **kwargs)", "def _sampler(self, value, sample_args):\n valid = []\n for filter_, sampler in _FILTERS_AND_SAMPLERS:\n if filter_(value):\n valid.append(sampler)\n if not valid:\n raise ValueError('No valid samplers found: value={} sample_args={}'\n .format(value, sample_args))\n return random.choice(valid)", "def get_samplers(test, count):\n samplers = []\n for i in range(count):\n create = require(test.next_call_of(\"vkCreateSampler\"))\n require_equal(VK_SUCCESS, int(create.return_val))\n require_not_equal(0, create.int_device)\n require_not_equal(0, create.hex_pSampler)\n sampler = little_endian_bytes_to_int(require(create.get_write_data(\n create.hex_pSampler, NON_DISPATCHABLE_HANDLE_SIZE)))\n require_not_equal(0, sampler)\n samplers.append(sampler)\n return samplers", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def samples(self, gp, Y_metadata=None, samples=1):\n raise NotImplementedError(\"\"\"May be possible to use MCMC with user-tuning, see\n MCMC_pdf_samples in likelihood.py and write samples function\n using this, beware this is a simple implementation\n of Metropolis and will not work well for all likelihoods\"\"\")", "def sample(self,p0=None,nsamp=None): \r\n raise NotImplementedError('Need to implement sample function')", "def samples(self, gp):\r\n raise NotImplementedError", "def _generate(self, **kwargs):\n N = self.parameter_schema['N']\n parameter_count = len(self._parameter_names)\n common_override_kwargs = {}\n override_kwargs = self._sampler_overrides(common_override_kwargs)\n if kwargs:\n kwargs.update(override_kwargs)\n else:\n kwargs = override_kwargs\n __import__(\"SALib.sample\", fromlist=[self.sampler_class])\n sampler = getattr(SALib.sample, self.sampler_class)\n problem = self.parameter_schema[\"problem\"]\n self._samples = 
sampler.sample(problem, N, **kwargs)\n self._samples = numpy.unique(self._samples, axis=0)\n super()._generate()", "def run_model_sampler(Y, latent_dim, n_iter):\n\tF_sample = []\n\tloading_sample = []\n\tvariance_sample = []\n\ttrace_sample = []\n\tmse_history = []\n\tF = initiate_factors(Y, latent_dim)\n\tfor i in tqdm(range(n_iter)):\n\t\tF, loading_matrix, Y_variance, gp_traces, mse = gibbs_sampling(F, Y)\n\t\tF_sample.append(F)\n\t\tloading_sample.append(loading_matrix)\n\t\tvariance_sample.append(Y_variance)\n\t\ttrace_sample.append(gp_traces)\n\t\tmse_history.append(mse)\n\treturn F_sample, loading_sample, variance_sample, trace_sample, mse_history", "def samples(self, samples):\n\n self._samples = samples", "def set_samplers(self):\n\n if self.length_scale_indexes is None:\n ignore_index = None\n if not self.noise or self.data.get('var_noise') is not None:\n ignore_index = [0, 1]\n\n slice_parameters = {\n 'max_steps_out': self.max_steps_out,\n 'component_wise': True,\n }\n self.slice_samplers.append(SliceSampling(\n wrapper_log_prob, range(self.dimension_parameters), ignore_index=ignore_index,\n **slice_parameters))\n else:\n slice_parameters = {\n 'max_steps_out': self.max_steps_out,\n 'component_wise': False,\n }\n indexes = [i for i in range(self.dimension_parameters) if i not in\n self.length_scale_indexes]\n ignore_index = None\n if not self.noise or self.data.get('var_noise') is not None:\n ignore_index = [0, 1]\n\n if ORNSTEIN_KERNEL in self.type_kernel:\n if ignore_index is None:\n ignore_index = []\n ignore_index += [2]\n\n if len(indexes) != len(ignore_index):\n self.slice_samplers.append(\n SliceSampling(\n wrapper_log_prob, indexes, ignore_index=ignore_index, **slice_parameters))\n\n slice_parameters['component_wise'] = True\n self.slice_samplers.append(SliceSampling(wrapper_log_prob, self.length_scale_indexes,\n **slice_parameters))\n\n if self.start_point_sampler is not None and len(self.start_point_sampler) > 0:\n if len(self.samples_parameters) == 0:\n self.samples_parameters.append(np.array(self.start_point_sampler))\n else:\n self.samples_parameters = []\n self.samples_parameters.append(self.get_value_parameters_model)\n if self.n_burning > 0:\n parameters = self.sample_parameters(float(self.n_burning) / (self.thinning + 1))\n self.samples_parameters = []\n self.samples_parameters.append(parameters[-1])\n self.start_point_sampler = parameters[-1]\n else:\n self.start_point_sampler = self.get_value_parameters_model", "def sample_streams(self, sample_streams):\n\n self._sample_streams = sample_streams", "def sample(self):\n\n # pick sample type according to probability\n samplers = [\"unif\", \"geo\", \"diverse\"]\n sample_idx = np.random.multinomial(\n 1, [self.unif_prob, self.geo_prob, self.diverse_prob])\n idx = np.argmax(sample_idx)\n sampler = samplers[idx]\n\n if sampler == \"unif\":\n return self.unif_sampler()\n if sampler == \"geo\":\n return self.geo_sampler()\n if sampler == \"diverse\":\n return self.diverse_sampler()", "def sample(self):\r\n raise NotImplementedError", "def initSamples(self):\n # Define this (note random.uniform is helpful here!)\n for i in range(self.numParticles):\n # Create particles uniformly and generate same weights for all particles.\n particle = random.uniform(self.minValue, self.maxValue)\n self.samples.append(particle)\n self.weights.append(1/self.numParticles)", "def samples_set(self):\n self.get_samples_set(self.samples_db)\n self.choose_samples(self.chosen_samples_db, self.chosen_hashes)", "def sample(self, *args, 
**kwargs):", "def create_samples(self):\n for s_id in range(len(self.data[\"sample\"])):\n self.samples.add(Sample(s_id, [self.data[key][s_id] for key in self.data.keys() if key not in WRONG_KEYS],\n self.data[\"label\"][s_id]))", "def samples(self):\n if not self._samples:\n self._samples = defaultdict(list)\n blobs = self.blobs()\n sequencing = self._get_entities('sequencing')\n for s in self._get_entities('sample'):\n s = sample_factory(s, workspace=self, blobs=blobs, sequencing=sequencing, avro_path=self.avro_path)\n self._samples[s.subject_id].append(s)\n if s.missing_sequence:\n self.missing_sequence = s.missing_sequence\n return self._samples", "def create_samples(self):\n self._samples = self.load_samples()\n self.modify_samples()", "def initialise_sampler(self):\n raise NotImplementedError", "def test_validation_correct_samplers():\n samplers = [\n {'type': 'MultiStateSampler', 'locality': 3},\n {'type': 'ReplicaExchangeSampler'},\n # MCMCMove 'single' is defined in get_template_script().\n {'type': 'SAMSSampler', 'mcmc_moves': 'single'},\n {'type': 'ReplicaExchangeSampler', 'number_of_iterations': 5, 'replica_mixing_scheme': 'swap-neighbors'},\n {'type': 'ReplicaExchangeSampler', 'number_of_iterations': 5, 'replica_mixing_scheme': None}\n ]\n exp_builder = ExperimentBuilder(get_template_script())\n for sampler in samplers:\n script = {'samplers': {'sampler1': sampler}}\n yield exp_builder._validate_samplers, script", "def sample(self):\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError", "def samples(self, groups=None):\n groups = check_parameters_for_validity(\n groups, \"groups\", self.groups(), self.groups()\n )\n all_samples = []\n\n if \"train\" in groups:\n all_samples.extend(self.background_model_samples())\n groups.remove(\"train\")\n\n for grp in groups:\n all_samples.extend(_sample_sets_to_samples(self.references(grp)))\n all_samples.extend(_sample_sets_to_samples(self.probes(grp)))\n\n # Add znorm samples. 
Returning znorm samples for one group of dev or\n # eval is enough because they are duplicated.\n for grp in groups:\n all_samples.extend(_sample_sets_to_samples(self.zprobes(grp)))\n break\n\n # Add tnorm samples.\n all_samples.extend(_sample_sets_to_samples(self.treferences()))\n\n return all_samples", "def sample(self, verbose=False):\n\t\tfor i in range(self.nsamp): self.subsample(verbose)\n\t\treturn self.amps, self.pos, self.irads", "def sample(self, observation):\n raise NotImplementedError", "def __call__(self, params):\r\n return self.sample(params)", "def score_samples(self, X):\n ...", "def oversampling(models, X_train, y_train):\n print(\"SMOTE...\")\n sm = over_sampling.SMOTE(random_state=42, n_jobs=2)\n X_train, y_train = sm.fit_sample(X_train, y_train)\n for m in models:\n nobs = len(X_train[y_train == m])\n\n return X_train, y_train", "def sample(self):\n logger.info(\"%s: collect sensor data\", self.__class__.__name__)\n samples = []\n self._fetch_data(samples)\n return samples", "def easy_sample(self, num, **kwargs):\n return self.preprocess(self.sample(num, **kwargs), **kwargs)", "def sampling(X_train, y_train, X_test, y_test, sampling_instances, model_instances, func):\n\n metrics = []\n # go through all sampling methods\n for sampling_instance in sampling_instances:\n if sampling_instance is not None:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = sampling_instance.fit_resample(X=X_train, y=y_train)\n else:\n print('fitting sampling '+ str(sampling_instances.index(sampling_instance) + 1) + ' on ' +\n str(len(sampling_instances)), \" : \", type(sampling_instance).__name__)\n X_train1, y_train1 = X_train, y_train\n\n # Go through all models\n for model_instance in model_instances:\n print('fitting model ' + str(model_instances.index(model_instance) + 1) + ' on ' +\n str(len(model_instances)), \" : \", type(model_instance).__name__)\n model_instance.fit(X_train1, y_train1)\n metrics.append(func(y_test, model_instance.predict(X_test)))\n\n models = [type(model).__name__ for model in model_instances]\n methods = [type(sampling).__name__ for sampling in sampling_instances]\n index = [model + '_' + method for model in models for method in methods]\n\n #Dry run of compute metrics with return_index=True to get indexes\n columns = func(y_test, y_test, average='weighted', return_index=True)\n metrics = pd.DataFrame(metrics, columns=columns, index=index)\n\n return metrics", "def sample(self):", "def sample(self, like_params):\n\t\traise NotImplementedError", "def _sample_likelihood_params(self):\r\n self._sample_omega()\r\n self._sample_beta()\r\n self._sample_r()", "def double_sampling(X_train, y_train, X_test, y_test, sampling_instances1, sampling_instances2, model_instances, func):\n\n list_of_df_metrics = []\n\n for sampling_instance1 in sampling_instances1:\n print('First Round:')\n if sampling_instance1 is not None:\n print('fitting sampling of 1st round '+ str(sampling_instances1.index(sampling_instance1) + 1) + ' over ' + str(len(sampling_instances1)) + ': ' + type(sampling_instance1).__name__)\n X_train_1st_round, y_train1st_round = sampling_instance1.fit_resample(X=X_train, y=y_train)\n print('Second Round:')\n else:\n print('No 1st round Sampling methods applied')\n X_train_1st_round, y_train1st_round = X_train, y_train\n\n df_metrics = sampling(X_train_1st_round, y_train1st_round, X_test, y_test, sampling_instances2, 
model_instances, func)\n\n list_of_df_metrics.append(df_metrics)\n\n df_metrics_all = pd.concat(list_of_df_metrics, keys=[type(x).__name__ for x in sampling_instances1], names=['First Sampling Round'])\n\n return df_metrics_all", "def sample(self):\n return [agent_observation_space.sample() for agent_observation_space in self._agents_observation_space]", "def list_samples(arn=None, nextToken=None):\n pass", "def score_samples(self, x):\n raise NotImplementedError", "def test_sampling1 ():\n cpus = list(range(C.N_PARALLEL))\n affinity = dict(cuda_idx=C.CUDA_IDX, workers_cpus=cpus)\n agent_ = findOptimalAgent(reward=None)\n agent = CategoricalPgAgent(AcrobotNet, \n initial_model_state_dict=agent_.state_dict())\n s0 = np.array([1, 0, 1/np.sqrt(2), 1/np.sqrt(2), 4, 2], dtype=np.float)\n sampler = SerialSampler(\n EnvCls=rlpyt_make,\n env_kwargs=dict(id=C.ENV, reward=None, internalStateFn=C.INTERNAL_STATE_FN, s0=s0),\n batch_T=500,\n batch_B=16,\n max_decorrelation_steps=0,\n )\n sampler.initialize(\n agent=agent,\n affinity=affinity,\n seed=0\n )\n _, traj_info = sampler.obtain_samples(0)\n print(np.mean([t['DiscountedReturn'] for t in traj_info]))", "def get_samples(**project_filters):\n p = get_project(**project_filters)\n return Sample.query.filter_by(_project_id=p.id).all()", "def sample(self, detections):\n\n raise NotImplementedError", "def get_samples(self, min_samples):\n raise NotImplementedError", "def sample(self, processor, batch_size=None):\n pass", "def samples(\n self,\n pcollection_ids: Optional[Iterable[str]] = None\n ) -> beam_fn_api_pb2.SampleDataResponse:\n ret = beam_fn_api_pb2.SampleDataResponse()\n\n with self._samplers_lock:\n samplers = self._samplers.copy()\n\n for pcoll_id in samplers:\n if pcollection_ids and pcoll_id not in pcollection_ids:\n continue\n\n samples = samplers[pcoll_id].flush()\n if samples:\n ret.element_samples[pcoll_id].elements.extend(samples)\n\n return ret", "def mksampler(x):\n if hasattr(x, \"__call__\"):\n return x\n elif type(x) is list:\n # NB: disjoint ranges can be given as nested lists, e.g. 
[(1,2), (4,5)]\n if len(x) == 2 and type(x[0]) in (int,float) and type(x[1]) in (int,float):\n #print \"MKSAMPLER: Casting %s to UniformSampler\" % str(x)\n return UniformSampler(*x)\n elif len(x) > 2 or (len(x) > 0 and type(x[0]) not in (int,float)):\n #print \"MKSAMPLER: Casting %s to DisjointUniformSampler\" % str(x)\n return DisjointUniformSampler(x)\n if len(x) < 2:\n raise Exception(\"Supplied list could not be converted to a continuous sampler\")\n elif type(x) is tuple:\n #print \"MKSAMPLER: Casting %s to CyclicSeqSampler\" % str(x)\n return CyclicSeqSampler(*x)\n elif type(x) is set:\n #print \"MKSAMPLER: Casting %s to RandomSeqSampler\" % str(x)\n return RandomSeqSampler(*x)\n else:\n #print \"MKSAMPLER: Casting %s to ConstSampler\" % str(x)\n return ConstSampler(x)", "def sample_survey(self, **kwargs):", "def _compute_samples(self, samples):\n return samples", "def sample_stats(self, analytes=None, filt=True,\n stat_fns=[np.nanmean, np.nanstd],\n eachtrace=True):\n if analytes is None:\n analytes = self.analytes\n elif isinstance(analytes, str):\n analytes = [analytes]\n\n self.stats = {}\n self.stats_calced = [f.__name__ for f in stat_fns]\n\n # calculate stats for each sample\n for s in self.samples:\n if self.srm_identifier not in s:\n self.data_dict[s].sample_stats(analytes, filt=filt,\n stat_fns=stat_fns,\n eachtrace=eachtrace)\n\n self.stats[s] = self.data_dict[s].stats", "def samples(self):\n if self._samples:\n return self._samples\n if SAMPLE_DF_KEY not in self or self[SAMPLE_DF_KEY] is None:\n _LOGGER.debug(\"No samples are defined\")\n return []", "def apply(recorders, args) -> None:\n operation = _get(args)\n for recorder in recorders:\n recorder.apply_sample(operation)", "def initialize_samplers(\n self,\n transform_id: str,\n descriptor: beam_fn_api_pb2.ProcessBundleDescriptor,\n coder_factory) -> List[OutputSampler]:\n transform_proto = descriptor.transforms[transform_id]\n with self._samplers_lock:\n if transform_id in self._samplers_by_output:\n return self._samplers_by_output[transform_id]\n\n # Initialize the samplers.\n for pcoll_id in transform_proto.outputs.values():\n # Only initialize new PCollections.\n if pcoll_id in self._samplers:\n continue\n\n # Create the sampler with the corresponding coder.\n coder_id = descriptor.pcollections[pcoll_id].coder_id\n coder = coder_factory(coder_id)\n sampler = OutputSampler(\n coder, self._max_samples, self._sample_every_sec)\n self._samplers[pcoll_id] = sampler\n\n # Next update the lookup table for ElementSamplers for a given PTransform.\n # Operations look up the ElementSampler for an output based on the index\n # of the tag in the PTransform's outputs. 
The following code intializes\n # the array with ElementSamplers in the correct indices.\n outputs = transform_proto.outputs\n samplers = [self._samplers[pcoll_id] for pcoll_id in outputs.values()]\n self._samplers_by_output[transform_id] = samplers\n\n return samplers", "def sample(self, batchsize, *args, **kwargs):\n raise NotImplementedError", "def search_samples():\n r = req('GET', SUB_API + 'search/samples', params=apply_search_filters())\n samples = []\n for sample in demisto.get(r.json(), 'data.items'):\n samples.append({\n 'ID': demisto.get(sample, 'result'),\n 'Details': demisto.get(sample, 'details')\n })\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample': samples},\n 'HumanReadable': tableToMarkdown('ThreatGrid - Sample Search', samples, ['Result', 'Details']),\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })", "def _generate_distribution_samples(self, set_count, parameter_count):\n self._samples = numpy.zeros((set_count, parameter_count))\n for i, distribution in enumerate(self.parameter_distributions.values()):\n self._samples[:, i] = distribution.ppf(self._quantiles[:, i])", "def samples(self):\n return self._values[:self.nsamples]", "def __init__(self, samples):\n self.samples = samples", "def test_sampler_building(self):\n with mmtools.utils.temporary_directory() as tmp_dir:\n template_script = self.get_implicit_template_script(tmp_dir)\n template_script['options']['resume_setup'] = True\n default_number_of_iterations = template_script['options']['default_number_of_iterations']\n\n # Add tested samplers.\n template_script['samplers'] = {\n 'my-sampler1': {\n 'type': 'ReplicaExchangeSampler',\n 'number_of_iterations': 9,\n 'replica_mixing_scheme': 'swap-neighbors',\n },\n 'my-sampler2': {\n 'type': 'MultiStateSampler',\n 'locality': 5\n }\n }\n\n def check_default_number_of_iterations(phase, sampler_description):\n if 'number_of_iterations' not in sampler_description:\n assert phase.sampler.number_of_iterations == default_number_of_iterations\n\n # Test that options are passed to the sampler correctly.\n for sampler_id, sampler_description in template_script['samplers'].items():\n template_script['experiments']['sampler'] = sampler_id\n constructor_description = template_script['samplers'][sampler_id]\n yield (self.check_constructor, template_script, constructor_description,\n 'sampler', None, check_default_number_of_iterations)", "def _samples(self):\n finite_types = \\\n [QuiverMutationType(t) for t in [['A', 1], ['A', 5], ['B', 2], ['B', 5],\n ['C', 3], ['C', 5], ['D', 2], ['D', 5],\n [\"E\", 6], [\"E\", 7], [\"E\", 8], [\"F\", 4],\n [\"G\", 2]]]\n affine_types = \\\n [QuiverMutationType(t) for t in [['A', [1,1], 1], ['A', [4,5], 1], ['D', 4, 1], ['BB', 5, 1]]]\n elliptic_types = \\\n [QuiverMutationType(t) for t in [['E', 6, [1,1]], ['E', 7, [1,1]]]]\n mutation_finite_types = \\\n [QuiverMutationType(t) for t in [['R2',(1,5)], ['R2',(3,5)]]]\n mutation_infinite_types = \\\n [QuiverMutationType(t) for t in [['E',10], ['BE',5], ['GR',(3,10)], ['T',(3,3,4)]]]\n\n return finite_types + affine_types + elliptic_types + mutation_finite_types + mutation_infinite_types", "def sample(self):\n raise NotImplementedError(\"Override me!\")", "def samples(self):\n return self._samples", "def generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):\n yield random.sample(test_universe, sample_size)", "def initial_samples(lb, ub, method, 
numSamp):\r\n if not len(lb) == len(ub):\r\n raise AssertionError('Lower and upper bounds have different #s of design variables in initial_samples function.')\r\n assert method == 'random' or method == 'nolh' or method == 'nolh-rp' or method == 'nolh-cdr' or method == 'lhc' or method == 'rand-wor', 'An invalid method was specified for the initial_samples.'\r\n assert (method == 'nolh' or method == 'nolh-rp' or method == 'nolh-cdr') and len(ub) >= 2 and len(ub) <= 29, 'The Phase space dimensions are outside of the bounds for initial_samples.'\r\n for case in Switch(method):\r\n if case('random'):\r\n s = np.zeros((numSamp, len(lb)))\r\n for i in range(0, numSamp, 1):\r\n s[i, :] = lb + (ub - lb) * rand(len(lb))\r\n\r\n break\r\n if case('rand-wor'):\r\n s = np.zeros((numSamp, len(lb)))\r\n for i in range(0, numSamp, 1):\r\n s[i, :] = choice(len(ub), size=len(ub), replace=False)\r\n\r\n break\r\n if case('nolh'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf = range(q)\r\n if r != 0:\r\n remove = range(dim - r, dim)\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('nolh-rp'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf = random.sample(range(q), q)\r\n if r != 0:\r\n remove = random.sample(range(q - 1), r)\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('nolh-cdr'):\r\n dim = len(ub)\r\n m, q, r = params(dim)\r\n conf, remove = get_cdr_permutations(len(ub))\r\n if remove != []:\r\n nolh = NOLH(conf, remove)\r\n else:\r\n nolh = NOLH(conf)\r\n s = np.array([ list(lb + (ub - lb) * nolh[i, :]) for i in range(len(nolh[:, 0]))\r\n ])\r\n break\r\n if case('lhc'):\r\n tmp = lhs(len(lb), samples=numSamp, criterion='center')\r\n s = np.array([ list(lb + (ub - lb) * tmp[i, :]) for i in range(len(tmp[:, 0]))\r\n ])\r\n break\r\n if case():\r\n print 'Somehow you evaded my assert statement - good job!',\r\n print ' However, you still need to use a valid method string.'\r\n\r\n return s", "def collect_samples(self):\n self.replay_buffer = self.collect_initial_batch(\n self.replay_buffer, self.acm_pre_train_samples\n )", "def sample(self, data, chains=2, iter=1024, warmup=None, **kwargs):\n\n kwds = self._validate_stan_inputs(\n data=data, chains=chains, iter=iter, warmup=warmup, **kwargs)\n return self._model.sampling(**kwds)", "def sample(self,burnin,thinning,samples,append=True):\n\n\t\tif hasattr(self, 'sampled_topics') and append == True:\n\n\t\t\tsampled_topics = samplers_lda.sampler(self.docid,self.tokens,self.sampled_topics[self.samples-1,:],\n\t\t\t\t\t\t\t\t\tself.N,self.V,self.K,self.D,self.alpha,self.beta,\n\t\t\t\t\t\t\t\t\tburnin,thinning,samples)\n\n\t\t\ttt_temp = self.tt_comp(sampled_topics)\n\t\t\tdt_temp = self.dt_comp(sampled_topics)\n\n\t\t\tself.sampled_topics = np.concatenate((self.sampled_topics,sampled_topics))\n\t\t\tself.tt = np.concatenate((self.tt,tt_temp),axis=2)\n\t\t\tself.dt = np.concatenate((self.dt,dt_temp),axis=2)\n\n\t\t\tself.samples = self.samples + samples\n\n\t\telse:\n\n\t\t\tself.samples = samples\n\n\t\t\tself.sampled_topics = samplers_lda.sampler(self.docid,self.tokens,self.topic_seed,\n\t\t\t\t\t\t\t\t\t\tself.N,self.V,self.K,self.D,self.alpha,self.beta,\n\t\t\t\t\t\t\t\t\t\tburnin,thinning,self.samples)\n\n\t\t\tself.tt = self.tt_comp(self.sampled_topics)\n\t\t\tself.dt = 
self.dt_comp(self.sampled_topics)", "def get_samples(self):\n \n # determine correct string to split the data on\n split = {\n 'v1.0-trainval': {True: 'train', False: 'val'},\n 'v1.0-mini': {True: 'mini_train', False: 'mini_val'},\n }[nusc.version][self.is_train]\n\n # filters the appropriate scene names\n scenes = create_splits_scenes()[split]\n \n # collects samples with matching scene names\n samples = [samp for samp in nusc.sample if\n nusc.get('scene', samp['scene_token'])['name'] in scenes]\n\n return samples", "def sampler_to_iterator(dataset, sampler):\n for sample in sampler:\n if isinstance(sample, (list, tuple)):\n # yield a batch\n yield [dataset[i] for i in sample]\n else:\n # yield a single example\n yield dataset[sample]", "def sample(self, x):", "def __iter__(self):\n for sample in self.samples:\n yield sample", "def get_samples_list(self):\n return self.samples_list", "def sample_input_domain(num_samples):\n s1 = np.random.random(num_samples) * 10\n s2 = np.random.random(num_samples) * 2 - 5\n s3 = np.random.random(num_samples)\n s4 = np.random.random(num_samples) * 30 + 20\n return s1, s2, s3, s4", "def samples(self, u=None):\n roots = [u]\n if u is None:\n roots = self.roots\n for root in roots:\n yield from self._sample_generator(root)", "def sample(self, num, **kwargs):\n raise NotImplementedError(f'Should be implemented in derived class!')", "def take_samples(self, num_samples: int) -> List:\n if num_samples > len(self.samples):\n return random.sample(self.samples, len(self.samples))\n return random.sample(self.samples, num_samples)", "def start_sampler_threads(self, _sender, **_unused_msg):\n pass\n # try:\n # for sampler in traverse_nested(sender.get_sampler()):\n # if sampler is None:\n # continue\n # sampler.run_threads(self.num_threads)\n # tf.logging.info('filling queues (this can take a few minutes).')\n # except (NameError, TypeError, AttributeError, IndexError):\n # tf.logging.fatal(\n # \"samplers not running, pop_batch_op operations \"\n # \"are blocked.\")\n # raise", "def evaluate_samples(self, samples, eval_key=None, missing=None):\n pass", "def sample(self, state, model_args, model_kwargs):\n raise NotImplementedError", "def draw_samples(nsubj, ngroups, split_method='default'):\n if split_method == 'default':\n if nsubj > 10 * ngroups:\n samples = split_group(nsubj, ngroups)\n else:\n samples = bootstrap_group(nsubj, ngroups)\n elif split_method == 'bootstrap':\n samples = bootstrap_group(nsubj, ngroups)\n elif split_method == '':\n samples = split_group(nsubj, ngroups)\n else:\n raise ValueError('unknown splitting method')\n\n return samples", "def fixture_samples(sample_single) -> Iterator[dict]:\n _samples = []\n sample_id = sample_single[\"sample_id\"]\n for number in range(3):\n sample = copy.deepcopy(sample_single)\n sample[\"sample_id\"] = \"_\".join([sample_id, str(number)])\n _samples.append(sample)\n return _samples", "def test_get_all_samples(self):\n self.login()\n\n page_size = 20\n\n # hit the API endpoint for both pages\n for page in range(1, 3):\n\n data = {'page': page,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.all().order_by(\"-received\")\n\n paginator = Paginator(expected, page_size)\n res = paginator.page(page)\n\n # format queryset into json for returning\n serializer = SampleSerializer(res, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': (page == 1)\n }\n\n self.assertEqual(response.json(), context)\n 
self.assertEqual(response.status_code, status.HTTP_200_OK)", "def input(self, *args, **kwargs):\n return lambda wildcards: self.samples.map(*args, file=\"samples/all/runs/{sample_run}/samples.csv\", **wildcards, **kwargs)", "def sample(self, count):\n batch = deepcopy(random.sample(self.buffer, count))\n batch = list(zip(*batch))\n args, kwargs = batch[:-1], batch[-1]\n args = [np.stack(arr) for arr in args]\n out = dict(zip(self.keywords, list(zip(*[d.values() for d in kwargs]))))\n for k, v in out.items():\n out[k] = np.stack(v)\n return (*args, out)", "def getSamplesList(self):\n return self.sample_names", "async def _samples(cls, responses: SourceResponses) -> AsyncIterator[Samples]:\n rows = await cls.__parse_csv(responses)\n samples = [row for row in rows if not row[\"responseMessage\"].startswith(\"Number of samples in transaction\")]\n labels = {sample[\"label\"] for sample in samples}\n for label in sorted(labels):\n yield [sample for sample in samples if sample[\"label\"] == label]", "def test_sample(system_generator):\n\n name, test = system_generator()\n print(name)\n\n w_F, w_R, N_k = test.sample([10, 8], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([1, 1], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([10, 0], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([0, 5], mode=\"wFwR\")", "def generate_samples(self):\n self.analytic_probability()", "def data_source():\n dataset = [0.1, 0.2, 0.3, 0.4, 0.5]\n while True:\n time.sleep(2)\n yield random.choice(dataset)", "def _sample_synthetic(self, X):\n n_samples = X.shape[0]\n self.y = np.concatenate((np.ones(n_samples), np.zeros(n_samples)))\n \n random_state = _forest.check_random_state(self.random_state) \n\n X_synth = np.asarray([np.apply_along_axis(random_state.choice, 0, X) for _ in range(n_samples)])\n self.X = np.concatenate((X, X_synth))\n\n return self.X, self.y", "def get_data_and_model_samples(self):\n model_samples = (\n self.net_.sample_fantasy(\n x=self.model_samples_[-1],\n num_mc_steps=self.num_sample_mc_steps,\n beta=self.sample_beta,\n mc_dynamics=self.sampler,\n )\n .detach()\n .cpu()\n .numpy()\n )\n data_sample_ixs = torch.randint(\n 0, self.samples.shape[0], size=(model_samples.shape[0],)\n )\n data_samples = self.samples[data_sample_ixs, ...]\n return data_samples, model_samples" ]
[ "0.67290366", "0.66936", "0.66508645", "0.6528074", "0.6460033", "0.6458007", "0.63837016", "0.6380185", "0.6372922", "0.63533103", "0.63098216", "0.6204721", "0.6148462", "0.6148462", "0.6144332", "0.61341447", "0.6125732", "0.61021185", "0.60895056", "0.6007772", "0.59698457", "0.59687287", "0.5955888", "0.5945422", "0.59383196", "0.59179974", "0.5884791", "0.5877266", "0.5874533", "0.5870259", "0.5854025", "0.5853452", "0.58378696", "0.58378696", "0.583063", "0.5822746", "0.58157647", "0.5802257", "0.5797433", "0.57915926", "0.5777567", "0.5775868", "0.5760964", "0.5736618", "0.5733723", "0.5733204", "0.5729045", "0.5727251", "0.57087076", "0.57070976", "0.57025886", "0.5697704", "0.56973517", "0.5683021", "0.5682155", "0.5674125", "0.5672556", "0.5669079", "0.56660926", "0.56433195", "0.5636014", "0.56104857", "0.5608157", "0.56035674", "0.5602729", "0.5598635", "0.55825317", "0.5573346", "0.55727774", "0.55612874", "0.5558158", "0.5556689", "0.5553553", "0.55454963", "0.55421335", "0.55309516", "0.55295664", "0.5527838", "0.55243254", "0.5522784", "0.55189306", "0.5510126", "0.54993385", "0.54962176", "0.54871476", "0.54864854", "0.5477058", "0.5468625", "0.54684466", "0.5464737", "0.5458703", "0.54506695", "0.5449402", "0.5437978", "0.54365534", "0.5436068", "0.5434799", "0.5429538", "0.54253066", "0.5422303", "0.5422026" ]
0.0
-1
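The document above pools rollouts from several samplers into one nested list plus flat arrays of undiscounted returns and lengths. A runnable sketch of that aggregation with stand-in classes; FakeRollout and FakeSampler are assumptions, only the sample() contract comes from the snippet:

import numpy as np

class FakeRollout:
    # Stand-in for StepSequence: just the two members sample() touches
    def __init__(self, ret, length):
        self._ret = ret
        self.length = length
    def undiscounted_return(self):
        return self._ret

class FakeSampler:
    # Each call yields a fresh batch of fake rollouts
    def sample(self):
        return [FakeRollout(np.random.rand(), 50) for _ in range(4)]

samplers = [FakeSampler() for _ in range(3)]
ros, rets, all_lengths = [], [], []
for sampler in samplers:  # same aggregation as the document's sample()
    samples = sampler.sample()
    ros.append(samples)
    rets.extend(s.undiscounted_return() for s in samples)
    all_lengths.extend(s.length for s in samples)
print(len(ros), np.array(rets).mean(), np.array(all_lengths).mean())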
Update the policy's (and value functions') parameters based on the collected rollout data.
def update(self, *args: Any, **kwargs: Any): obss = [] losses = [] for t in range(self.num_teachers): concat_ros = StepSequence.concat(kwargs["rollouts"][t]) concat_ros.torch(data_type=to.get_default_dtype()) obss.append(concat_ros.get_data_values("observations")[: self.min_steps]) # Train student for epoch in range(self.num_epochs): self.optimizer.zero_grad() loss = 0 for t_idx, teacher in enumerate(self.teacher_policies): s_dist = self.expl_strat.action_dist_at(self.policy(obss[t_idx])) s_act = s_dist.sample() t_dist = self.teacher_expl_strats[t_idx].action_dist_at(teacher(obss[t_idx])) l = self.teacher_weights[t_idx] * self.criterion(t_dist.log_prob(s_act), s_dist.log_prob(s_act)) loss += l losses.append([t_idx, l.item()]) print(f"Epoch {epoch} Loss: {loss.item()}") loss.backward() self.optimizer.step()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def policy_update_fn(self, data: Dict[str, Any], result: Dict[str, Any]) -> None:", "def update_policy(self, *args, **kwargs):\r\n pass", "def update_policy(self):\n pass", "def update_Policy(self,inputpolicy):\n \n policyob = self.SD_Map.retrieve_ob(inputpolicy)\n policyob.values[-1] = self.PolicyDicts[inputpolicy][self.translate(self.policy_option_vars[inputpolicy].get(),\n input_language = self.language,\n output_language = 'english')]", "def update_policy(self):\n self.trainer_metrics.start_policy_update_timer(\n number_experiences=len(self.training_buffer.update_buffer[\"actions\"]),\n mean_return=float(np.mean(self.cumulative_returns_since_policy_update)),\n )\n self.cumulative_returns_since_policy_update = []\n n_sequences = max(\n int(self.trainer_parameters[\"batch_size\"] / self.policy.sequence_length), 1\n )\n value_total, policy_total = [], []\n advantages = self.training_buffer.update_buffer[\"advantages\"].get_batch()\n self.training_buffer.update_buffer[\"advantages\"].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10)\n )\n num_epoch = self.trainer_parameters[\"num_epoch\"]\n for _ in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(\n len(self.training_buffer.update_buffer[\"actions\"]) // n_sequences\n ):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(\n buffer.make_mini_batch(start, end), n_sequences\n )\n value_total.append(run_out[\"value_loss\"])\n policy_total.append(np.abs(run_out[\"policy_loss\"]))\n self.stats[\"Losses/Value Loss\"].append(np.mean(value_total))\n self.stats[\"Losses/Policy Loss\"].append(np.mean(policy_total))\n for _, reward_signal in self.policy.reward_signals.items():\n update_stats = reward_signal.update(\n self.training_buffer.update_buffer, n_sequences\n )\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n if self.policy.bc_module:\n update_stats = self.policy.bc_module.update()\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n self.training_buffer.reset_update_buffer()\n self.trainer_metrics.end_policy_update()", "def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n self.parameters[name] = value.value\n # directly change the value of class variables\n logMsg((\"update settings: \", self.ml_classifier_cls, name, value.value))\n setattr(self.ml_classifier_cls, name, value.value)\n\n pass", "def update_policy_values(policy_lookup, board, state, player, action, next_state, reward):\n\t\n\t# compute total expected reward including future rewards\n\tif board.check_end():\n\t\texpected = reward\n\telse:\n\t\tif player == 1:\n\t\t\texpected = reward + discount * min_value(policy_lookup, next_state, 2)\n\t\telif player == 2:\n\t\t\texpected = reward + discount * max_value(policy_lookup, next_state, 1)\n\t# get current policy action value\n\tpolicy_value = get_policy_value(policy_lookup, state, player, action)\n\t# update policy action value\n\tpolicy_lookup[(state, player)][action] += learning_rate * (expected - policy_value)", "def _run_policy(env, policy, num_rollouts):\n start_states, final_states, goal_states, actions, paths = [], [], [], [], []\n\n for i in range(num_rollouts):\n path = rollout(\n env,\n policy,\n max_path_length=100,\n animated=False,\n )\n obs = 
path[\"observations\"]\n acts = path[\"actions\"]\n\n goal_idx = np.argmax(obs[0, 2:])\n start_x, start_y = obs[0, 0], obs[0, 1]\n acts_x, acts_y = acts[:, 0], acts[:, 1]\n final_x, final_y = obs[len(obs) - 1, 0], obs[len(obs) - 1, 1]\n goal = env.goals[goal_idx]\n goal_x, goal_y = goal[0], goal[1]\n\n start_states.append(np.array([start_x, start_y]))\n final_states.append(np.array([final_x, final_y]))\n goal_states.append(np.array([goal_x, goal_y]))\n actions.append(np.array([acts_x, acts_y]))\n paths.append(path)\n\n return dict(\n start_states=np.array(start_states),\n final_states=np.array(final_states),\n goal_states=np.array(goal_states),\n actions=np.array(actions),\n paths=paths,\n env=env\n )", "def update_policy(self, policy, inverse_policy=None):\n self.make_T_policy_matrix(policy)\n self.inverse_dynamics_by_time = dict()\n self.policy = policy\n self.inverse_policy = inverse_policy", "def update_parameters(self, ob_no, hidden, ac_na, fixed_log_probs, q_n, adv_n):\n self.update_critic(ob_no, hidden, q_n)\n self.update_policy(ob_no, hidden, ac_na, fixed_log_probs, adv_n)", "def policies(self, policies):\n\n self._policies = policies", "def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)", "def apply_policy(cls, metadata, policy):\r\n for attr, value in policy.iteritems():\r\n attr = cls._translate(attr)\r\n if attr not in cls.fields:\r\n # Store unknown attributes coming from policy.json\r\n # in such a way that they will export to xml unchanged\r\n metadata['xml_attributes'][attr] = value\r\n else:\r\n metadata[attr] = value", "def policy_update(self):\n mini_batch = random.sample(self.data_buffer, self.batch_size)\n state_batch = [data[0] for data in mini_batch]\n mcts_probs_batch = [data[1] for data in mini_batch]\n winner_batch = [data[2] for data in mini_batch]\n loss, entropy = self.policy_value_net.train_step(\n state_batch,\n mcts_probs_batch,\n winner_batch,\n self.learn_rate)\n return loss, entropy", "def update(self):\n with torch.no_grad():\n self.preprocess_rollout()\n \n # DEEP-RL TUTORIALS: HACK\n #self.advantages = self.returns[:-1] - self.values[:-1]\n #self.advantages = (self.advantages - self.advantages.mean()) / (self.advantages.std() + 1e-5)\n \n # going through rollout several (config.epochs) times:\n for epoch in range(self.config.epochs):\n # TODO: drop last = False? 
What if there is 1 sample?\n sampler = BatchSampler(SubsetRandomSampler(range(self.env.num_envs * self.config.rollout)), self.config.batch_size, drop_last=False)\n \n for indices in sampler:\n # retrieving new batch as part of rollout\n self.returns_b = self.returns.view(-1, *self.config.value_repr_shape)[indices]\n self.old_values_b = self.values.view(-1, *self.config.value_repr_shape)[indices]\n self.old_action_log_probs_b = self.action_log_probs.view(-1)[indices]\n #self.advantages_b = self.advantages.view(-1)[indices] # HACK\n \n # calculating current value, action_log_prob, entropy\n dist, self.values_b = self.policy(self.observations.view(-1, *self.config.observation_shape)[indices])\n self.values_b = self.values_b.squeeze() # IMPORTANT ([32] - [32, 1] problem)\n self.action_log_probs_b = dist.log_prob(self.actions.view(-1, *self.config.actions_shape)[indices])#.sum(dim=-1) \n self.entropy_b = dist.entropy()#.sum(dim=-1)\n \n # performing step\n self.gradient_ascent_step()", "def _gen_policy_params(self, state: State) -> Tensor:\n ...", "def update_policy(self):\n n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)\n value_total, policy_total, forward_total, inverse_total = [], [], [], []\n advantages = self.training_buffer.update_buffer['advantages'].get_batch()\n self.training_buffer.update_buffer['advantages'].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10))\n num_epoch = self.trainer_parameters['num_epoch']\n for k in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences)\n value_total.append(run_out['value_loss'])\n policy_total.append(np.abs(run_out['policy_loss']))\n if self.use_curiosity:\n inverse_total.append(run_out['inverse_loss'])\n forward_total.append(run_out['forward_loss'])\n self.stats['value_loss'].append(np.mean(value_total))\n self.stats['policy_loss'].append(np.mean(policy_total))\n if self.use_curiosity:\n self.stats['forward_loss'].append(np.mean(forward_total))\n self.stats['inverse_loss'].append(np.mean(inverse_total))\n self.training_buffer.reset_update_buffer()", "def update_parameters(self, timestamp, inputs):\n pass", "def update(self):\n\n # get states, actions, rewards and total timesteps from memory\n states, actions, R, T = self.memory.get()\n n_ep = len(R)\n\n # compute value estimates for the states\n v = self.critic(states)\n\n # compute advantages (using GAE) and rewards to go\n A, rtg = utils.gae_rtg((R, v, T), self.gam, self.lam)\n\n # store the initial version of both the policy and the log probs of the\n # actions for later comparison with the future versions (needed for PPO)\n policy_old = copy.deepcopy(self.policy)\n log_probs_old = policy_old(states).log_prob(actions)\n\n # sample from a batch of experiences\n # (\"_\" subscript indicates \"sampled from\")\n for (v_, A_, rtg_, log_probs_old_), i in utils.sample_batch((v, A, rtg, log_probs_old), self.batch_size, self.policy_updates):\n log_probs_ = self.policy(states).log_prob(actions)[i]\n\n # estimate ratio between the new log probs and the old ones\n r_ = torch.exp(log_probs_ - log_probs_old_)\n\n l_1 = r_ * A_\n l_2 = torch.clamp(r_, 1-self.eps, 1+self.eps) * A_\n\n # TODO: implement entropy\n # TODO: merge policy and critic\n\n # surrogate loss 
function for PPO\n l_clip = -torch.mean(torch.min(l_1, l_2))\n\n # update the policy\n self.policy_optimizer.zero_grad()\n l_clip.backward(retain_graph=True)\n self.policy_optimizer.step()\n\n # sample a batch of value estimates and the corresponding rewards to go\n # to update the value function.\n for (v_, rtg_), _ in utils.sample_batch((v, rtg), self.batch_size, self.v_updates):\n # compute the loss\n critic_loss = F.mse_loss(v_, rtg_)\n\n # update the critic\n self.critic_optimizer.zero_grad()\n critic_loss.backward(retain_graph=True)\n self.critic_optimizer.step()\n\n # clear the memory. PPO is an On-Policy method so we don't need these\n # memories anymore\n self.memory.clear()\n\n # return the loss of the value function for display\n return F.mse_loss(v, rtg)", "def apply_policy(self, policy, method):\n action, optimal_value, move = policy(self, method)\n return action, optimal_value, move", "def UpdatePolicy(self, request, global_params=None):\n config = self.GetMethodConfig('UpdatePolicy')\n return self._RunMethod(\n config, request, global_params=global_params)", "def update_policy(self):\n self._sess.run(self._hard_copy_to_target_op);", "def update_policy(self):\n # this is update_policy \n # sample batch of 32 from the memory\n batch_of_samples = self.replay_memory.sample(batch_size=32)\n current_state_samples = batch_of_samples['current_state_samples']\n next_state_samples = batch_of_samples['next_state_samples']\n #print type(current_state_samples[0])\n #print current_state_samples\n\n # fetch stuff we need from samples 32*84*84*4\n current_state_images = np.zeros([1, 84, 84, 4])\n #print current_state_samples\n current_state_images[0,...] = np.dstack([sample.state for sample in current_state_samples])\n\n next_state_images = np.zeros([1, 84, 84, 4])\n next_state_images[0,...] 
= np.dstack([sample.state for sample in next_state_samples])\n\n # preprocess\n current_state_images = self.preprocessor.process_batch(current_state_images)\n next_state_images = self.preprocessor.process_batch(next_state_images)\n # print \"current_state_images {} max {} \".format(current_state_images.shape, np.max(current_state_images))\n #print current_state_images.shape\n q_current = self.q_network.predict(current_state_images,batch_size=self.batch_size) # 32*num_actions\n q_next = self.q_network.predict(next_state_images,batch_size=self.batch_size)\n\n # targets\n y_targets_all = q_current #1*num_actions\n #print y_targets_all.shape # [1,6]\n idx = 0 \n last_sample = current_state_samples[-1]\n if last_sample.is_terminal:\n y_targets_all[idx, last_sample.action] = last_sample.reward\n else:\n if self.mode == 'vanilla':\n y_targets_all[idx, last_sample.action] = np.float32(last_sample.reward) + self.gamma*np.max(q_next[idx])\n if self.mode == 'double': \n y_targets_all[idx, last_sample.action] = np.float32(last_sample.reward) + self.gamma*q_next[idx, np.argmax(q_current[idx])] \n\n loss = self.q_network.train_on_batch(current_state_images, np.float32(y_targets_all))\n\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_loss', value=loss, step=self.iter_ctr)\n\n if not (self.iter_ctr % self.log_loss_every_nth):\n self.dump_train_loss(loss)\n\n # if (self.iter_ctr > (self.num_burn_in+1)) and not(self.iter_ctr%self.target_update_freq):\n # # copy weights\n # print \"Iter {} Updating target Q network\".format(self.iter_ctr)\n # self.target_q_network.set_weights(self.q_network.get_weights())\n # [self.target_q_network.trainable_weights[i].assign(self.q_network.trainable_weights[i]) \\\n # for i in range(len(self.target_q_network.trainable_weights))]", "def update_parameters(self):\n # We update gamma, gamma0, lambda and nu in turn (Bottolo et al, 2011)\n self.update_gamma()\n self.update_gamma0()\n self.update_lambda()\n self.update_nu()\n if self.sample_xi:\n self.update_xi()", "def update_values(self, to_update):\n for key, value in kwargs.iteritems():\n self.params[key] = value\n # update the possibly dependent parameters\n self.set_filenames()", "def ppo_update(policy_model, policy_optimizer, baseline_model, baseline_optimizer, baseline_criterion,\n ppo_epoch, minibatch_size, obs_n_tensor, log_prob_n_old_tensor, action_n_tensor, rewards_n, mask_n,\n gamma, lam, update_policy=True, update_baseline=True):\n\n ## get number of data\n n_data = obs_n_tensor.shape[0]\n ## get baseline estimations\n baseline_n = baseline_model(obs_n_tensor).detach() ## between -1 and 1\n ## get q values, used for scaling baseline values\n q_n = get_reward_to_go(rewards_n, mask_n, gamma)\n q_n = Tensor(q_n).reshape(-1, 1)\n q_n_mean = q_n.mean()\n q_n_std = q_n.std() + 1e-2\n ## get scaled baseline values\n baseline_n_scaled = baseline_n * q_n_std + q_n_mean\n\n ## get advantage and scaled baseline targets\n ## the adv returned is normalized, baseline_target_scaled is not normalized\n adv_n, baseline_target_n_scaled = get_gae_advantage(rewards_n, baseline_n_scaled, mask_n, gamma, lam)\n adv_n = adv_n.detach()\n\n ## get baseline targets\n baseline_target_n = ((baseline_target_n_scaled - q_n_mean) / q_n_std).detach() ## target now is normalized\n\n ## NOTE MAKE SURE YOU DETACH THINGS OTHERWISE YOU WILL HAVE VERY STRANGE ERRORS\n\n ## for each training iteration, do some epoches\n for i_epoch in range(ppo_epoch):\n ## ppo: shuffle data\n shuffle_indexes = torch.randperm(n_data)\n obs_n_tensor = 
obs_n_tensor[shuffle_indexes]\n adv_n = adv_n[shuffle_indexes]\n log_prob_n_old_tensor = log_prob_n_old_tensor[shuffle_indexes]\n action_n_tensor = action_n_tensor[shuffle_indexes]\n baseline_target_n = baseline_target_n[shuffle_indexes]\n\n ## after shuffle data, we do the minibatch ppo update\n n_minibatch = int(n_data / minibatch_size)\n for i_minibatch in range(n_minibatch):\n ## get minibatch\n istart = i_minibatch * minibatch_size\n iend = (i_minibatch + 1) * minibatch_size\n obs_m = obs_n_tensor[istart:iend]\n adv_m = adv_n[istart:iend]\n log_prob_old_m = log_prob_n_old_tensor[istart:iend]\n action_m = action_n_tensor[istart:iend]\n baseline_target_m = baseline_target_n[istart:iend]\n\n ## update baseline\n if update_baseline:\n baseline_m = baseline_model(obs_m)\n baseline_optimizer.zero_grad()\n baseline_loss = baseline_criterion(baseline_m, baseline_target_m)\n baseline_loss.backward()\n baseline_optimizer.step()\n\n ## update policy\n ## we need the new policy's log probs so that we can calculate importance sampling term\n if update_policy:\n mu, log_sigma = policy_model(obs_m)\n normal_dist = Normal(mu, log_sigma.exp())\n log_prob_new_m = normal_dist.log_prob(action_m)\n log_prob_new_m = torch.sum(log_prob_new_m, dim=1).reshape(-1, 1)\n ## now we get the importance sampling weight term\n is_term_m = (log_prob_new_m - log_prob_old_m).exp()\n\n ## compute the clipped surrogate\n epsilon = 0.2\n first_term = is_term_m * adv_m\n clipped_term = is_term_m.clamp(1 - epsilon, 1 + epsilon) * adv_m\n obj_term = torch.min(first_term, clipped_term)\n obj_sum = obj_term.sum()\n policy_loss = -obj_sum / n_data\n policy_optimizer.zero_grad()\n policy_loss.backward()\n policy_optimizer.step()", "def apply_parameters(self):\n n_bins = int(self.record_length / self.bin_width)\n time_bins = self.bin_width * np.arange(n_bins)\n \n\n self.tau = np.arange(self.tau_start, self.tau_end+self.tau_delta, self.tau_delta)\n sequence = self.generate_sequence()\n self.n_laser = find_laser_pulses(sequence)\n \n FC.Configure(self.record_length, self.bin_width, self.n_laser)\n\n if self.keep_data and sequence == self.sequence and np.all(time_bins == self.time_bins): # if the sequence and time_bins are the same as previous, keep existing data\n self.old_count_data = self.count_data.copy()\n else:\n self.old_count_data = np.zeros_like(FC.GetData())\n \n self.sequence = sequence\n self.time_bins = time_bins\n self.n_bins = n_bins\n \n self.MW_source = {'mw':mw, 'mw2':mw2}[self.mw_source]", "def optimize_policy(self, all_samples_data, log=True):\n raise NotImplementedError", "def update_policy(self, req):\n\n with self.mutex:\n\n # Apply all gradients\n for gradient in req.gradients:\n\n self.global_policy.zero_grad()\n\n # Transfer gradient to global policy\n self.transfer_gradient(gradient)\n\n # Clip gradient\n torch.nn.utils.clip_grad_norm_(self.global_policy.parameters(),\n 1.0)\n # torch.nn.utils.clip_grad_value_(self.global_policy.parameters(),\n # 1.0)\n\n # Apply gradient\n self.optimizer.step()\n self.iteration_counter += 1\n\n self._log_iteration(req)\n\n if (self.save_path and\n self.save_steps > 0 and\n self.iteration_counter >= self.save_steps * self.i):\n self.save_model()\n self.i += 1\n\n weights = self.encode_weights()\n cov = max(1.0 - self.iteration_counter /\n float(self.exploration), 0.05)\n covariance = [cov, cov]\n\n return UpdateGlobalPolicyResponse(weights, covariance)", "def defineUpdateOperations(self):\n self.updated_value = tf.placeholder(shape=[1, self.network.action_size], 
dtype=tf.float32)\n self.loss = tf.reduce_sum(tf.square(self.updated_value - self.network.policyLayer))\n self.trainer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)\n\n self.updateModel = self.trainer.minimize(self.loss)", "def callback(self, rollout):\n assert len(self._t_switch)==len(self._scale)==len(self._k_star)\n # Log rollout statistics\n if self._ro_with_policy or self._unfinished_mix(len(rollout)):\n if self._unfinished_mix(len(rollout)):\n del self._k_star[-1]\n del self._t_switch[-1]\n del self._scale[-1]\n self._ind_ro_pol.append(self._n_ro)\n self._n_samples_ro_pol+=len(rollout)\n if self._ro_by_n_samples:\n self._ro_with_policy = self._n_samples_ro_pol<self._n_samples_ro_mix\n else:\n self._ro_with_policy = False\n else:\n self._ind_ro_mix.append(self._n_ro)\n self._n_samples_ro_mix+=len(rollout)\n self._ro_with_policy = True\n\n # unlock so `pi` can be called again\n self._locked =False\n self._n_ro+=1", "def update_policy(env, policy, V, discount_factor):\n\n for state in range(env.nS):\n # for a given state compute state-action value.\n action_values = one_step_lookahead(env, state, V, discount_factor)\n\n # choose the action which maximizes the state-action value.\n policy[state] = np.argmax(action_values)\n\n return policy", "def refresh(self):\n self._policies = self._get_policies()", "def _update_params(self):\n pass", "def handle_wps_update(self, data):\n\n self.jobs = data", "def update_params(self):\n pass", "def update_parameters(parameters, grads, learning_rate):\n pass", "def update_policy(self):\n self.optimizer.step()\n self.optimizer.zero_grad()", "def update_params(self, learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\t\tself._W = self._W - learning_rate * self._grad_W_current\n\t\tself._b = self._b - learning_rate * self._grad_b_current\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def update_params(self):\n \n # read parameters from context broker\n for attr in ['Kp', 'Ti', 'Td', 'lim_low', 'lim_high', 'setpoint']:\n self.params[attr] = float(self.ORION_CB.get_entity_attribute_value(entity_name=self.params['name'], attribute_name=attr))\n self.params['reverse_act'] = str(self.ORION_CB.get_entity_attribute_value(entity_name=self.params['name'], attribute_name='reverse_act')) \n \n # update PID parameters\n self.PID.Kp = self.params['Kp']\n self.PID.Ti = self.params['Ti']\n self.PID.Td = self.params['Td']\n self.PID.lim_low = self.params['lim_low']\n self.PID.lim_high = self.params['lim_high']\n self.PID.reverse_act = eval(eval(self.params['reverse_act']))", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def updatedata(self, force_update=False):\n\n self._update_counter = self._update_counter + 1\n logging.debug('ParameterViewer: update values')\n for instrument_name in self._instrumentnames:\n instr = self._instruments[self._instrumentnames.index(instrument_name)]\n parameters = {}\n\n try:\n parameters = instr.parameters\n except AttributeError as ex:\n # instrument was removed\n print('instrument was removed, stopping ParameterViewer')\n # logging.exception(ex)\n self._timer.stop()\n\n parameter_names = 
sorted(parameters.keys())\n\n si = sys.getswitchinterval()\n\n for parameter_name in parameter_names:\n # hack to make this semi thread-safe\n\n for field_name in self._fields:\n if field_name == 'Value':\n sys.setswitchinterval(100)\n value = parameters[parameter_name].get_latest()\n sys.setswitchinterval(si)\n self.update_field_signal.emit(instrument_name, parameter_name, field_name, value, force_update)\n else:\n if self._update_counter % 20 == 1 or 1:\n sys.setswitchinterval(100)\n value = getattr(parameters[parameter_name], field_name, '')\n sys.setswitchinterval(si)\n self.update_field_signal.emit(instrument_name, parameter_name,\n field_name, value, force_update)\n\n for callback_function in self.callbacklist:\n try:\n callback_function()\n except Exception as ex:\n logging.debug('update function failed')\n logging.exception(str(ex))", "def update_with_evaluation(param_object, evaluation_dict, verbose):\n if evaluation_dict: # evaluates true if dict is not empty and the value is not None\n for key, value in evaluation_dict.items():\n try:\n setattr(param_object, key, value)\n TellUser.debug('attribute (' + param_object.name + ': ' + key + ') set: ' + str(value))\n except KeyError:\n TellUser.debug('No attribute ' + param_object.name + ': ' + key)", "def updateParameters(self, parameters):", "def pre_network_policy_update(self, resource_id, resource_dict):\n pass", "def _modify_schedule_policy_properties(self):\n request_json = {\n 'taskInfo':\n {\n 'taskOperation': 1,\n 'associations': self._associations,\n 'task': self._task_json,\n \"appGroup\":\n {\n \"appGroups\": self._app_groups if self._app_groups else [],\n },\n 'subTasks': self._subtasks\n }\n }\n\n flag, response = self._commcell_object._cvpysdk_object.make_request(\n 'PUT', self._MODIFY_SCHEDULE_POLICY, request_json\n )\n output = self._process_schedule_policy_update_response(flag, response)\n self.refresh()\n\n if output[0]:\n return\n\n o_str = 'Failed to update properties of Schedule Policy\\nError: \"{0}\"'\n raise SDKException('Schedules', '102', o_str.format(output[2]))", "def __setitem__(self, i, v):\n # The policy function can't be modified", "def set_policy (self, policy = None, args = (), policy_cleanup = None):\n if policy == self.policy:\n # same policy; might want to change args/cleanup function, though\n self._policy_args = args\n if policy is not None and not isinstance(policy, basestring):\n self._policy_cleanup = policy_cleanup\n return\n # perform cleanup for current policy, if any\n if isinstance(self.policy, basestring):\n # built-in\n try:\n POLICY_CLEANUP[self.policy](self)\n except AttributeError:\n pass\n elif self.policy is not None and self._policy_cleanup is not None:\n # custom\n self._policy_cleanup(self)\n del self._policy_cleanup\n # set new policy\n self.policy = policy\n if policy is None:\n # if disabling scrolling, clean up some attributes we won't need\n try:\n del self._scroll_fn, self._policy_args\n except AttributeError:\n pass\n else:\n self._policy_args = args if args else ()\n if isinstance(policy, basestring):\n # built-in\n self._scroll_fn = POLICY_SCROLL[policy]\n else:\n # custom\n self._scroll_fn = policy\n self._policy_cleanup = policy_cleanup", "def _update_embedding_param(self):\n for layer, ids in self._tls._unique_ids_all_layers.items():\n value = self._get_embedding_variable(layer).numpy()\n self._update_embedding_func(layer, ids, value)\n\n for slot in self._allowed_slot_names:\n value = self._get_slot_variable(layer, slot).numpy()\n slot_table_name = 
get_slot_table_name(layer, slot)\n self._update_embedding_func(slot_table_name, ids, value)", "def update_policy(ranger_url, policy_id, policy_data, admin_username_password):\n\n url = format(\"{ranger_url}/service/public/v2/api/policy/{policy_id}\")\n\n base_64_string = base64.encodestring(admin_username_password).replace('\\n', '')\n\n request = urllib2.Request(url, json.dumps(policy_data))\n request.get_method = lambda: 'PUT'\n request.add_header('Content-Type', 'application/json')\n request.add_header('Accept', 'application/json')\n request.add_header('Authorization', format('Basic {base_64_string}'))\n\n try:\n result = openurl(request, timeout=20)\n response_code = result.getcode()\n if response_code == 200:\n Logger.info(format(\"Successfully updated policy in Ranger Admin\"))\n return response_code\n else:\n Logger.error(format(\"Unable to update policy in Ranger Admin\"))\n return None\n except urllib2.HTTPError as e:\n raise Fail(\"HTTPError while updating policy Reason = \" + str(e.code))\n except urllib2.URLError as e:\n raise Fail(\"URLError while updating policy. Reason = \" + str(e.reason))\n except TimeoutError:\n raise Fail(\"Connection timeout error while updating policy\")\n except Exception as err:\n raise Fail(format(\"Error while updating policy. Reason = {err}\"))", "def _init_actor_update(self):\n\n actions = self._policy.actions([self._observations_ph])\n log_pis = self._policy.log_pis([self._observations_ph], actions)\n\n assert log_pis.shape.as_list() == [None, 1]\n\n log_alpha = self._log_alpha = tf.get_variable(\n 'log_alpha',\n dtype=tf.float32,\n initializer=0.0)\n alpha = tf.exp(log_alpha)\n\n if isinstance(self._target_entropy, Number):\n alpha_loss = -tf.reduce_mean(\n log_alpha * tf.stop_gradient(log_pis + self._target_entropy))\n\n self._alpha_optimizer = tf.train.AdamOptimizer(\n self._policy_lr, name='alpha_optimizer')\n self._alpha_train_op = self._alpha_optimizer.minimize(\n loss=alpha_loss, var_list=[log_alpha])\n\n self._training_ops.update({\n 'temperature_alpha': self._alpha_train_op\n })\n\n self._alpha = alpha\n\n if self._action_prior == 'normal':\n policy_prior = tf.contrib.distributions.MultivariateNormalDiag(\n loc=tf.zeros(self._action_shape),\n scale_diag=tf.ones(self._action_shape))\n policy_prior_log_probs = policy_prior.log_prob(actions)\n elif self._action_prior == 'uniform':\n policy_prior_log_probs = 0.0\n\n Q_log_targets = tuple(\n Q([self._observations_ph, actions])\n for Q in self._Qs)\n min_Q_log_target = tf.reduce_min(Q_log_targets, axis=0)\n\n if self._reparameterize:\n policy_kl_losses = (\n alpha * log_pis\n - min_Q_log_target\n - policy_prior_log_probs)\n else:\n raise NotImplementedError\n\n assert policy_kl_losses.shape.as_list() == [None, 1]\n\n policy_loss = tf.reduce_mean(policy_kl_losses)\n\n self._policy_optimizer = tf.train.AdamOptimizer(\n learning_rate=self._policy_lr,\n name=\"policy_optimizer\")\n policy_train_op = tf.contrib.layers.optimize_loss(\n policy_loss,\n self.global_step,\n learning_rate=self._policy_lr,\n optimizer=self._policy_optimizer,\n variables=self._policy.trainable_variables,\n increment_global_step=False,\n summaries=(\n \"loss\", \"gradients\", \"gradient_norm\", \"global_gradient_norm\"\n ) if self._tf_summaries else ())\n\n self._training_ops.update({'policy_train_op': policy_train_op})", "def updateParameters(self):\n\n return", "def variational_update(self):\n with self.elbo_check('update_p_allele_swap'):\n self.model.update_p_allele_swap()\n\n with self.elbo_check('p_cn'):\n 
self.model.update_p_cn()\n\n with self.elbo_check('p_breakpoint'):\n self.model.update_p_breakpoint()\n\n with self.elbo_check('p_outlier_total'):\n self.model.update_p_outlier_total()\n\n with self.elbo_check('p_outlier_allele'):\n self.model.update_p_outlier_allele()", "def log_update(self, policy_loss, entropy, kl_divergence, clipping_fraction,\n value_loss, explained_variance, steps):\n\n # Diagnostics\n self.writer.add_scalar(\"Diagnostics/Policy/PolicyLoss\",\n policy_loss,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Policy/Entropy\",\n entropy,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Policy/KLDivergence\",\n kl_divergence,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Policy/ClipFraction\",\n clipping_fraction,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Value/ValueLoss\",\n value_loss,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Value/ValueEstimate\",\n np.mean(self.buffer.values),\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Value/ExplainedVariance\",\n explained_variance,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Info/LearningRate\",\n self.lr_pi,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Info/TotalTimesteps\",\n self.update_counter * self.batch_size,\n self.update_counter)\n self.writer.add_scalar(\"Diagnostics/Info/KLDivCoef\",\n self.kl_coef,\n self.update_counter)\n # Training Episodes\n self.writer.add_scalar(\"Training/Episodes/PolicyGradientSteps\",\n steps,\n self.update_counter)\n mean_frames = np.mean(self.buffer.episode_lengths)\n std_frames = np.std(self.buffer.episode_lengths)\n self.writer.add_scalar(\"Training/Episodes/Mean_Frames\",\n mean_frames,\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Std_Frames\",\n std_frames,\n self.update_counter)\n self.writer.add_histogram(\"Training/Episodes/Frames\",\n np.array(self.buffer.episode_lengths),\n self.update_counter)\n mean_reward = np.mean(self.buffer.episode_rewards)\n std_reward = np.std(self.buffer.episode_rewards)\n rews_per_frame = np.array(self.buffer.episode_rewards) / \\\n np.array(self.buffer.episode_lengths, dtype=np.float)\n self.writer.add_scalar(\"Training/Episodes/Mean_Reward\",\n mean_reward,\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Std_Reward\",\n std_reward,\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Reward_per_Frame_Mean\",\n np.mean(rews_per_frame),\n self.update_counter)\n self.writer.add_scalar(\"Training/Episodes/Reward_per_Frame_Std\",\n np.std(rews_per_frame),\n self.update_counter)\n self.writer.add_histogram(\"Training/Episodes/Rewards\",\n np.array(self.buffer.episode_rewards),\n self.update_counter)\n self.writer.add_histogram(\"Training/Episodes/Rewards_per_Frame\",\n rews_per_frame,\n self.update_counter)\n actions = np.array(self.buffer.actions)\n self.writer.add_histogram(\"Training/Action/DeltaVel\",\n actions[:, 0],\n self.update_counter)\n self.writer.add_histogram(\"Training/Action/DeltaLat\",\n actions[:, 1],\n self.update_counter)\n self.writer.add_histogram(\"Training/Values\",\n np.array(self.buffer.values),\n self.update_counter)\n self.writer.add_histogram(\"Training/Avantages\",\n np.array(self.buffer.advantages),\n self.update_counter)\n self.writer.add_histogram(\"Training/GradNorms\",\n np.array(self.grad_norms),\n self.update_counter)\n self.writer.add_histogram(\"Training/Ratio/Ratio\",\n np.array(self.ratios).flatten(),\n self.update_counter)\n 
self.writer.add_histogram(\"Training/Ratio/ClippedRatio\",\n np.array(self.clipped_ratios).flatten(),\n self.update_counter)\n\n self.writer.flush()\n\n print(\"-\" * 30)\n print(\"PPO Optimization\")\n print(\"Policy_Loss: {}\\t\\t\".format(policy_loss))\n print(\"Value_Loss: {}\\t\\t\".format(value_loss))\n print(\"Entropy: {}\\t\\t\".format(entropy))\n print(\"Lr_pi: {}\\t\\t\".format(self.lr_pi))\n print(\"Lr_vf: {}\\t\\t\".format(self.lr_vf))\n print(\"KL_Divergence: {}\\t\\t\".format(kl_divergence))\n print(\"Clip_Fraction: {}\\t\\t\".format(clipping_fraction))\n print(\"Exp_Variance: {}\\t\\t\".format(explained_variance))\n print(\"Mean_Reward: {}\\t\\t\".format(mean_reward))\n print(\"Std_Reward: {}\\t\\t\".format(std_reward))\n print(\"Mean_Frames: {}\\t\\t\".format(mean_frames))\n print(\"Std_Frames: {}\\t\\t\".format(std_frames))\n print(\"Mean_Reward_per_frame: {}\\t\\t\".format(np.mean(rews_per_frame)))\n print(\"Std_Reward_per_frame: {}\\t\\t\".format(np.std(rews_per_frame)))\n print(\"Optimization steps: {}\\t\\t\". format(self.update_counter))\n print(\"-\" * 30)", "def update_l7_policy(request, **kwargs):\n data = request.DATA\n l7_policy_id = data['l7policy'].get('id')\n\n conn = get_sdk_connection(request)\n l7_policy = conn.load_balancer.update_l7_policy(\n action=data['l7policy']['action'],\n admin_state_up=data['l7policy'].get('admin_state_up'),\n description=data['l7policy'].get('description'),\n l7_policy=l7_policy_id,\n name=data['l7policy'].get('name'),\n position=data['l7policy'].get('position'),\n redirect_pool_id=data['l7policy'].get('redirect_pool_id'),\n redirect_url=data['l7policy'].get('redirect_url'),\n )\n\n return _get_sdk_object_dict(l7_policy)", "def update_parameters(updates):\r\n for (key, val) in updates.items():\r\n par[key] = val\r\n print('Updating:', key, '-->', val)\r\n update_dependencies()", "def rollout(pi, env):\n rollout = defaultdict(list)\n t = 0\n ob = (env.reset())\n features = pi.get_initial_features()\n for _ in range(2000):\n rets = pi.compute(ob, features)\n ac, value, features = rets[0], rets[1], rets[2:]\n rollout[\"obs\"].append(ob)\n rollout[\"vs\"].append(value)\n rollout[\"actions\"].append(ac)\n ob, rew, done, _ = env.step(ac)\n rollout[\"rs\"].append(rew)\n rollout[\"features\"].append(features)\n t += 1\n if done:\n break\n print(\"Cur policy: \", len(rollout[\"obs\"]))\n rollout[\"r\"] = 0\n rollout[\"terminal\"] = True\n return rollout", "def _update_params(self, perf_params, loop_info):\n for vartype in list(perf_params.keys()):\n for var in perf_params[vartype]:\n self.tspec_params['performance_params'][var] = \\\n self.indent + 'param %s[] = %s;\\t#%s\\n' % (var, repr(default_perf_params[vartype]), vartype)\n\n #loop_info.vars: set of input vars", "def _update(self):\n self.all_params = {}\n self._update_experiment_params()\n self._update_preprocessing_params()\n self._update_model_params()", "def _update_parameter(self, dWxh, dbh, dWhy, dby):\n # Add code to update all the weights and biases here", "def update_generate_params(self,inps,trgs,preds):\n batch_size = np.shape(trgs)[0]\n\n self.delta_weight_h_to_v = self.learning_rate / batch_size * np.transpose(trgs) @ (inps - preds)\n self.delta_bias_v = self.learning_rate * np.mean(inps - preds)\n \n self.weight_h_to_v += self.delta_weight_h_to_v\n self.bias_v += self.delta_bias_v \n \n return", "def replace_namespaced_policy(self, body, namespace, name, **kwargs):\n\n all_params = ['body', 'namespace', 'name', 'pretty']\n all_params.append('callback')\n\n params = 
locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_policy`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `replace_namespaced_policy`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_policy`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/policies/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Policy',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def refresh(self):\n self._get_schedule_policy_properties()", "def pre_update(self, **values):\r\n pass", "def post_network_policy_update(self, resource_id, resource_dict):\n pass", "def update_service_access_policies(DomainName=None, AccessPolicies=None):\n pass", "def _policy_improvement(self) -> Tuple[np.ndarray, np.ndarray]:\n # Start with a (random) policy\n policy = np.zeros([self.state_dim, self.action_dim])\n V = np.zeros([self.state_dim])\n #random init the policy\n for s in range(self.state_dim):\n policy[s,0] = 0.0\n policy[s,1] = 0.0\n policy[s,2] = 1.0\n\n V = self._policy_eval(policy)\n\n policy_stable = False\n dr = 0.9\n\n while (policy_stable != True):\n policy_stable = True\n for s in self.mdp._state_dict:\n old_action = (policy[self.mdp._state_dict[s]]).tolist()\n action_dict = {}\n for a in self.mdp._action_dict:\n temp = 0.0\n for next_s in self.mdp._state_dict:\n p = self.mdp.P[self.mdp._state_dict[s],self.mdp._action_dict[a],self.mdp._state_dict[next_s]]\n r = self.mdp.R[self.mdp._state_dict[s],self.mdp._action_dict[a],self.mdp._state_dict[next_s]]\n Vs = V[self.mdp._state_dict[next_s]]\n temp = temp + p * (r + dr * Vs)\n action_dict[self.mdp._action_dict[a]]= temp \n max_act = max(action_dict.values())\n V[self.mdp._state_dict[s]] = max_act\n res = [t for t,v in action_dict.items() if v == max_act][0]\n for opt in range(self.action_dim):\n if opt == res:\n policy[self.mdp._state_dict[s],opt] = 1.0\n else:\n policy[self.mdp._state_dict[s],opt] = 0.0\n 
if (old_action - policy[self.mdp._state_dict[s]]).any() == True:\n \n policy_stable = False\n if policy_stable == False:\n V = self._policy_eval(policy)\n \n return policy, V", "def em_update_params(self):\n for name in self.likelihood_params:\n with self.elbo_check(name):\n self.update_param(name)", "def update_multiple_parameters(self, detuning=None, lamb_dicke=None,\n base_rabi=None):\n if detuning is not None:\n self.__detuning = detuning\n if lamb_dicke is not None:\n self.__lamb_dicke = lamb_dicke\n if base_rabi is not None:\n self.__base_rabi = base_rabi\n if detuning is None and lamb_dicke is None and base_rabi is None:\n return\n self.__update_prefactors()", "def policy_improvement(P, nS, nA, value_from_policy, gamma=0.9):\n\n new_policy = np.ones([nS, nA]) / nA\n\t############################\n\t# YOUR IMPLEMENTATION HERE #\n #iteration_policy=new_policy\n for state in range(nS):\n #current_policy=new_policy[state] \n action_policy = np.zeros(nA) \n for action in range(nA):\n for p,nextS,r,boolean_v in P[state][action]:\n action_policy[action] += p*( r + gamma* value_from_policy[nextS])\n #print(action_policy)\n updated_policy=np.zeros(nA)\n updated_policy[np.argmax(action_policy)]= 1\n #print(updated_policy) \n new_policy[state]=updated_policy\n \n \t############################\n return new_policy", "def initialize_policies(self, policy_collection, options):", "def put(self, request, l7_policy_id):\n kwargs = {'l7_policy_id': l7_policy_id}\n update_l7_policy(request, **kwargs)", "def update_data(self, **kwargs):\n self.source_data = self.get_dict()\n for c in self.callbacks[\"update_data\"]:\n c()", "def update_lr(self, *args, **kwargs):\n raise NotImplementedError", "def update_lr(self, *args, **kwargs):\n raise NotImplementedError", "def patch_namespaced_policy(self, body, namespace, name, **kwargs):\n\n all_params = ['body', 'namespace', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method patch_namespaced_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `patch_namespaced_policy`\")\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `patch_namespaced_policy`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `patch_namespaced_policy`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/policies/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header 
`Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PATCH',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1Policy',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def rollout_policy_fn(board):\n # rollout randomly\n action_probs = np.random.rand(len(board.availables))\n return zip(board.availables, action_probs)", "def test_update_policy(self):\n self.ports[1].qos_policy_id = self.qos_policies[0].id\n self.ports[1].update()\n self.ports[2].qos_policy_id = self.qos_policies[1].id\n self.ports[2].update()\n self.ports[4].qos_policy_id = self.qos_policies[0].id\n self.ports[4].update()\n self.ports[5].qos_policy_id = self.qos_policies[1].id\n self.ports[5].update()\n self.networks[1].qos_policy_id = self.qos_policies[0].id\n self.networks[1].update()\n self.fips[0].qos_policy_id = self.qos_policies[0].id\n self.fips[0].update()\n self.fips[1].qos_policy_id = self.qos_policies[1].id\n self.fips[1].update()\n self._update_router_qos(self.ctx, self.routers[0].id,\n self.qos_policies[0].id)\n self._update_router_qos(self.ctx, self.routers[1].id,\n self.qos_policies[1].id)\n mock_qos_rules = mock.Mock()\n with mock.patch.object(self.qos_driver, '_qos_rules',\n return_value=mock_qos_rules), \\\n mock.patch.object(self.qos_driver, 'update_floatingip') as \\\n mock_update_fip, \\\n mock.patch.object(self.qos_driver, 'update_router') as \\\n mock_update_router:\n self.qos_driver.update_policy(self.ctx, self.qos_policies[0])\n # Ports updated from \"update_port\": self.ports[1], self.ports[4]\n updated_ports = [self.ports[1], self.ports[4]]\n calls = [mock.call(self.txn, port.id, port.network_id,\n self.qos_policies[0].id, mock_qos_rules,\n lsp=None, port_deleted=False)\n for port in updated_ports]\n # Port updated from \"update_network\": self.ports[3]\n calls.append(mock.call(self.txn, self.ports[3].id,\n self.ports[3].network_id,\n self.qos_policies[0].id, mock_qos_rules))\n\n # We can't ensure the call order because we are not enforcing any order\n # when retrieving the port and the network list.\n self.mock_rules.assert_has_calls(calls, any_order=True)\n with db_api.CONTEXT_READER.using(self.ctx):\n fip = self.qos_driver._plugin_l3.get_floatingip(self.ctx,\n self.fips[0].id)\n mock_update_fip.assert_called_once_with(self.txn, fip)\n\n with db_api.CONTEXT_READER.using(self.ctx):\n router = self.qos_driver._plugin_l3.get_router(self.ctx,\n self.routers[0].id)\n mock_update_router.assert_called_once_with(self.txn, router)", "def update_params(self, learning_rate):\n\t\t#######################################################################\n\t\t# ** START OF YOUR CODE **\n\t\t#######################################################################\n\n\t\tfor layer in self._layers:\n\t\t\tlayer.update_params(learning_rate)\n\n\t\t#######################################################################\n\t\t# ** END OF YOUR CODE **\n\t\t#######################################################################", "def _policy_eval(self, policy: np.ndarray) -> np.ndarray:\n V = np.zeros(self.state_dim)\n diff = 1.0\n dr = 0.9\n while (diff >= self.theta):\n diff = 0.0\n for s in self.mdp._state_dict:\n old = 
V[self.mdp._state_dict[s]]\n temp = 0.0\n for opt in range(self.action_dim):\n if policy[self.mdp._state_dict[s],opt] == 1.0: \n for next_s in self.mdp._state_dict:\n p = self.mdp.P[self.mdp._state_dict[s],opt,self.mdp._state_dict[next_s]]\n r = self.mdp.R[self.mdp._state_dict[s],opt,self.mdp._state_dict[next_s]]\n Vs = V[self.mdp._state_dict[next_s]]\n temp = temp + p * (r + dr * Vs)\n V[self.mdp._state_dict[s]] = temp\n diff = max(diff,abs(old - V[self.mdp._state_dict[s]]))\n return V", "def gen_policy_rollout(self, observations, dynam_model):\n # initialize statespace array in pytorch tensor -> need that good good gradient!\n # raise NotImplementedError(\"Need to implement the rollouts with tensors, or maybe not for gradients\")\n\n # for sampling the state progression\n norm_dist = torch.distributions.Normal(0, 1)\n\n # for storing the costs and gradients\n costs = torch.Tensor()\n baselines = torch.Tensor()\n log_probabilities = torch.Tensor()\n states = torch.Tensor()\n ''' \n # for all of these values, think a row as an updating time series for each particle\n costs = torch.zeros((self.P, self.T))\n baselines = torch.zeros((self.P, self.T))\n probabilities = torch.zeros((self.P, self.T))\n\n # This is what we would do for the states, but it's more efficient to concatenate them\n states = torch.Tensor((self.P,self.T, self.n_in))\n '''\n\n # Change in state vs raw state key\n pred_key = dynam_model.change_state_list\n # print(pred_key)\n\n # TODO: Generate initial states for each particle based on the distribution of data it was trained on\n bound_up = torch.Tensor(np.max(observations, 0))\n bound_low = torch.Tensor(np.min(observations, 0))\n\n obs_dist = torch.distributions.uniform.Uniform(bound_low, bound_up)\n\n # iterate through each particle for the states and probabilites for gradient\n for p in range(self.P):\n\n # Choose the dynamics model from the ensemble\n num_ens = dynam_model.E\n if num_ens == 0:\n model = dynam_model\n else:\n model_idx = random.randint(0, num_ens - 1)\n model = dynam_model.networks[model_idx]\n\n num_obs = np.shape(observations)[1]\n # x0 = torch.Tensor(observations[random.randint(0,num_obs),:])\n x0 = obs_dist.sample()\n\n # TODO: Normalize the states before passing into the NN\n\n state_mat = x0.view((1, 1, -1))\n # print(state_mat)\n # state_mat = x0.unsqueeze(0).unsqueeze(0) # takes a (n_in) vector to a (1,1,n_in) Tensor\n # torch.cat((), axis = 1) to cat the times\n # torch.cat((), axis = 0) to cat the particle\n # is there a way to do this without unsqueeze? 
Seems like the most efficient way\n log_prob_vect = torch.Tensor([1]) # states the column with 1 for concat'ing\n\n for t in range(self.T):\n # generate action from the policy\n action = self.forward(state_mat[0, t, :])\n # print(action)\n # quit()\n\n # forward pass current state to generate distribution values from dynamics model\n means, var = model.distribution(state_mat[0, t, :], action)\n # print(var)\n # sample the next state from the means and variances of the state transition probabilities\n vals = var * norm_dist.sample((1, self.n_in)) + means\n # need to account for the fact that some states are change in and some are raw here\n\n # batch mode prob calc\n log_probs = -.5 * torch.abs(vals - means) / var\n # log_probs = -.5*torch.abs(vals - means)/(var**2)\n\n # for s in range(self.n_in):\n # # sample predicted new state for each element\n # val = var[s]*np.random.normal()+means[0] # sample from the scaled gaussian with y = sigma*x + mu\n\n # # calculate probability of this state for each sub state\n # p = -.5*(val-means[0])/var[0]\n # states = torch.cat((states, state), 0)\n # probabilities = torch.cat((probabilities, p), 0)\n\n # reduce the probabilities vector to get a single probability of the state transition\n log_prob = torch.sum((log_probs), 1)\n log_prob_vect = torch.cat((log_prob_vect, log_prob))\n\n state = torch.Tensor(vals).view((1, 1, -1))\n\n state_mat = torch.cat((state_mat, state),\n 1) # appends the currnt state to the current particle, without overwriting the otherone\n\n # print(state_mat)\n # calculates costs\n # idea ~ calculate the cost of each each element and then do an cumulative sum for the costs\n # use torch.cumsum\n c_list = []\n for state in state_mat.squeeze():\n c_row = self.cost_fnc(state)\n c_list.append(c_row)\n c_list = torch.stack(c_list)\n\n # note we calc the cum sum on the flipped tensor, then flip costs back\n cost_cum = torch.cumsum(torch.flip(c_list, [0]), 0)\n # Assembles the arrays for the current particle\n\n # costs were given above\n costs = torch.cat((costs, torch.flip(cost_cum, [0]).view(1, -1)), 0)\n\n # update the states array for each particle\n states = torch.cat((states, state_mat), 0)\n\n # concatenates the vector of prob at each time to the 2d array\n log_probabilities = torch.cat(\n (log_probabilities, log_prob_vect.view((1, -1))), 0)\n\n # calculates baselines as the leave one out mean for each particle at each time\n costs_summed = torch.sum(costs, 0)\n costs_summed_exp = costs_summed.expand_as(costs)\n costs_leave_one_out = costs_summed_exp - costs\n baselines = costs_leave_one_out / (self.P - 1)\n\n # freezes gradients on costs and baselines\n # these two lines of code actually do nothing, but are for clarification\n # costs.requires_grad_(requires_grad=False)\n # baselines.requires_grad_(requires_grad=False)\n # . detach() is another way to ensure this\n \"\"\"\n RuntimeError: you can only change requires_grad flags of leaf variables. 
If you want to use a \n computed variable in a subgraph that doesn't require differentiation use var_no_grad = var.detach().\n \"\"\"\n costs_d = costs.detach()\n baselines_d = baselines.detach()\n # costs_d = costs.requires_grad_(False)\n # baselines_d = baselines.requires_grad_(False)\n # print(baselines)\n # print(probabilities)\n # print(costs)\n # print(log_probabilities)\n return states, log_probabilities, costs_d, baselines_d", "def test_function_values(self):\n\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_foo\",\n self.pick_byte2,\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=1,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n self.param_dict.add_paramdictval(\n FunctionParamDictVal(\n \"fn_bar\",\n lambda x : bool(x&2), # bit map example\n lambda x : str(x),\n direct_access=True,\n startup_param=True,\n value=False,\n visibility=ParameterDictVisibility.READ_WRITE)\n )\n \n # check defaults just to be safe\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 1)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n result = self.param_dict.update(1005) # just change first in list\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 3)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # fn_bar does not get updated here\n result = self.param_dict.update_many(1205)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(len(result), 1)\n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 4)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, False)\n \n # both are updated now\n result = self.param_dict.update_many(6)\n self.assertEqual(result['fn_foo'], True)\n self.assertEqual(result['fn_bar'], True)\n self.assertEqual(len(result), 2)\n \n val = self.param_dict.get(\"fn_foo\")\n self.assertEqual(val, 0)\n val = self.param_dict.get(\"fn_bar\")\n self.assertEqual(val, True)", "def eval_policy(env, policy, obs_key, goal_key, num_rollouts, limit, pos=None, goal=None, filename=None):\n from tqdm import trange\n from ml_logger import logger\n\n logger.summary_cache.clear()\n if filename:\n frames = []\n\n for _ in trange(num_rollouts, desc=\"evaluate\"):\n\n obs, done = env.reset(), False\n if pos is not None or goal is not None:\n obs = env.unwrapped.reset(pos, goal)\n # d2goal = l2(obs['x'], obs['goal'])\n i, R = 0, 0\n while not done:\n if filename:\n _ = np.concatenate([obs[obs_key], obs[goal_key]], axis=-1)[0]\n frames.append(_) # , obs['goal_img']\n action = policy(obs[obs_key], obs[goal_key])\n obs, reward, done, info = env.step(action)\n R += reward\n if info['success'] or limit and i >= limit:\n logger.store_metrics( # d2goal=d2goal,\n success=info['success'])\n break\n i += 1\n\n if filename:\n logger.log_video(np.array(frames), key=filename)\n\n logger.print(f'has finished evaluation.')", "def _update_parameters(self, curr_state, reward, next_state):\n phi = self._features.vector(curr_state)\n phi_dash = self._features.vector(next_state)\n\n self._A += np.outer(phi, (phi - self._gamma * phi_dash))\n self._b += reward * phi", "def hard_update(self,target, source):\n\t\tfor target_param, param in zip(target.parameters(), source.parameters()):\n\t\t\t\ttarget_param.data.copy_(param.data)", "def updateParameters(self, parameters):\r\n\t\tin_wikiplace_IRI = parameters[0]\r\n\t\tin_relation_degree = parameters[1]\r\n\t\tin_first_property_dir = parameters[2]\r\n\t\tin_first_property = parameters[3]\r\n\t\tin_second_property_dir = 
parameters[4]\r\n\t\tin_second_property = parameters[5]\r\n\t\tin_third_property_dir = parameters[6]\r\n\t\tin_third_property = parameters[7]\r\n\t\tin_fourth_property_dir = parameters[8]\r\n\t\tin_fourth_property = parameters[9]\r\n\t\tout_location = parameters[10]\r\n\t\tout_table_name = parameters[11]\r\n\t\tout_points_name = parameters[12]\r\n\r\n\t\t\r\n\r\n\t\tif in_relation_degree.altered:\r\n\t\t\trelationDegree = int(in_relation_degree.valueAsText)\r\n\t\t\tif relationDegree == 1:\r\n\t\t\t\tin_first_property.enabled = True\r\n\t\t\t\tin_first_property_dir.enabled = True\r\n\t\t\t\tin_second_property.enabled = False\r\n\t\t\t\tin_second_property_dir.enabled = False\r\n\t\t\t\tin_third_property.enabled = False\r\n\t\t\t\tin_third_property_dir.enabled = False\r\n\t\t\t\tin_fourth_property.enabled = False\r\n\t\t\t\tin_fourth_property_dir.enabled = False\r\n\t\t\telif relationDegree == 2:\r\n\t\t\t\tin_first_property.enabled = True\r\n\t\t\t\tin_first_property_dir.enabled = True\r\n\t\t\t\tin_second_property.enabled = True\r\n\t\t\t\tin_second_property_dir.enabled = True\r\n\t\t\t\tin_third_property.enabled = False\r\n\t\t\t\tin_third_property_dir.enabled = False\r\n\t\t\t\tin_fourth_property.enabled = False\r\n\t\t\t\tin_fourth_property_dir.enabled = False\r\n\t\t\telif relationDegree == 3:\r\n\t\t\t\tin_first_property.enabled = True\r\n\t\t\t\tin_first_property_dir.enabled = True\r\n\t\t\t\tin_second_property.enabled = True\r\n\t\t\t\tin_second_property_dir.enabled = True\r\n\t\t\t\tin_third_property.enabled = True\r\n\t\t\t\tin_third_property_dir.enabled = True\r\n\t\t\t\tin_fourth_property.enabled = False\r\n\t\t\t\tin_fourth_property_dir.enabled = False\r\n\t\t\telif relationDegree == 4:\r\n\t\t\t\tin_first_property.enabled = True\r\n\t\t\t\tin_first_property_dir.enabled = True\r\n\t\t\t\tin_second_property.enabled = True\r\n\t\t\t\tin_second_property_dir.enabled = True\r\n\t\t\t\tin_third_property.enabled = True\r\n\t\t\t\tin_third_property_dir.enabled = True\r\n\t\t\t\tin_fourth_property.enabled = True\r\n\t\t\t\tin_fourth_property_dir.enabled = True\r\n\t\t\r\n\t\t\tif in_wikiplace_IRI.value:\r\n\t\t\t\tinputFeatureClassName = in_wikiplace_IRI.valueAsText\r\n\t\t\t\tlastIndexOFGDB = inputFeatureClassName.rfind(\"\\\\\")\r\n\t\t\t\tfeatureClassName = inputFeatureClassName[(lastIndexOFGDB+1):]\r\n\t\t\t\tcurrentWorkspace = inputFeatureClassName[:lastIndexOFGDB]\r\n\r\n\t\t\t\tarcpy.env.workspace = currentWorkspace\r\n\t\t\t\tout_location.value = currentWorkspace\r\n\r\n\t\t\t\tout_table_name.value = featureClassName + \"PathQueryTripleStore\"\r\n\r\n\t\t\t\tout_points_name.value = featureClassName + \"PathQueryLocation\"\r\n\r\n\r\n\t\t\t\toutLocation = out_location.valueAsText\r\n\t\t\t\toutTableName = out_table_name.valueAsText\r\n\t\t\t\toutputTableName = os.path.join(outLocation,outTableName)\r\n\t\t\t\tif arcpy.Exists(outputTableName):\r\n\t\t\t\t\tarcpy.AddError(\"The output table already exists in current workspace!\")\r\n\t\t\t\t\traise arcpy.ExecuteError\r\n\r\n\t\t\t\toutFeatureClassName = out_points_name.valueAsText\r\n\t\t\t\toutputFeatureClassName = os.path.join(outLocation,outFeatureClassName)\r\n\t\t\t\tif arcpy.Exists(outputFeatureClassName):\r\n\t\t\t\t\tarcpy.AddError(\"The output Feature Class already exists in current workspace!\")\r\n\t\t\t\t\traise arcpy.ExecuteError\r\n\r\n\r\n\t\t\t\t# get all the IRI from input point feature class of wikidata places\r\n\t\t\t\tinplaceIRIList = []\r\n\t\t\t\tcursor = 
arcpy.SearchCursor(inputFeatureClassName)\r\n\t\t\t\tfor row in cursor:\r\n\t\t\t\t\tinplaceIRIList.append(row.getValue(\"URL\"))\r\n\r\n\r\n\r\n\t\t\t\t# get the first property URL list and label list\r\n\t\t\t\tif in_first_property_dir.value:\r\n\t\t\t\t\tfristDirection = in_first_property_dir.valueAsText\r\n\t\t\t\t\t# get the first property URL list\r\n\t\t\t\t\tfirstPropertyURLListJsonBindingObject = SPARQLQuery.relFinderCommonPropertyQuery(inplaceIRIList, relationDegree, [fristDirection], [\"\", \"\", \"\"])\r\n\t\t\t\t\tfirstPropertyURLList = []\r\n\t\t\t\t\tfor jsonItem in firstPropertyURLListJsonBindingObject:\r\n\t\t\t\t\t\tfirstPropertyURLList.append(jsonItem[\"p1\"][\"value\"])\r\n\r\n\t\t\t\t\tfirstPropertyLabelJSON = SPARQLQuery.locationCommonPropertyLabelQuery(firstPropertyURLList)\r\n\t\t\t\t\t# firstPropertyLabelJSON = firstPropertyLabelJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\t\t\t# get the first property label list\r\n\t\t\t\t\tfirstPropertyURLList = []\r\n\t\t\t\t\tfirstPropertyLabelList = []\r\n\t\t\t\t\tfor jsonItem in firstPropertyLabelJSON:\r\n\t\t\t\t\t\tpropertyURL = jsonItem[\"p\"][\"value\"]\r\n\t\t\t\t\t\tfirstPropertyURLList.append(propertyURL)\r\n\t\t\t\t\t\tpropertyName = jsonItem[\"propertyLabel\"][\"value\"]\r\n\t\t\t\t\t\tfirstPropertyLabelList.append(propertyName)\r\n\r\n\t\t\t\t\tRelFinder.firstPropertyLabelURLDict = dict(zip(firstPropertyLabelList, firstPropertyURLList))\r\n\r\n\t\t\t\t\tin_first_property.filter.list = firstPropertyLabelList\r\n\r\n\t\t\t\t\t# get the second property URL list and label list\r\n\t\t\t\t\tif in_second_property_dir.value:\r\n\t\t\t\t\t\tfristDirection = in_first_property_dir.valueAsText\r\n\t\t\t\t\t\tfirstProperty = in_first_property.valueAsText\r\n\r\n\t\t\t\t\t\tif firstProperty == None:\r\n\t\t\t\t\t\t\tfirstProperty = \"\"\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tfirstProperty = RelFinder.firstPropertyLabelURLDict[firstProperty]\r\n\r\n\t\t\t\t\t\tsecondDirection = in_second_property_dir.valueAsText\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t# get the second property URL list\r\n\t\t\t\t\t\tsecondPropertyURLListJsonBindingObject = SPARQLQuery.relFinderCommonPropertyQuery(inplaceIRIList, relationDegree, [fristDirection, secondDirection], [firstProperty, \"\", \"\"])\r\n\t\t\t\t\t\tsecondPropertyURLList = []\r\n\t\t\t\t\t\tfor jsonItem in secondPropertyURLListJsonBindingObject:\r\n\t\t\t\t\t\t\tsecondPropertyURLList.append(jsonItem[\"p2\"][\"value\"])\r\n\r\n\t\t\t\t\t\tsecondPropertyLabelJSON = SPARQLQuery.locationCommonPropertyLabelQuery(secondPropertyURLList)\r\n\t\t\t\t\t\t# secondPropertyLabelJSON = secondPropertyLabelJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\t\t\t\t# get the second property label list\r\n\t\t\t\t\t\tsecondPropertyURLList = []\r\n\t\t\t\t\t\tsecondPropertyLabelList = []\r\n\t\t\t\t\t\tfor jsonItem in secondPropertyLabelJSON:\r\n\t\t\t\t\t\t\tpropertyURL = jsonItem[\"p\"][\"value\"]\r\n\t\t\t\t\t\t\tsecondPropertyURLList.append(propertyURL)\r\n\t\t\t\t\t\t\tpropertyName = jsonItem[\"propertyLabel\"][\"value\"]\r\n\t\t\t\t\t\t\tsecondPropertyLabelList.append(propertyName)\r\n\r\n\t\t\t\t\t\tRelFinder.secondPropertyLabelURLDict = dict(zip(secondPropertyLabelList, secondPropertyURLList))\r\n\r\n\t\t\t\t\t\tin_second_property.filter.list = secondPropertyLabelList\r\n\r\n\t\t\t\t\t\t# get the third property URL list and label list\r\n\t\t\t\t\t\tif in_third_property_dir.value:\r\n\t\t\t\t\t\t\tfristDirection = in_first_property_dir.valueAsText\r\n\t\t\t\t\t\t\tfirstProperty = 
in_first_property.valueAsText\r\n\r\n\t\t\t\t\t\t\tsecondDirection = in_second_property_dir.valueAsText\r\n\t\t\t\t\t\t\tsecondProperty = in_second_property.valueAsText\r\n\r\n\t\t\t\t\t\t\tif firstProperty == None:\r\n\t\t\t\t\t\t\t\tfirstProperty = \"\"\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tfirstProperty = RelFinder.firstPropertyLabelURLDict[firstProperty]\r\n\t\t\t\t\t\t\tif secondProperty == None:\r\n\t\t\t\t\t\t\t\tsecondProperty = \"\"\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tsecondProperty = RelFinder.secondPropertyLabelURLDict[secondProperty]\r\n\r\n\t\t\t\t\t\t\tthirdDirection = in_third_property_dir.valueAsText\r\n\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t# get the third property URL list\r\n\t\t\t\t\t\t\tthirdPropertyURLListJsonBindingObject = SPARQLQuery.relFinderCommonPropertyQuery(inplaceIRIList, relationDegree, [fristDirection, secondDirection, thirdDirection], [firstProperty, secondProperty, \"\"])\r\n\t\t\t\t\t\t\tthirdPropertyURLList = []\r\n\t\t\t\t\t\t\tfor jsonItem in thirdPropertyURLListJsonBindingObject:\r\n\t\t\t\t\t\t\t\tthirdPropertyURLList.append(jsonItem[\"p3\"][\"value\"])\r\n\r\n\t\t\t\t\t\t\tthirdPropertyLabelJSON = SPARQLQuery.locationCommonPropertyLabelQuery(thirdPropertyURLList)\r\n\t\t\t\t\t\t\t# thirdPropertyLabelJSON = thirdPropertyLabelJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\t\t\t\t\t# get the third property label list\r\n\t\t\t\t\t\t\tthirdPropertyURLList = []\r\n\t\t\t\t\t\t\tthirdPropertyLabelList = []\r\n\t\t\t\t\t\t\tfor jsonItem in thirdPropertyLabelJSON:\r\n\t\t\t\t\t\t\t\tpropertyURL = jsonItem[\"p\"][\"value\"]\r\n\t\t\t\t\t\t\t\tthirdPropertyURLList.append(propertyURL)\r\n\t\t\t\t\t\t\t\tpropertyName = jsonItem[\"propertyLabel\"][\"value\"]\r\n\t\t\t\t\t\t\t\tthirdPropertyLabelList.append(propertyName)\r\n\r\n\t\t\t\t\t\t\tRelFinder.thirdPropertyLabelURLDict = dict(zip(thirdPropertyLabelList, thirdPropertyURLList))\r\n\r\n\t\t\t\t\t\t\tin_third_property.filter.list = thirdPropertyLabelList\r\n\r\n\t\t\t\t\t\t\t# get the fourth property URL list and label list\r\n\t\t\t\t\t\t\tif in_fourth_property_dir.value:\r\n\t\t\t\t\t\t\t\tfristDirection = in_first_property_dir.valueAsText\r\n\t\t\t\t\t\t\t\tfirstProperty = in_first_property.valueAsText\r\n\r\n\t\t\t\t\t\t\t\tsecondDirection = in_second_property_dir.valueAsText\r\n\t\t\t\t\t\t\t\tsecondProperty = in_second_property.valueAsText\r\n\r\n\t\t\t\t\t\t\t\tthirdDirection = in_third_property_dir.valueAsText\r\n\t\t\t\t\t\t\t\tthirdProperty = in_third_property.valueAsText\r\n\r\n\t\t\t\t\t\t\t\tif firstProperty == None:\r\n\t\t\t\t\t\t\t\t\tfirstProperty = \"\"\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tfirstProperty = RelFinder.firstPropertyLabelURLDict[firstProperty]\r\n\t\t\t\t\t\t\t\tif secondProperty == None:\r\n\t\t\t\t\t\t\t\t\tsecondProperty = \"\"\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tsecondProperty = RelFinder.secondPropertyLabelURLDict[secondProperty]\r\n\t\t\t\t\t\t\t\tif thirdProperty == None:\r\n\t\t\t\t\t\t\t\t\tthirdProperty = \"\"\r\n\t\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t\tthirdProperty = RelFinder.thirdPropertyLabelURLDict[thirdProperty]\r\n\r\n\t\t\t\t\t\t\t\tfourthDirection = in_fourth_property_dir.valueAsText\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t# get the fourth property URL list\r\n\t\t\t\t\t\t\t\tfourthPropertyURLListJsonBindingObject = SPARQLQuery.relFinderCommonPropertyQuery(inplaceIRIList, relationDegree, [fristDirection, secondDirection, thirdDirection, fourthDirection], [firstProperty, secondProperty, 
thirdProperty])\r\n\t\t\t\t\t\t\t\tfourthPropertyURLList = []\r\n\t\t\t\t\t\t\t\tfor jsonItem in fourthPropertyURLListJsonBindingObject:\r\n\t\t\t\t\t\t\t\t\tfourthPropertyURLList.append(jsonItem[\"p4\"][\"value\"])\r\n\r\n\t\t\t\t\t\t\t\tfourthPropertyLabelJSON = SPARQLQuery.locationCommonPropertyLabelQuery(fourthPropertyURLList)\r\n\t\t\t\t\t\t\t\t# fourthPropertyLabelJSON = fourthPropertyLabelJSONObj[\"results\"][\"bindings\"]\r\n\r\n\t\t\t\t\t\t\t\t# get the fourth property label list\r\n\t\t\t\t\t\t\t\tfourthPropertyURLList = []\r\n\t\t\t\t\t\t\t\tfourthPropertyLabelList = []\r\n\t\t\t\t\t\t\t\tfor jsonItem in fourthPropertyLabelJSON:\r\n\t\t\t\t\t\t\t\t\tpropertyURL = jsonItem[\"p\"][\"value\"]\r\n\t\t\t\t\t\t\t\t\tfourthPropertyURLList.append(propertyURL)\r\n\t\t\t\t\t\t\t\t\tpropertyName = jsonItem[\"propertyLabel\"][\"value\"]\r\n\t\t\t\t\t\t\t\t\tfourthPropertyLabelList.append(propertyName)\r\n\r\n\t\t\t\t\t\t\t\tRelFinder.fourthPropertyLabelURLDict = dict(zip(fourthPropertyLabelList, fourthPropertyURLList))\r\n\r\n\t\t\t\t\t\t\t\tin_fourth_property.filter.list = fourthPropertyLabelList\r\n\r\n\r\n\t\treturn", "def update_parameters(parameters, grads, learning_rate=0.01):\n # Retrieve each parameter from the dictionary \"parameters\"\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = parameters[\"W1\"]\n b1 = parameters[\"b1\"]\n W2 = parameters[\"W2\"]\n b2 = parameters[\"b2\"]\n W3 = parameters[\"W3\"]\n b3 = parameters[\"b3\"]\n ### END CODE HERE ###\n\n # Retrieve each gradient from the dictionary \"grads\"\n ### START CODE HERE ### (≈ 4 lines of code)\n dW1 = grads[\"dW1\"]\n db1 = grads[\"db1\"]\n dW2 = grads[\"dW2\"]\n db2 = grads[\"db2\"]\n dW3 = grads[\"dW3\"]\n db3 = grads[\"db3\"]\n ## END CODE HERE ###\n\n # Update rule for each parameter\n ### START CODE HERE ### (≈ 4 lines of code)\n W1 = W1 - (learning_rate * dW1)\n b1 = b1 - (learning_rate * db1)\n W2 = W2 - (learning_rate * dW2)\n b2 = b2 - (learning_rate * db2)\n W3 = W3 - (learning_rate * dW3)\n b3 = b3 - (learning_rate * db3)\n ### END CODE HERE ###\n\n parameters = {\"W1\": W1,\n \"b1\": b1,\n \"W2\": W2,\n \"b2\": b2,\n \"W3\": W3,\n \"b3\": b3}\n\n return parameters", "def update_recognize_params(self,inps,trgs,preds):\n batch_size = np.shape(trgs)[0]\n self.delta_weight_v_to_h = self.learning_rate/batch_size * np.transpose(trgs) @ (inps - preds)\n self.delta_bias_h = self.learning_rate * np.mean(inps - preds)\n\n self.weight_v_to_h += self.delta_weight_v_to_h\n self.bias_h += self.delta_bias_h\n \n return", "def update():\n\n # Get last new x value as last x value + 1\n x_n0 = data_source.data['x'][-1]\n x_n1 = x_n0 + 0.1\n\n # Assign a new y value\n y_n1 = param_source.data['amp_sine'][0] * np.sin(x_n1) +\\\n param_source.data['amp_rand'][0] * np.random.rand(1)\n\n # Get old last average and use to calculate new average\n avg_n1 = _get_new_avg(data_source,\n y_n1,\n param_source.data['rollover'][0])\n\n # Make a dict of data to add on to the end of the source\n additional_data = dict(x=[x_n1], y=[y_n1], avg=[avg_n1])\n\n # Stream the new data with a rollover value of 10\n data_source.stream(additional_data,\n rollover=param_source.data['rollover'][0])\n\n # logger.debug(param_source.data['update_delay'][0])", "def update_parameters(self, learning_rate):\n for i in range(self.L - 1):\n self.W[i] -= learning_rate * self.dW[i]\n self.b[i] -= learning_rate * self.db[i]", "def _update_params(self):\n _load = not self.san_interface.runmode\n params={}\n if ('iosched' in self._updatedattr or _load) and 
self.iosched<>IoSchedType.default:\n params['iosched']=str(self.iosched)\n if ('readahead' in self._updatedattr or _load) and self.readahead :\n params['readahead']=self.readahead\n if params:\n for pt in self.paths():\n pt.provider.set_dev_params(pt,params)", "def _update_parameters(self, delta):\n if delta is not None:\n self.SGD.update_with_L1_regularization(self.variables, delta, self.L1)", "def rollout_policy_fn(board):\n # rollout randomly\n # action_probs = np.random.rand(len(board.availables))\n action_probs = np.random.rand(len(board.availables))\n return zip(board.availables, action_probs)", "def updateInstrumentParameterValue(tablews, paramdict):\n paramnames = paramdict.keys()\n for parname in paramnames: \n parvalue = paramdict[parname]\n\t# print \"%s = %f\" % (parname, parvalue)\n\tif parname.count(\"Chi2\") == 0:\n\t # Only update parameters nothing to do with chi2\n UpdatePeakParameterTableValue(InputWorkspace=tablews,\n\t\tColumn='Value',\n ParameterNames=[parname],\n\t\tNewFloatValue=parvalue)\n\n return", "def _update_params(self):\n raise NotImplementedException()", "def update_filter_params(self, fh):\n (self.data_timestamp, self.framerate,\n self.l, self.d, self.gamma,\n self.eps, self.alex, self.traceswitch) = (fh.attrs['data_timestamp'], fh.attrs['framerate'],\n fh.attrs['l'], fh.attrs['d'], fh.attrs['gamma'],\n fh.attrs['eps'], fh.attrs['alex'], fh.attrs['traceswitch'])", "def update(self):\n ## Initialize\n self.domain.update()\n self.var = self.domain.var.copy()\n self.out = []\n\n ## Construct var and out, respecting DAG properties\n for fun in self.functions:\n self.var = list(set(self.var).union(set(fun.var).difference(set(self.out))))\n\n self.out = list(set(self.out).union(set(fun.out)))\n\n try:\n self.var_rand = list(self.density.marginals.keys())\n except AttributeError:\n self.var_rand = []\n self.var_det = list(set(self.var).difference(self.var_rand))\n\n ## TODO parameters\n\n ## Convenience constants\n self.n_var = len(self.var)\n self.n_var_rand = len(self.var_rand)\n self.n_var_det = len(self.var_det)\n self.n_out = len(self.out)", "def update_throughput(self, throughput):\n\n # Update the throughput values.\n throughput_new = []\n for throughput_n in throughput: # Loop over orders.\n\n if callable(throughput_n):\n\n # Throughput was given as a callable function.\n throughput_new.append(throughput_n(self.wave_grid))\n\n elif throughput_n.shape == self.wave_grid.shape:\n\n # Throughput was given as an array.\n throughput_new.append(throughput_n)\n\n else:\n msg = 'Throughputs must be given as callable or arrays matching the extraction grid.'\n log.critical(msg)\n raise ValueError(msg)\n\n # Set the attribute to the new values.\n self.throughput = throughput_new\n\n return", "def update_by_config(self, policy_enabling_map):\n self.enabled_policies = []\n\n for policy_name, policy_config in policy_enabling_map.items():\n if not self._is_policy_exists(policy_name):\n self._warn_unexistent_policy(policy_name)\n continue\n\n if policy_config['enabled']:\n enabled_policy = self._get_policy(policy_name)\n self.enabled_policies.append(enabled_policy)", "def applyFuncOnValues(self, func):\r\n self._value = func(self._value)" ]
[ "0.73782724", "0.7067951", "0.66169596", "0.63102925", "0.60266966", "0.5975819", "0.595031", "0.5920189", "0.5888775", "0.58857965", "0.5820289", "0.5794157", "0.57780397", "0.5770066", "0.5761156", "0.57414526", "0.5721823", "0.5698898", "0.56790006", "0.56590545", "0.56160057", "0.56153065", "0.5565208", "0.5556312", "0.5517219", "0.54992104", "0.54901123", "0.54857236", "0.54672176", "0.5462786", "0.545099", "0.5439098", "0.54196477", "0.5411225", "0.53655124", "0.5348262", "0.53421843", "0.5337477", "0.5331581", "0.53118527", "0.5305816", "0.52816814", "0.527974", "0.52783436", "0.52560455", "0.52547145", "0.5254095", "0.5253944", "0.5249747", "0.5246404", "0.52357113", "0.522655", "0.52248305", "0.5221231", "0.52193296", "0.5208372", "0.5202038", "0.52006006", "0.5200223", "0.5198469", "0.51870227", "0.5185032", "0.5178441", "0.5176815", "0.517646", "0.5173889", "0.5173757", "0.517247", "0.5163767", "0.5162832", "0.51529115", "0.51507014", "0.51481485", "0.5147096", "0.5147096", "0.5143221", "0.5142306", "0.51294446", "0.5128907", "0.5096138", "0.5095267", "0.5094718", "0.5094446", "0.5093303", "0.5087175", "0.5081736", "0.50812024", "0.50739354", "0.5071112", "0.5069899", "0.5069716", "0.5064084", "0.50622696", "0.5056564", "0.50534225", "0.50533885", "0.5046644", "0.50452745", "0.50401545", "0.5036533" ]
0.5152639
71
Creates random environments of the given type.
def set_random_envs(self):
    self.randomizer.randomize(num_samples=self.num_teachers)
    params = self.randomizer.get_params(fmt="dict", dtype="numpy")
    for e in range(self.num_teachers):
        self.teacher_envs.append(deepcopy(self.env_real))
        print({key: value[e] for key, value in params.items()})
        self.teacher_envs[e].domain_param = {key: value[e] for key, value in params.items()}
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def make_environment(seed, task_horizon):\n # Load the gym environment.\n environment = CartPoleEnv()\n environment = gym_wrappers.TimeLimit(environment, task_horizon)\n environment.seed(seed)\n environment = wrappers.GymWrapper(environment)\n environment = wrappers.SinglePrecisionWrapper(environment)\n return environment", "def _make_environment(\n self, problem_type, curriculum_sample, wrapper_type, backend_type=None):\n if backend_type is None:\n backend_type = FLAGS.backend\n return dm_construction.get_environment(\n problem_type,\n unity_environment=self._unity_envs[backend_type],\n wrapper_type=wrapper_type,\n curriculum_sample=curriculum_sample)", "def get_env_class(environment_type):\n if environment_type == \"vanilla\":\n return city.CityGridEnv\n elif environment_type == \"distraction\":\n return city.DistractionGridEnv\n elif environment_type == \"map\":\n return city.MapGridEnv\n elif environment_type == \"cooking\":\n return cooking.CookingGridEnv\n elif environment_type == \"miniworld_sign\":\n # Dependencies on OpenGL, so only load if absolutely necessary\n from envs.miniworld import sign\n return sign.MiniWorldSign\n else:\n raise ValueError(\n \"Unsupported environment type: {}\".format(environment_type))", "def make_env(env_id, rand, seed=0):\n def _init():\n env = gym.make(env_id)\n # env.unwrapped\n env.seed(seed + rand)\n return env\n set_global_seeds(seed)\n return _init", "def generate(self, site_type='random', arg='random'):\n size = entities.world['size']\n if site_type == 'random':\n if randint(1,3) == 1:\n site_type = 'adventure'\n else:\n site_type = 'resource'\n elif site_type in ref.material_type_dct.keys():\n self.resource = site_type\n site_type = 'resource'\n terrain_list = None\n if arg == 'random':\n terrain_list = [x for x in ref.terrain_dct.keys() if type(x) == int]\n elif arg in ref.terrain_type_list:\n terrain_list = [\n x for x in ref.terrain_dct.keys() if ref.terrain_dct[x]['terrain type'] == arg\n ]\n x = randint(0, size-1)\n y = randint(0, size-1)\n terrain_type = entities.world['grid'][y][x]\n site_locations = [s.location for s in entities.sites['object list']]\n while terrain_type not in terrain_list or [x,y] in site_locations:\n x = randint(0, size-1)\n y = randint(0, size-1)\n terrain_type = entities.world['grid'][y][x]\n\n self.location = [x,y]\n self.structure = Structure().generate(\n ref.terrain_dct[terrain_type]['terrain type'], site_type\n )\n if self.resource == None:\n if 'resource type' in ref.structure_type_dct[\n self.structure.structure_type\n ].keys():\n resource_type = ref.structure_type_dct[\n self.structure.structure_type]['resource type'\n ]\n resource_possibilities = []\n for possible_material in [\n x for x in ref.material_class_dct[resource_type][\n 'types'] if 'rarity' in ref.material_type_dct[x].keys()\n ]:\n for x in xrange(ref.rarity_dct[\n ref.material_type_dct[possible_material]['rarity']\n ]):\n resource_possibilities.append(possible_material)\n self.resource = choice(resource_possibilities)\n #resources measured in grams\n if self.resource != None:\n self.harvestable = randint(100000, 1500000)\n try:\n entities.town['object'].resources[\n ref.material_type_dct[self.resource]['class']][\n self.resource]['harvestable'] += self.harvestable\n except KeyError:\n pass\n #NOTE: These numbers suitable for metal, may not be for other materials\n #NOTE: Mine production should be ~1kg pure metal per day per miner.\n #NOTE: IRL mine has ~43500kg before producing much less.\n \n self.set_site_id()\n return self", "def 
_generate_raw_environments(self, num, seed):", "def make_env(rank, env_conf, seed=0):\n def _init():\n env = RedGymEnv(env_conf)\n env.seed(seed + rank)\n return env\n set_random_seed(seed)\n return _init", "def _create_environment(config):\n if isinstance(config.env, str):\n env = gym.make(config.env)\n else:\n env = config.env()\n if config.max_length:\n env = tools.wrappers.LimitDuration(env, config.max_length)\n env = tools.wrappers.RangeNormalize(env)\n env = tools.wrappers.ClipAction(env)\n env = tools.wrappers.ConvertTo32Bit(env)\n return env", "def init_environment(env_name):\n env = gym.make(env_name)\n discrete = False\n if type(env.action_space) is gym.spaces.Discrete:\n discrete = True\n else:\n env = NormalizedActions(env)\n return env, discrete", "def generate_environment(dataset, tmpdir):\n\n print(\">>> Test environment:\")\n print(\"dataset:\", dataset)\n print(\"tmpdir:\", tmpdir)\n\n generate_test_environment(tmpdir, dataset)\n\n return { 'dataset': dataset, 'tmpdir': tmpdir }", "def create_environment(game_name, seed, is_test=False):\n\n # Note, you may want to use a level cache to speed of compilation of\n # environment maps. See the documentation for the Python interface of DeepMind\n # Lab.\n config = {\n \"width\": FLAGS.width,\n \"height\": FLAGS.height,\n \"logLevel\": \"WARN\",\n \"n_input_frames\": FLAGS.history,\n \"show_display\": FLAGS.display,\n \"save_video\": FLAGS.save_video,\n }\n if is_test:\n config[\"allowHoldOutLevels\"] = \"true\"\n # Mixer seed for evalution, see\n # https://github.com/deepmind/lab/blob/master/docs/users/python_api.md\n config[\"mixerSeed\"] = 0x600D5EED\n\n p = py_process.PyProcess(\n environments.PyProcessAtari,\n game_name,\n config,\n FLAGS.num_action_repeats,\n seed,\n )\n return environments.FlowEnvironment(p.proxy)", "def make_env(rank, agents, seed=10000):\n\tdef _init():\n\t\tenv = TradingGameEnv.TradingGameEnv(player_count = NUM_PLAYERS, other_agent_list = agents,\n\t\t\tseq_per_day = SEQ_PER_DAY, cards_per_suit = CARDS_PER_SUIT, player_hand_count = HAND_COUNT,\n\t\t\trandom_seq = True, self_play = SELF_PLAY, policy_type = POLICY_TYPE, self_copy_freq = SELF_COPY_FREQ,\n\t\t\tobs_transaction_history_size=TRANSACTION_HISTORY_SIZE)\n\t\tenv.seed(seed + rank)\n\t\treturn env\n\tset_global_seeds(seed)\n\treturn _init", "def make_env(\n\t\tdomain_name,\n\t\ttask_name,\n\t\tseed=0,\n\t\tepisode_length=1000,\n\t\tframe_stack=3,\n\t\taction_repeat=4,\n\t\timage_size=100,\n\t\tmode='original'\n\t):\n\tassert mode in {'original', 'color_easy', 'color_hard', 'video_easy', 'video_hard'}, \\\n\t\tf'specified mode \"{mode}\" is not supported'\n\n\tenv = dmc2gym.make(\n\t\tdomain_name=domain_name,\n\t\ttask_name=task_name,\n\t\tseed=seed,\n\t\tvisualize_reward=False,\n\t\tfrom_pixels=True,\n\t\theight=image_size,\n\t\twidth=image_size,\n\t\tepisode_length=episode_length,\n\t\tframe_skip=action_repeat\n\t)\n\tenv = VideoWrapper(env, mode, seed)\n\tenv = FrameStack(env, frame_stack)\n\tenv = RandomizationWrapper(env, mode, seed)\n\n\treturn env", "def generate_random_value(self, type):\n generators = {\n str: lambda: self.generate_random_string(20, uppercase=True, punctuations=True),\n int: lambda: random.randrange(100000),\n float: lambda: random.random() * 100000.0,\n bool: lambda: bool(random.getrandbits(1)),\n list: lambda: self.generate_random_list_or_string(),\n dict: lambda: self.generate_random_dict_or_string()\n }\n generator = generators[type]\n return generator()", "def make_env(stack=True, scale_rew=True, 
frame_wrapper=WarpFrame, reward_type=None, env_idx=-1):\n print(env_idx)\n if env_idx == -1:\n idx = random.randint(0, 12)\n elif env_idx < 0 or env_idx > 12:\n print(\"index of environment {} out of range [0, 12]\".format(str(env_idx)))\n exit(1)\n else:\n idx = env_idx\n print(\"select level: \", idx)\n game, state = train_level[idx]\n # game, state = ['SonicTheHedgehog-Genesis', 'GreenHillZone.Act1']\n\n print(game, state)\n env = make(game=game, state=state)\n\n return wrap_env(env, stack, scale_rew, frame_wrapper, reward_type)", "def create_environment(cls, full_config):\n\n config = full_config['template']['devops_settings']\n environment = cls.create(config['env_name'])\n\n # create groups and drivers\n groups = config['groups']\n environment.add_groups(groups)\n\n # create address pools\n address_pools = config['address_pools']\n environment.add_address_pools(address_pools)\n\n # process group items\n for group_data in groups:\n group = environment.get_group(name=group_data['name'])\n\n # add l2_network_devices\n group.add_l2_network_devices(\n group_data.get('l2_network_devices', {}))\n\n # add network_pools\n group.add_network_pools(\n group_data.get('network_pools', {}))\n\n # Connect nodes to already created networks\n for group_data in groups:\n group = environment.get_group(name=group_data['name'])\n\n # add group volumes\n group.add_volumes(\n group_data.get('group_volumes', []))\n\n # add nodes\n group.add_nodes(\n group_data.get('nodes', []))\n\n return environment", "def buildWeatherFromType(type, environment, turns=-1, forever=True):\r\n weatherClass = WeatherFactory.weatherTypeDictionary[type]\r\n return weatherClass(overCallbackFunction=environment.clearWeather, turns=turns, forever=forever)", "def generate(self, terrain_type='random', site_type='random'):\n if terrain_type == 'random' and site_type == 'random':\n self.structure_type = choice(ref.structure_type_dct.keys())\n elif site_type == 'random':\n self.structure_type = choice(ref.structure_class_dct[terrain_type])\n elif terrain_type == 'random':\n self.structure_type = choice(ref.site_type_dct[site_type])\n else:\n self.structure_type = choice(\n [x for x in ref.site_type_dct[site_type] if ref.structure_type_dct[\n x]['class'] == terrain_type]\n )\n if self.get_attribute('site type') == 'resource':\n self.workers = 0\n else:\n self.workers = []\n self.monsters = []\n self.worker_capacity = self.get_attribute('worker capacity')\n self.time_until_harvest = self.get_attribute('time per harvest')\n return self", "def init():\n env = Environment(5, 5, 20, [10, 20, 10, 5])\n return env", "def get_environment_class_by_name(environment_type):\n for cls in util.iter_subclasses(Environment):\n if cls.tool_name == environment_type:\n return cls\n raise EnvironmentUnavailable(\n f\"Unknown environment type '{environment_type}'\")", "def _init_seeding(cls, seed_type=int(SeedType.NONE), seeds=None):\n seed_type = int(seed_type)\n \n if seed_type == (SeedType.NONE):\n assert seeds is None, \"Seed type set to NONE, therefore seed cannot be set.\"\n elif seed_type == (SeedType.CONSTANT):\n assert seeds is not None, \"Seed set to constant seed, so seed must be specified.\"\n cls._seed_generator = [int(x) for x in seeds.split(\",\") if x]\n elif seed_type == (SeedType.GENERATED):\n assert seeds is not None, \"Seed set to generated seed, so initial seed must be specified.\"\n cls._seed_generator = Random(int(seeds))\n elif seed_type == (SeedType.SPECIFIED):\n cls._seed_generator = ([[str(x) for x in s.split(\",\") if x] for s in 
seeds.split(\"-\") if s])\n else:\n raise TypeError(\"Seed type {} not supported\".format(seed_type))\n \n cls._seed_type = seed_type", "def easy(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None, **kwargs):\n\n physics = Physics.from_xml_string(*get_model_and_assets())\n task = Cloth(randomize_gains=False, random=random, **kwargs)\n environment_kwargs = environment_kwargs or {}\n return control.Environment(\n physics, task, time_limit=time_limit,n_frame_skip=1,special_task=True, **environment_kwargs)", "def generate_environment(timestep=3600,\n year_timer=2017,\n year_co2=2017,\n try_path=None,\n location=(51.529086, 6.944689),\n altitude=55,\n new_try=False):\n\n # Create environment\n timer = time.TimerExtended(timestep=timestep, year=year_timer)\n\n weather = weath.Weather(timer, useTRY=True, pathTRY=try_path,\n location=location, altitude=altitude,\n new_try=new_try)\n\n market = germanmarket.GermanMarket()\n co2em = co2.Emissions(year=year_co2)\n\n environment = env.EnvironmentExtended(timer=timer,\n weather=weather,\n prices=market,\n location=location,\n co2em=co2em)\n\n return environment", "def by_type(environments):\n types = {}\n for env in environments:\n et = env.environmentType\n options = types.setdefault(et.id, set())\n options.add(env.id)\n return types", "def get_environment():\n return GenericGymEnv(id=\"real-time-gym-v1\", gym_kwargs={\"config\": CONFIG_DICT})", "def make_halide_env(env_id, seed):\n logger.log('random_seed: %s'%seed)\n set_global_seeds(seed)\n env = gym.make(env_id)\n env = Monitor(env, logger.get_dir(), info_keywords=('best_exec', 'best_schedule'))\n env.seed(seed)\n return env", "def make_env(local_env, rank, seed=0):\n\n def _init():\n local_env.seed(seed + rank)\n return local_env\n\n set_random_seed(seed)\n return _init", "def make_env(local_env, rank, seed=0):\n\n def _init():\n local_env.seed(seed + rank)\n return local_env\n\n set_random_seed(seed)\n return _init", "def generate_random_toy() -> Toy:\n dimensions = round(uniform(5, 100), 2)\n rooms_number = randint(1, 5)\n return SantaWorkShop(dimensions, rooms_number, 5)", "def make_robotics_env(env_id, seed, rank=0):\n set_global_seeds(seed)\n env = gym.make(env_id)\n env = FlattenDictWrapper(env, ['observation', 'desired_goal'])\n env = Monitor(\n env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),\n info_keywords=('is_success',))\n env.seed(seed)\n return env", "def random(ctx, type=\"str\", nchars=\"\", special_chars=string.punctuation):\n\n pool_lookup = {\n \"str\": string.ascii_letters + string.digits + \"-_\",\n \"int\": string.digits,\n \"loweralpha\": string.ascii_lowercase,\n \"upperalpha\": string.ascii_uppercase,\n \"loweralphanum\": string.ascii_lowercase + string.digits,\n \"upperalphanum\": string.ascii_uppercase + string.digits,\n \"special\": string.ascii_letters + string.digits + special_chars,\n }\n\n default_nchars_lookup = {\n \"str\": 43,\n \"int\": 16,\n }\n\n # get pool of given type\n pool = pool_lookup.get(type, None)\n if not pool:\n raise RefError(\n \"{}: unknown random type used. 
Choose one of {}\".format(type, [key for key in pool_lookup])\n )\n\n # get default value for nchars if nchars is not specified\n if not nchars:\n nchars = default_nchars_lookup.get(type, 8)\n else:\n # check input for nchars\n try:\n nchars = int(nchars)\n except ValueError:\n raise RefError(f\"Ref error: eval_func: {nchars} cannot be converted into integer.\")\n\n # check if any special characters are specified without using type special\n if type != \"special\" and special_chars != string.punctuation:\n raise RefError(\n \"Ref error: eval_func: {} has no option to use special characters. Use type special instead, i.e. ||random:special:{}\".format(\n type, special_chars\n )\n )\n\n # check if pool is valid, eliminates duplicates\n allowed_pool = string.ascii_letters + string.digits + string.punctuation\n pool = \"\".join(set(pool).intersection(allowed_pool))\n\n # generate string based on given pool\n generated_str = \"\".join(secrets.choice(pool) for i in range(nchars))\n\n # set ctx.data to generated string\n ctx.data = generated_str", "def environments_of(groups):\n types = {}\n for group in groups:\n for env in group.environments:\n et = env.environmentType\n envs = types.setdefault((et.id, et.name), set())\n envs.add((env.id, env.name))\n return types", "def generate_random_human_from_environment(environment,\n generate_appearance=False):\n appearance = None\n if generate_appearance:\n appearance = \\\n HumanAppearance.generate_rand_human_appearance(HumanAppearance)\n configs = HumanConfigs.generate_random_human_config(environment)\n return Human.generate_human(appearance, configs)", "def run_seed(self, mode):\n # Clear data from tables\n # clear_data()\n if mode == MODE_CLEAR:\n return\n # industry = create_industy()\n # structure_type, structure_type1 = create_structure_type()\n # structure, structure2 = create_structure(structure_type)\n # stock = create_stock(industry, structure)\n # create_price_list(stock)\n # create_news(stock, structure, structure2)\n # create_analysis(structure)\n # create_market_indices()\n create_section_group()", "def make_env(env_name):\n \n env = gym.make(env_name) \n return env", "def createEnvironment(self, _):\r\n if self._namespaces:\r\n raise InternalError('The environment can have only one namespace '\r\n 'at a time.')\r\n\r\n environment = Environment(self)\r\n return self._avatar.callRemote('setupNamespace', environment)", "def _create_local_env_from_spec(\n env_spec: study_pb2.EnvironmentSpec) -> environment.Environment:\n global HAS_PROCO\n env_type = env_spec.WhichOneof('type')\n\n if env_type == 'procgen':\n from rlds_creator.envs import procgen_env\n return procgen_env.ProcgenEnvironment(env_spec)\n elif env_type == 'atari':\n from rlds_creator.envs import atari_env\n return atari_env.AtariEnvironment(env_spec)\n elif env_type == 'dmlab':\n from rlds_creator.envs import dmlab_env\n return dmlab_env.DmLabEnvironment(env_spec)\n elif env_type == 'net_hack':\n from rlds_creator.envs import net_hack_env\n return net_hack_env.NetHackEnvironment(env_spec)\n elif env_type == 'robodesk':\n from rlds_creator.envs import robodesk_env\n return robodesk_env.RoboDeskEnvironment(env_spec)\n elif env_type == 'robosuite':\n from rlds_creator.envs import robosuite_env\n return robosuite_env.RobosuiteEnvironment(env_spec)\n else:\n raise ValueError('Unsupported environment spec.')", "def create_environment(loader):\n loaders = (loader, PackageLoader('pynuts', 'templates'))\n environment = Environment(\n loader=ChoiceLoader(loaders), 
extensions=[ShowOnMatch])\n environment.globals.update({'url_for': flask.url_for})\n environment.filters['data'] = filters.data\n environment.filters['base64'] = filters.base64\n return environment", "def make_env(num_steps, stack=True, scale_rew=True):\n env = retro.make(game='SonicTheHedgehog-Genesis', state='GreenHillZone.Act1')\n env = gym.wrappers.TimeLimit(env, max_episode_steps=num_steps)\n env = SonicDiscretizer(env)\n env = AllowBacktracking(env)\n if scale_rew:\n env = RewardScaler(env, REWARD_RATE)\n env = WarpFrame(env)\n if stack:\n env = FrameStack(env, NUM_STATES)\n return env", "def env_init(self, env_info={}):\n self.dealer_sticks = env_info['dealer_sticks']\n self.random = np.random.RandomState(env_info['seed'])\n self.current_state = None", "def build_randomization(\n cls, constants: CType, parameters: PType\n ) -> EnvRandomization:\n return EnvRandomization(\n parameter_randomizer=cls.build_parameter_randomizer(constants, parameters),\n observation_randomizer=EnvObservationRandomizer(\n cls.build_observation_randomizers(constants)\n ),\n action_randomizer=EnvActionRandomizer(\n cls.build_action_randomizers(constants)\n ),\n simulation_randomizer=EnvSimulationRandomizer(\n cls.build_simulation_randomizers(constants)\n ),\n )", "def generate_environment(self):\n try:\n if self._environment is None:\n self._environment = Environment.fromfilepath(self._environmentName,\n self._configuration.environment_file_path)\n except Exception:\n raise", "def setup_env(config_path=BENCHMARK_CONF_PATH, env_type=None):\n check_arg_not_blank(config_path, 'config_path')\n \n conf = BenchmarkConfig(config_path=config_path)\n _setup_fabric_env(conf)\n\n if not env_type or env_type == 'client':\n execute(_setup_clients, conf, hosts=_config_hosts_or_cli_hosts(conf.client_conf))\n\n if not env_type or env_type == 'server':\n execute(_setup_servers, conf, hosts=_config_hosts_or_cli_hosts(conf.server_conf))", "def generate_E_random(number_obeservations, number_environments):\n E = np.random.randint(0, number_environments, (number_obeservations,1))\n return E", "def make_environment(manager: 'Manager') -> None:\n global environment\n environment = FlexGetEnvironment(\n undefined=StrictUndefined,\n loader=ChoiceLoader(\n [\n PackageLoader('flexget'),\n FileSystemLoader(os.path.join(manager.config_base, 'templates')),\n ]\n ),\n extensions=['jinja2.ext.loopcontrols'],\n )\n environment.template_class = FlexGetTemplate\n for name, filt in list(globals().items()):\n if name.startswith('filter_'):\n environment.filters[name.split('_', 1)[1]] = filt\n for name, test in list(globals().items()):\n if name.startswith('is_'):\n environment.tests[name.split('_', 1)[1]] = test", "def create_data_generators(shuffle=True, novelty_type='normal', item_to_include='None',\n scale_level=1):\n\n total_noi_i = 10 # Number of processed images from one environemnt i\n noe = 1 # Numer of environments\n n_p = 32 # Patch size, patch --> n_p x n_p\n\n novelty = novelty_type\n datasets = []\n\n for i in range(noe):\n\n # Load only images of the environment which includes images of the stated novel item.\n if item_to_include is not None and novelty == 'novel_item':\n dataset_env_i = PolycraftDatasetWithSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level,\n item_name=item_to_include)\n datasets.append(dataset_env_i)\n # We only process the one environment with the item (maybe change this\n # if we have more than one environement per novel_item!?)\n break\n\n # No specific item 
given which should be included.\n else:\n dataset_env_i = PolycraftDatasetNoSpecificItem(\n nov_type=novelty, noi=total_noi_i, env_idx=i, p_size=n_p, scale_factor=scale_level)\n datasets.append(dataset_env_i)\n\n final_dataset = ConcatDataset(datasets)\n\n total_noi = len(final_dataset) # Total number of processed images from all datasets\n\n if(total_noi < 7):\n print('Number of samples too small for splitting dataset in training-/valid-/test set.')\n\n train_noi = int(0.7 * total_noi) # Number of images used for training (70 %)\n valid_noi = int(0.15 * total_noi) # Number of images used for validation (15 %)\n test_noi = total_noi - train_noi - valid_noi # Number of images used for testing (15 %)\n train_dataset, valid_dataset, test_dataset = torch.utils.data.random_split(\n final_dataset, [train_noi, valid_noi, test_noi])\n\n train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)\n valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=True)\n test_loader = DataLoader(test_dataset, batch_size=1, shuffle=True)\n\n return train_loader, valid_loader, test_loader", "def generate_random_roles():\n\n role_names = set()\n roles_size = 0\n while roles_size != 3:\n role_name = ''\n for i in range(3):\n role_name += random.choice(['0', '1'])\n if role_name not in role_names:\n role_names.add(role_name)\n roles_size += 1\n\n for role_name in role_names:\n delete_access = ActionTypes.DELETE.value if role_name[0] == '1' else ''\n write_access = ActionTypes.WRITE.value if role_name[1] == '1' else ''\n read_access = ActionTypes.READ.value if role_name[2] == '1' else ''\n\n allowed_actions = [access for access in (delete_access, write_access, read_access) if access]\n Role(role_name, allowed_actions)", "def main():\n # parse arguments from the command line (argparse validates arguments)\n args = _get_args()\n # build the environment with the given ID\n env = gym.make(args.env)\n # play the environment with the given mode\n if args.mode == 'human':\n play_human(env)\n else:\n play_random(env, args.steps)", "def generate_hosts(quantity, host_type):\n assert type(quantity) == int\n assert host_type in ['source', 'host']\n for n in range(0, quantity, 1):\n if host_type == 'host':\n yield {'hostname': 'host-{}'.format(uuid.uuid4()),\n 'lastReceived': random.randint(1154394061 * 1000, 1505330380 * 1000)}\n elif host_type == 'source':\n yield {'sourcePath': \".\".join(map(str, (random.randint(0, 254) for _ in range(4)))),\n 'lastReceived': random.randint(1154394061 * 1000, 1505330380 * 1000)}", "def env_generator(\n env_name: str, max_episode_steps: int, env_wrappers: List[gym.Wrapper] = None\n) -> Callable:\n\n def _thunk(rank: int):\n env = gym.make(env_name)\n env.seed(777 + rank + 1)\n env, _ = set_env(env, max_episode_steps, env_wrappers)\n return env\n\n return _thunk", "def build_net(env, seeds, model_type, hidden_size, noise_std, action_type):\r\n torch.manual_seed(seeds[0])\r\n if action_type == gym.spaces.box.Box:\r\n \taction_space = env.action_space.shape[0] \r\n else:\r\n action_space = env.action_space.n\r\n if model_type == \"ff\":\r\n net = model.Model_FF(env.observation_space.shape[0], action_space, hidden_size, action_type)\r\n elif model_type == \"cnn\":\r\n net = model.Model_CNN1D(env.observation_space.shape[0], action_space, hidden_size)\r\n else:\r\n net = model.Model_LSTM(env.observation_space.shape[0], action_space, hidden_size, action_type)\r\n for seed in seeds[1:]:\r\n net = mutate(net, seed, noise_std, copy_net=False)\r\n\r\n return net", "def 
make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_index=0, reward_scale=1.0, gamestate=None):\n if wrapper_kwargs is None: wrapper_kwargs = {}\n mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0\n seed = seed + 10000 * mpi_rank if seed is not None else None\n def make_thunk(rank):\n return lambda: make_env(\n env_id=env_id,\n env_type=env_type,\n subrank = rank,\n seed=seed,\n reward_scale=reward_scale,\n gamestate=gamestate,\n wrapper_kwargs=wrapper_kwargs\n )\n\n set_global_seeds(seed)\n if num_env > 1:\n return SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])\n else:\n return DummyVecEnv([make_thunk(start_index)])", "def __init__(self, config_type='random'):\n switcher = {\n 'random': self._create_random_config,\n 'generic_class': self._create_generic_class_config,\n 'generic_class_diff': self._create_generic_class_diff_config,\n 'generic_class_orient': self._create_generic_class_orient_config,\n 'random_sequence_baseline': self._create_random_sequence_baseline_config,\n 'playground': self._create_playground_config,\n }\n self.create_config = switcher.get(config_type, None)\n\n logging.info('Setting config type to {}'.format(config_type))\n if self.create_config is None:\n logging.warning('Invalid config type {}. Setting config type to the default random config.'\n .format(config_type))\n self.create_config = self._create_random_config", "def create_random(self):\n number_of_layers = random.choice(self.parameter_choices['number_of_layers'])\n neurons_per_layer = []\n dropout_per_layer = []\n self.network['number_of_layers'] = number_of_layers\n\n for i in range(number_of_layers):\n neurons_per_layer.append(random.choice(self.parameter_choices['neurons_per_layer']))\n dropout_per_layer.append(random.choice(self.parameter_choices['dropout_per_layer']))\n\n self.network['neurons_per_layer'] = neurons_per_layer\n self.network['dropout_per_layer'] = dropout_per_layer\n self.network['optimizer'] = random.choice(self.parameter_choices['optimizer'])\n self.network['activation'] = random.choice(self.parameter_choices['activation'])", "def make_planet():\n dice = random.randint(0, 100)\n if dice < 60:\n status = 'nopressure'\n else:\n status = random.choice(list(constants.STATUS.keys()))\n\n return Planet(\n name=random.choice(constants.NAMES),\n x=random.randint(constants.XMIN, constants.MAXWIDTH - 1),\n y=random.randint(constants.YMIN, constants.MAXHEIGHT - 1),\n system_size=random.choice(list(constants.SYSTEMSIZE.keys())),\n tech_level=random.choice(list(constants.TECHLEVEL.keys())),\n regim=random.choice(list(constants.REGIM.keys())),\n special=random.choice(list(constants.SPECIALRESOURCES.keys())),\n status=status,\n price_slip={},\n shipyard=[])", "def create_test_env(\n env_id: str,\n n_envs: int = 1,\n stats_path: Optional[str] = None,\n seed: int = 0,\n log_dir: Optional[str] = None,\n should_render: bool = True,\n hyperparams: Optional[Dict[str, Any]] = None,\n env_kwargs: Optional[Dict[str, Any]] = None,\n) -> VecEnv:\n # Create the environment and wrap it if necessary\n env_wrapper = get_wrapper_class(hyperparams)\n\n hyperparams = {} if hyperparams is None else hyperparams\n\n if \"env_wrapper\" in hyperparams.keys():\n del hyperparams[\"env_wrapper\"]\n\n vec_env_kwargs = {}\n vec_env_cls = DummyVecEnv\n if n_envs > 1 or \"Bullet\" in env_id:\n # HACK: force SubprocVecEnv for Bullet env\n # as Pybullet envs does not follow gym.render() interface\n vec_env_cls = SubprocVecEnv\n # start_method = 'spawn' for thread safe\n\n env = 
make_vec_env(\n env_id,\n n_envs=n_envs,\n monitor_dir=log_dir,\n seed=seed,\n wrapper_class=env_wrapper,\n env_kwargs=env_kwargs,\n vec_env_cls=vec_env_cls,\n vec_env_kwargs=vec_env_kwargs,\n )\n\n # Load saved stats for normalizing input and rewards\n # And optionally stack frames\n if stats_path is not None:\n if hyperparams[\"normalize\"]:\n print(\"Loading running average\")\n print(f\"with params: {hyperparams['normalize_kwargs']}\")\n path_ = os.path.join(stats_path, \"vecnormalize.pkl\")\n if os.path.exists(path_):\n env = VecNormalize.load(path_, env)\n # Deactivate training and reward normalization\n env.training = False\n env.norm_reward = False\n else:\n raise ValueError(f\"VecNormalize stats {path_} not found\")\n\n n_stack = hyperparams.get(\"frame_stack\", 0)\n if n_stack > 0:\n print(f\"Stacking {n_stack} frames\")\n env = VecFrameStack(env, n_stack)\n return env", "def env_creator(env_config):\n return CartPoleBTEnv(\n goal_state=env_config['goal_state'],\n disturbances=env_config['disturbances'],\n initial_state=env_config['initial_state'],\n initial_state_variance=env_config['initial_state_variance']\n )", "def make_envs(env_gen: Callable, n_envs: int = 8) -> SubprocVecEnv:\n envs = [env_gen(i) for i in range(n_envs)]\n subproc_env = SubprocVecEnv(envs)\n return subproc_env", "async def test_create_engine_uses_robot_type(\n decoy: Decoy, robot_type: RobotType, deck_type: pe_types.DeckType\n) -> None:\n # TODO(mc, 2021-06-11): to make these test more effective and valuable, we\n # should pass in some sort of actual, valid HardwareAPI instead of a mock\n hardware_api = decoy.mock(cls=API)\n subject = EngineStore(\n hardware_api=hardware_api, robot_type=robot_type, deck_type=deck_type\n )\n\n await subject.create(run_id=\"run-id\", labware_offsets=[], protocol=None)\n\n assert subject.engine.state_view.config.robot_type == robot_type", "def generate_test_environment(tmpdir, dataset):\n\n # Overwrite settings with test settings\n generate_test_settings(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n \n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n\n test_dataset = dataset + '.' 
+ dataset_type\n generate_test_dataset_archive(filepath, test_dataset)", "def set_single_environment(cls, products, values):\n env = values[\"name\"]\n for i in range(len(products)):\n products[i][\"environments\"] = [env]\n return products", "def __create_test_environment(self):\n os.chdir(self.wd)\n temp_dir = tempfile.gettempdir()\n self.test_root = os.path.join(temp_dir, \"test-grpc\")\n print(\"Creating testing environment in {}\".format(self.test_root))\n if os.path.exists(self.test_root):\n # delete any previous environment\n shutil.rmtree(self.test_root)\n # create root directory\n os.makedirs(self.test_root)\n def copy_app(name):\n app_root = os.path.join(self.test_root, name)\n os.makedirs(app_root)\n filename = \"grpc-{}\".format(name)\n src = os.path.join(self.args.bin, filename)\n dst = os.path.join(app_root, filename)\n shutil.copy(src, dst)\n return dst\n # copy client and server into the new test environment\n self.server_path = copy_app(\"server\")\n self.client_path = copy_app(\"client\")", "def _get_or_create_environment_objects(openshift_api=settings.OPENSHIFT_API):\n\n choices = {}\n\n for k in openshift_api.keys():\n obj, created = Environment.objects.get_or_create(environment=k)\n choices[k] = obj\n logger.info('{}, {}'.format(obj, created))\n\n return choices", "def make_env(self, env_id, seed, logger_dir=None, reward_scale=1.0, mpi_rank=0, subrank=0, info_keywords=()):\n scenario = scenarios.load('{}.py'.format(env_id)).Scenario()\n world = scenario.make_world()\n env_dict = {\n \"world\": world,\n 'reset_callback': scenario.reset_world,\n 'reward_callback': scenario.reward, \n 'observation_callback': scenario.observation,\n 'info_callback': None,\n 'done_callback': scenario.done, \n 'shared_viewer': True\n }\n env = gym.make('MultiAgent-v0', **env_dict)\n env.seed(seed + subrank if seed is not None else None)\n env = Monitor(env,\n logger_dir and os.path.join(logger_dir, str(mpi_rank) + '.' 
+ str(subrank)),\n allow_early_resets=True,\n info_keywords=info_keywords)\n env = ClipActionsWrapper(env)\n if reward_scale != 1.0:\n from baselines.common.retro_wrappers import RewardScaler\n env = RewardScaler(env, reward_scale)\n return env", "def make_environment(\n evaluation: bool = False,\n task: str = 'MountainCarContinuous-v0') -> dm_env.Environment:\n del evaluation\n\n # Load the gym environment.\n environment = gym.make(task)\n\n # Make sure the environment obeys the dm_env.Environment interface.\n environment = wrappers.GymWrapper(environment)\n # Clip the action returned by the agent to the environment spec.\n environment = wrappers.CanonicalSpecWrapper(environment, clip=True)\n environment = wrappers.SinglePrecisionWrapper(environment)\n\n return environment", "def _create_spacecrafts(self):\n self.spacecraft = MPDSpacecraft.create_random(self.space, )\n self.space.spacecrafts.append(self.spacecraft)\n\n vehicles_type = utils.class_from_path(self.config[\"other_spacecrafts_type\"])\n for _ in range(self.config[\"spacecrafts_count\"]):\n self.road.spacecrafts.append(spacecrafts_type.create_random(self.space))", "def conda_create_environment(name, python='3'):\n conda = '{0}/bin/conda'.format(utils.home('apps', 'miniconda'))\n\n run('{conda} create --name {name} python={python} --yes'.format(\n name=name,\n conda=conda,\n python=python))", "def build_env(env_id, inst=0, **all_args):\n env_keys = ['dt', 'timing']\n if env_id == 'RDM-v0':\n env_keys.append('stimEv')\n env_args = {x: all_args[x] for x in env_keys}\n\n if all_args['combine']:\n env1 = gym.make(env_id, **env_args)\n env_args['timing'] = all_args['timing2']\n env2 = gym.make(all_args['env2'], **env_args)\n env = combine.combine(env1, env2, delay=all_args['delay'])\n else:\n env = gym.make(env_id, **env_args)\n if all_args['trial_hist']:\n env = trial_hist.TrialHistory(env, rep_prob=all_args['rep_prob'],\n block_dur=all_args['bl_dur'],\n blk_ch_prob=all_args['blk_ch_prob'])\n elif all_args['side_bias']:\n env = side_bias.SideBias(env, prob=all_args['rep_prob'],\n block_dur=all_args['bl_dur'])\n if all_args['catch_trials']:\n env = catch_trials.CatchTrials(env,\n catch_prob=all_args['catch_prob'],\n stim_th=all_args['stim_th'])\n if all_args['reaction_time']:\n env = reaction_time.ReactionTime(env)\n if all_args['pass_reward']:\n env = pass_reward.PassReward(env)\n if all_args['pass_action']:\n env = pass_action.PassAction(env)\n env = manage_data.manage_data(env, inst=inst, plt_tr=all_args['figs'],\n folder=all_args['save_path'])\n return env", "def testrandom(self):\n for i in range(100):\n WeaponAbility()", "def randomize(self):\n for network in self.networks.values():\n network.database = []\n self.env = Environment(self.networks)", "def make_mujoco_env(env_id, seed):\n rank = MPI.COMM_WORLD.Get_rank()\n set_global_seeds(seed + 10000 * rank)\n env = gym.make(env_id)\n env = Monitor(env, os.path.join(logger.get_dir(), str(rank)))\n env.seed(seed)\n return env", "def _create_random_ca(cls):\n random_ca = entity.CustomAttribute()\n random_ca.ca_type = random.choice(AttributesTypes.ALL_TYPES)\n random_ca.title = cls._generate_title(random_ca.ca_type)\n random_ca.definition_type = random.choice(objects.all_objects)\n return random_ca", "def draw_random_setup(types_available, team, game_dim):\n\n nr_pieces = len(types_available)-1\n types_available = [type_ for type_ in types_available if not type_ == 0]\n if game_dim == 5:\n row_offset = 2\n elif game_dim == 7:\n row_offset = 3\n else:\n row_offset = 4\n setup_agent = 
np.empty((row_offset, game_dim), dtype=object)\n if team == 0:\n flag_positions = [(game_dim-1, j) for j in range(game_dim)]\n flag_choice = np.random.choice(range(len(flag_positions)), 1)[0]\n flag_pos = game_dim-1 - flag_positions[flag_choice][0], game_dim-1 - flag_positions[flag_choice][1]\n setup_agent[flag_pos] = pieces.Piece(0, 0, flag_positions[flag_choice])\n\n types_draw = np.random.choice(types_available, nr_pieces, replace=False)\n positions_agent_0 = [(i, j) for i in range(game_dim-row_offset, game_dim) for j in range(game_dim)]\n positions_agent_0.remove(flag_positions[flag_choice])\n\n for idx in range(nr_pieces):\n pos = positions_agent_0[idx]\n setup_agent[(game_dim-1 - pos[0], game_dim-1 - pos[1])] = pieces.Piece(types_draw[idx], 0, pos)\n elif team == 1:\n flag_positions = [(0, j) for j in range(game_dim)]\n flag_choice = np.random.choice(range(len(flag_positions)), 1)[0]\n setup_agent[flag_positions[flag_choice]] = pieces.Piece(0, 1, flag_positions[flag_choice])\n\n types_draw = np.random.choice(types_available, nr_pieces, replace=False)\n positions_agent_1 = [(i, j) for i in range(row_offset) for j in range(game_dim)]\n positions_agent_1.remove(flag_positions[flag_choice])\n\n for idx in range(nr_pieces):\n pos = positions_agent_1[idx]\n setup_agent[pos] = pieces.Piece(types_draw[idx], 1, pos)\n return setup_agent", "def create_universe():\n # Universe is a large list, planets and others things are separated using\n # type() and isinstance(object, ClassInfo)\n univers = []\n captain = Captain()\n captain.ship = Ship()\n captain.account = BankAccount(captain.name, [])\n planetes = create_planetes()\n for planete in planetes:\n if planete.homeworld:\n captain.homeworld = planete\n captain.location = planete\n if planete.tech_level > 5:\n planete.shipyard = populate_shipyard()\n\n univers.append(captain)\n univers.extend(planetes)\n\n return univers", "def random_placement(area):\n\n area.create_houses(True)\n\n for house in area.houses:\n place_house(area, house)", "def __init__(self, env, random_seed=None):\n self.env = env \n self.RandomState = np.random.RandomState(random_seed)", "def initialize_random_number_generator(self,question_type):\n\t\tself.generator.seed(self.generate_index(self.magic, self.level, self.problem_id, question_type))", "def _active_environment(self, tuple_type):\n return", "def create_environment(args):\n env.username = args.user\n env.password = args.password\n env.service_url = args.service_url\n env.quiet = args.quiet\n env.verbose = args.verbose\n env.manifest = args.manifest\n env.debug = args.debug\n env.always_confirm = args.yes\n env.args = args\n env.api = ravello.RavelloClient(env.username, env.password, env.service_url)", "def network_factory(\n environment_spec: specs.EnvironmentSpec,\n agent_types: Dict[ma_types.AgentID, ma_types.GenericAgent],\n init_network_fn: Optional[ma_types.InitNetworkFn] = None\n) -> ma_types.MultiAgentNetworks:\n init_fn = init_network_fn or init_default_network\n networks = {}\n for agent_id, agent_type in agent_types.items():\n single_agent_spec = ma_utils.get_agent_spec(environment_spec, agent_id)\n networks[agent_id] = init_fn(agent_type, single_agent_spec)\n return networks", "def role_batch():\n return [\n RoleFactory(\n scopes=ScopeFactory.create_batch(randint(0, 3), type=choice(('odp', 'client'))),\n is_collection_role=n in (1, 2) or randint(0, 1),\n )\n for n in range(randint(3, 5))\n ]", "def _random_mode(self):\n modes = ((1, 2, 3, 4, 5),\n (1, 2, 4, 5),\n (2, 3, 4),\n (1, 3, 5),\n (1, 5),\n (2, 
4),\n (3,),\n ())\n return random.choice(modes)", "def random_player(env_name, render=False):\n\n # Make\n env = gym.make(env_name)\n\n n_game = 0\n\n # For each game\n while True:\n\n # Reset\n env.reset()\n done = False\n if render:\n env.render()\n\n # Until the end\n while not done:\n\n # Random agent moves\n action = env.action_space.sample()\n\n # Environment moves\n observation, reward, done, info = env.step(action)\n\n # Result\n if render:\n env.render()\n yield observation\n\n n_game += 1\n\n # Leave\n env.close()", "def testrandom(self):\n for i in range(100):\n AmuletAbility()", "def jobtype_factory(jobtype_toolkit):\n\n jobtype_toolkits = {'aimless_shooting': jobtype.AimlessShooting(),\n 'committor_analysis': jobtype.CommittorAnalysis(),\n 'equilibrium_path_sampling': jobtype.EquilibriumPathSampling(),\n 'find_ts': jobtype.FindTS(),\n 'umbrella_sampling': jobtype.UmbrellaSampling()}\n\n if jobtype_toolkit not in jobtype_toolkits.keys():\n raise ValueError('unsupported JobType name: ' + jobtype_toolkit)\n\n return jobtype_toolkits[jobtype_toolkit]", "def seed_model(model):\n if model == 'all':\n seed_all()\n elif model == 'client':\n seed_client()\n elif model == 'comment':\n seed_comment()\n elif model == 'staff':\n seed_staff()\n elif model == 'request':\n seed_request()", "def construct_envs(\n config,\n env_class,\n workers_ignore_signals=False,\n):\n\n num_processes = config.NUM_ENVIRONMENTS\n configs = []\n env_classes = [env_class for _ in range(num_processes)]\n dataset = habitat.datasets.make_dataset(config.TASK_CONFIG.DATASET.TYPE)\n scenes = config.TASK_CONFIG.DATASET.CONTENT_SCENES\n if \"*\" in config.TASK_CONFIG.DATASET.CONTENT_SCENES:\n scenes = dataset.get_scenes_to_load(config.TASK_CONFIG.DATASET)\n\n if num_processes > 1:\n if len(scenes) == 0:\n raise RuntimeError(\n \"No scenes to load, multiple process logic relies on being able to split scenes uniquely between processes\"\n )\n\n if len(scenes) < num_processes:\n scenes = scenes * num_processes\n\n random.shuffle(scenes)\n\n scene_splits = [[] for _ in range(num_processes)]\n for idx, scene in enumerate(scenes):\n scene_splits[idx % len(scene_splits)].append(scene)\n\n assert sum(map(len, scene_splits)) == len(scenes)\n\n for i in range(num_processes):\n proc_config = config.clone()\n proc_config.defrost()\n\n task_config = proc_config.TASK_CONFIG\n task_config.SEED = task_config.SEED + i\n if len(scenes) > 0:\n task_config.DATASET.CONTENT_SCENES = scene_splits[i]\n\n task_config.SIMULATOR.HABITAT_SIM_V0.GPU_DEVICE_ID = (\n config.SIMULATOR_GPU_ID\n )\n\n task_config.SIMULATOR.AGENT_0.SENSORS = config.SENSORS\n\n proc_config.freeze()\n configs.append(proc_config)\n\n envs = habitat.ThreadedVectorEnv(\n make_env_fn=make_env_fn,\n env_fn_args=tuple(zip(configs, env_classes)),\n workers_ignore_signals=workers_ignore_signals,\n )\n return envs", "def create_aiida_project_environment(self):\n try:\n self.create_folder_structure()\n self.build_python_environment()\n self.install_packages_from_index()\n except Exception:\n self.exit_on_exception()\n raise\n self.create_spec_entry()", "def generate_random_individual():\n genotype = []\n ### Your code here\n return {'genotype': genotype, 'fitness': None }", "def base_env(*args, **kwargs):\n try:\n # regular gym\n env = gym.make(*args, **kwargs)\n except:\n try:\n # gym retro\n env = retro.make(*args, **kwargs)\n except:\n # gym-super-mario-bros\n env = gym_super_mario_bros.make(*args, **kwargs)\n env.recognized = None\n return env", "def get_rng(environ, 
experiment, swabid):\n r = Random()\n r.seed(experiment.seed_strategy(environ, experiment, swabid))\n return r", "def usertype_add(request, simulation):\n # Create new distributions with good defaults.\n alphaTI = Distribution(type='NONE', mean=10)\n alphaTP = Distribution(type='NONE', mean=15)\n beta = Distribution(type='NONE', mean=5)\n delta = Distribution(type='NONE', mean=10)\n departureMu = Distribution(type='NONE', mean=2)\n gamma = Distribution(type='NONE', mean=20)\n modeMu = Distribution(type='NONE', mean=5)\n penaltyTP = Distribution(type='NONE', mean=2)\n routeMu = Distribution(type='NONE', mean=10)\n # Default value for t star is average arrival at middle of period and\n # uniform distribution over half of the period.\n mid_time = (simulation.startTime + simulation.lastRecord) / 2\n length = simulation.lastRecord - simulation.startTime\n tstar = Distribution(type='UNIFORM', mean=mid_time, std=length / (4 * sqrt(3)))\n # Save the distributions to generate ids.\n alphaTI.save()\n alphaTP.save()\n beta.save()\n delta.save()\n departureMu.save()\n gamma.save()\n modeMu.save()\n penaltyTP.save()\n routeMu.save()\n tstar.save()\n # Create the new user type.\n usertype = UserType()\n usertype.alphaTI = alphaTI\n usertype.alphaTP = alphaTP\n usertype.beta = beta\n usertype.delta = delta\n usertype.departureMu = departureMu\n usertype.gamma = gamma\n usertype.modeMu = modeMu\n usertype.penaltyTP = penaltyTP\n usertype.routeMu = routeMu\n usertype.tstar = tstar\n # Set user_id to user_id of previous usertype + 1.\n usertypes = get_query('usertype', simulation)\n if usertypes.exists():\n usertype.user_id = usertypes.last().user_id + 1\n else:\n usertype.user_id = 1\n usertype.save()\n # Create a demand segment and a matrix for the user type.\n matrix = Matrices()\n matrix.save()\n demandsegment = DemandSegment()\n demandsegment.usertype = usertype\n demandsegment.matrix = matrix\n demandsegment.save()\n demandsegment.demand.add(simulation.scenario.demand)\n # Return the view to edit the new user type.\n return HttpResponseRedirect(\n reverse('metro:usertype_edit', args=(simulation.id, demandsegment.id,))\n )", "def generate_deck(suits=4, type_cards=13):\n cards = []\n for suite in range(suits):\n for type_card in range(1, type_cards+1):\n # Setting the key-value pair for every card\n if (type_card == 1):\n cards.append({'A':type_cards+1})\n elif (type_card == 11):\n cards.append({'J': type_card})\n elif (type_card == 12):\n cards.append({'Q': type_card})\n elif (type_card == 13):\n cards.append({'K': type_card})\n else:\n cards.append({type_card:type_card})\n # Randomize the set of cards in the deck\n random.shuffle(cards)\n return cards", "def setup(env, NUM_TRACKS, landtime, t_inter):\n # Create the airport\n airport = Airport(env, NUM_TRACKS, landtime)\n\n # Create 4 initial planes\n for i in range(1):\n env.process(plane(env, 'Aviao %d' % i, airport))\n\n # Create more planes while the simulation is running\n while True:\n yield env.timeout(random.randint(t_inter-2, t_inter+2))\n# yield env.timeout(random.expovariate(1.0 / t_inter))\n i += 1\n env.process(plane(env, 'Aviao %d' % i, airport))", "def generate(count):\n return unpack_random_animals(generate_animals_randomly(count))", "def get_env_type ( base_name ) :\n return base_name.split( '-', 1 )[ 0 ]", "def init_from_env(\n *,\n device_type: T.Optional[str] = None,\n dist_init_method_type: T.Literal[\"env\", \"tcp\", \"file\"] = \"env\",\n pg_backend: T.Optional[str] = None,\n pg_timeout: timedelta = default_pg_timeout,\n 
float32_matmul_precision: str = \"high\",\n) -> torch.device:\n device = torch.device(\"cpu\") if device_type == \"cpu\" else get_device_from_env()\n\n if device_type is not None and device.type != device_type:\n raise RuntimeError(\n f\"Device type is specified to {device_type} but got {device.type} from env\"\n )\n\n if _check_dist_env():\n if not torch.distributed.is_available():\n _log.warning(\n \"torch.distributed is not available. Skipping initializing the process group.\"\n )\n return device\n if torch.distributed.is_initialized():\n _log.warning(\n \"torch.distributed is already initialized. Skipping initializing the process group.\"\n )\n return device\n pg_backend = (\n pg_backend\n if pg_backend is not None\n else get_process_group_backend_from_device(device)\n )\n init_method: Optional[str] = None\n if dist_init_method_type == \"tcp\":\n init_method = get_tcp_init_method()\n elif dist_init_method_type == \"file\":\n init_method = get_file_init_method()\n torch.distributed.init_process_group(\n init_method=init_method, backend=pg_backend, timeout=pg_timeout\n )\n maybe_enable_tf32(float32_matmul_precision)\n return device", "def init_gym(env_name):\n env = gym.make(env_name)\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n return env, obs_dim, act_dim", "def init_gym(env_name):\n env = gym.make(env_name)\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n return env, obs_dim, act_dim", "def do_create_environment(csp: CloudProviderInterface, environment_id=None):\n\n environment = Environments.get(environment_id)\n\n with claim_for_update(environment) as environment:\n\n if environment.cloud_id is not None:\n app.logger.warning(\n \"Environment cloud ID %s already present.\", environment.cloud_id\n )\n return\n\n parent_id = environment.application.cloud_id\n tenant_id = environment.portfolio.csp_data[\"tenant_id\"]\n\n log_do_create_environment(environment.portfolio.id, parent_id, tenant_id)\n\n payload = EnvironmentCSPPayload(\n tenant_id=tenant_id, display_name=environment.name, parent_id=parent_id\n )\n env_result = csp.create_environment(payload)\n Environments.update(environment, new_data={\"cloud_id\": env_result.id})\n\n app.logger.info(\"Created environment %s\", env_result.name)\n async_result = create_subscription.delay(environment_id=environment.id)\n app.logger.info(\n \"Attempting to create subscription for environment %s [Task ID: %s])\",\n env_result.name,\n async_result.task_id,\n )" ]
[ "0.639185", "0.61963034", "0.6018439", "0.5951477", "0.58627504", "0.5845492", "0.57870317", "0.57843524", "0.5763606", "0.56360656", "0.56098664", "0.5605386", "0.55746096", "0.55565745", "0.55424", "0.5505138", "0.5489955", "0.5485594", "0.5475609", "0.54569376", "0.54431057", "0.5383052", "0.5377942", "0.537035", "0.533368", "0.5313675", "0.5278804", "0.5278804", "0.52732605", "0.52709585", "0.52593523", "0.52563876", "0.52329487", "0.52316195", "0.5218797", "0.52102125", "0.5194515", "0.51866895", "0.5166156", "0.515835", "0.51398325", "0.5135437", "0.5132706", "0.51324636", "0.5131158", "0.51213187", "0.5096718", "0.509515", "0.50815076", "0.507846", "0.50758123", "0.5074542", "0.50720716", "0.505994", "0.5058852", "0.5054973", "0.50504345", "0.50494015", "0.5032212", "0.50084466", "0.49937198", "0.49893355", "0.4988699", "0.49881414", "0.49587047", "0.49550214", "0.4952673", "0.4946545", "0.49359736", "0.49356723", "0.49349436", "0.49067375", "0.48907775", "0.48858875", "0.488026", "0.48751807", "0.48678333", "0.48659298", "0.48584455", "0.48572108", "0.4855854", "0.48518696", "0.48496425", "0.4840201", "0.48333257", "0.48306638", "0.4829679", "0.4815217", "0.48084435", "0.47800606", "0.4769437", "0.47504744", "0.47403175", "0.47369665", "0.47293139", "0.47227347", "0.47162747", "0.47113392", "0.47113392", "0.47046828" ]
0.5628647
10
Recursively load all teachers that can be found in the current experiment's directory.
def load_teachers(self): # Get the experiment's directory to load from ex_dir = ask_for_experiment(max_display=10, env_name=self.env_real.name, perma=False) self.load_teacher_experiment(ex_dir) if len(self.teacher_policies) < self.num_teachers: print( f"You have loaded {len(self.teacher_policies)} teachers - load at least {self.num_teachers - len(self.teacher_policies)} more!" ) self.load_teachers()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_teacher_experiment(self, exp: Experiment):\n _, _, extra = load_experiment(exp)\n self.unpack_teachers(extra)", "def preload_all_problems(self):\n for _, _, filenames in os.walk(self.problemDir):\n for filename in filenames:\n if filename[-3:] == \".py\" and filename != \"__init__.py\":\n self.load_problem_file(filename[0:-3])", "def loadallskills(self):\r\n for skill in os.listdir( os.path.join( es.getAddonPath( info.basename ), \"skills\" )):\r\n es.load(\"%s/skills/%s\" % (info.basename, skill))", "def load_training():\n for can in candidates:\n trainings[can] = []\n for subdir, dirs, files in os.walk(os.path.join(corpus_dir, can)):\n for doc in files:\n trainings[can].append(doc)", "def _load_dirs(self):\n rootdirs = self._docset.get_compounds(xml.Directory,\n lambda x: x.get_parent() is None)\n for dirdoc in rootdirs:\n self._load_dir(dirdoc, None)", "def load_test_users():\n return [load_test_angel(), load_test_troublemaker(), load_test_rebel()]", "def load_fixtures(self):\n for fixture_dir in settings.FIXTURE_DIRS:\n fixture_dir = os.path.join(fixture_dir, self.filesystem_name)\n for (root, dirs, files) in os.walk(fixture_dir):\n for file in files:\n full_file_path = os.path.join(root, *dirs, file)\n with open(full_file_path, 'rb') as f:\n self.save(os.path.relpath(full_file_path, fixture_dir), f)", "def _load_module_recursive(self, dir) :\t\n\t\tfor filepath in os.listdir(dir) :\n\t\t\tfullpath = os.path.join(dir, filepath)\n\n\t\t\tif os.path.isdir(fullpath) :\n\t\t\t\tself._load_module_recursive(fullpath)\n\n\t\t\telif os.path.splitext(filepath)[1] == '.py' :\n\t\t\t\tutils.load_module(fullpath, self.settings.ROOT_PATH)", "def __init__(self, *paths, **kwargs):\n trajectories = load_trajectories(*paths, **kwargs)\n super().__init__(trajectories, **kwargs)", "def load_data(self) -> None:\n self.paths: List[str] = []\n self.durations: List[float] = []\n self.transcriptions: List[str] = []\n\n def raise_(err):\n \"\"\"raises error if problem during os.walk\"\"\"\n raise err\n\n for subset in self.subsets:\n subset_path = os.path.join(self.root, self.base_dir, subset)\n for root, dirs, files in os.walk(subset_path, onerror=raise_):\n if not files:\n continue\n matches = fnmatch.filter(files, \"*.trans.txt\")\n assert len(matches) == 1, \"> 1 transcription file found\"\n self._parse_transcription_file(root, matches[0])\n\n self._sort_by_duration()", "def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")", "def discover_examples():\n root = './examples'\n for filename in os.listdir(root):\n if os.path.splitext(filename)[1] == '.py':\n yield os.path.join(root, filename)", "def loadTrie(self):\n for file in self._gram_files:\n trie_file = getTrieFile(os.path.basename(file), self._pickle_dir)\n with open(trie_file, 'rb') as fd:\n self._tries.append(pickle.load(fd))", "def prune_teachers(self):\n self.teacher_policies = self.teacher_policies[: self.num_teachers]\n self.teacher_envs = self.teacher_envs[: self.num_teachers]\n self.teacher_expl_strats = self.teacher_expl_strats[: self.num_teachers]\n self.teacher_critics = self.teacher_critics[: self.num_teachers]\n self.teacher_ex_dirs = 
self.teacher_ex_dirs[: self.num_teachers]", "def _load_trials(self) -> hyperopt.Trials:\n if os.path.isfile(self.trials_path):\n trials = pickle.load(open(self.trials_path, \"rb\"))\n else:\n trials = hyperopt.Trials()\n return trials", "def imdb_load():\n for root, dirs, filenames in os.walk(os.path.dirname(__file__) + \"/imdb\"):\n for file_name in filenames:\n if file_name.find(\".json\") > 0:\n Movie.imdb_load_file(os.path.dirname(__file__) + \"/imdb/\" + file_name)\n return Movie.__movies", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def _load_templates(cls):\n if cls._raw_templates is None:\n cls._raw_templates = fetch_rrlyrae_templates()", "def load_tests(loader, suite, pattern):\n tests = loader.discover(start_dir=here.parents[0], pattern=pattern)\n suite.addTests(tests)\n return suite", "def get_teachers(self):\n query = Teacher.all().order('teacher')\n return query.fetch()", "def iter_dir(tree, path):\n for f in os.listdir(path):\n if os.path.isfile(path + '/' + f + '/__init__.py'):\n tree[f] = None\n elif os.path.isdir(path + '/' + f):\n tree[f] = {}\n SnakeWM.iter_dir(tree[f], path + '/' + f)", "def get_test_examples(self, data_dir):\n \n raise NotImplementedError()", "def loadFiles():\n all_chapters = []\n for name in names:\n f = open(file_path + name, \"r\", encoding=\"utf-8\")\n html_file = f.read()\n f.close()\n chap_text = extractText(html_file)\n new_text = cleanText(chap_text)\n all_chapters.append(new_text)\n concatenated_chapters = \" \".join(all_chapters)\n return concatenated_chapters", "def loadDirectory(self, dirname):\r\n cachelist=os.listdir(dirname)\r\n testlist=fnmatch.filter(cachelist,'*.hdf5')\r\n \r\n for file_ in testlist:\r\n print(\"Using {0}\".format(file_))\r\n \r\n files = [h5py.File(os.path.join(dirname, fn),'r') for fn in testlist]\r\n return files", "def tubs_from_directory(tub_dir, verbose=False):\n tubs = []\n count = 0\n root_path = Path(tub_dir)\n for item in root_path.iterdir():\n if item.is_dir():\n try:\n t = Tub(str(item),read_only=True)\n count += len(t)\n except FileNotFoundError as ex:\n continue\n except ValueError as ex:\n # In case the catalog file is empty\n continue\n tubs.append(t)\n if verbose:\n print( f\"Loaded {count} records.\" )\n\n return tubs", "def preload_all(self):\n for tp in self.tps:\n for f in self.featurefiles + self.maskfiles:\n file = os.path.join(tp, f)\n print('preloading {}'.format(file))\n self.load(file, lazy=False)", "def update_treemakers():\n global treemakers\n treemakers = {}\n for module_filename in glob(os.path.join(hax.hax_dir + '/treemakers/*.py')):\n module_name = os.path.splitext(os.path.basename(module_filename))[0]\n if module_name.startswith('_'):\n continue\n\n # Import the module, after which we can do hax.treemakers.blah\n __import__('hax.treemakers.%s' % module_name, globals=globals())\n\n # Now get all the treemakers defined in the module\n for tm_name, tm in inspect.getmembers(getattr(hax.treemakers, module_name),\n lambda x: type(x) == type and issubclass(x, TreeMaker)):\n if tm_name == 'TreeMaker':\n # This one is the base class; we get it because we did from ... 
import TreeMaker at the top of the file\n continue\n if tm_name in treemakers:\n raise ValueError(\"Two treemakers named %s!\" % tm_name)\n treemakers[tm_name] = tm", "def find_user_templates(self):\n\n # a list to store file names in\n local_templates = []\n\n # loop through the directory content\n for name in os.listdir(self._template_directory):\n # check to see if it is a directory and not in the database\n if (os.path.isdir(os.path.join(self._template_directory, name)) and\n name not in self._templates):\n # add it to the list\n local_templates.append(name)\n\n return local_templates", "def LoadTeacherModels(lang):\n\n # load Trained teacher model parameters\n log_dir = 'data/logs'\n with open(log_dir + '/' + lang + '_model_params', 'rb') as fp:\n params = pickle.load(fp)\n\n model_args = params['args']\n\n if model_args.use_colab is None:\n OUTPUT_DIR = 'ckpts/' + model_args.lang\n if not os.path.isdir(OUTPUT_DIR): os.mkdir(OUTPUT_DIR)\n else:\n from google.colab import drive\n\n drive.mount('/content/gdrive')\n OUTPUT_DIR = '/content/gdrive/My Drive/ckpts/' + model_args.lang\n if not os.path.isdir(OUTPUT_DIR): os.mkdir(OUTPUT_DIR)\n\n if model_args.enc_type == 'gat' and model_args.dec_type == 'transformer':\n models = {}\n OUTPUT_DIR += '/' + model_args.enc_type + '_' + model_args.dec_type\n\n # Load the vocabs\n with open('vocabs/' + model_args.model + '/' +\n lang + '/' + model_args.opt + '_src_vocab', 'rb') as fp:\n src_vocab = pickle.load(fp)\n # loading the target vocab\n model_args.sentencepiece = 'False'\n if model_args.sentencepiece == 'True':\n sp = spm.SentencePieceProcessor()\n sp.load('vocabs/' + model_args.model + '/' +\n lang + '/' + 'train_tgt.model')\n tgt_vocab = sp\n else:\n tgt_vocab = src_vocab\n\n print('Loaded ' + lang + ' Parameters..')\n model = TransGAT(params['args'], params['src_vocab_size'], src_vocab,\n params['tgt_vocab_size'], tgt_vocab)\n # Load the latest checkpoints\n optimizer = tf.train.AdamOptimizer(beta1=0.9, beta2=0.98,\n epsilon=1e-9)\n\n ckpt = tf.train.Checkpoint(\n model=model,\n optimizer=optimizer\n )\n\n ckpt_manager = tf.train.CheckpointManager(ckpt, OUTPUT_DIR, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n ckpt.restore(ckpt_manager.latest_checkpoint).expect_partial()\n\n print('Loaded ' + lang + ' Teacher model !')\n\n return model", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def run_load(rootpath):\n global CSV_PATH\n CSV_PATH = rootpath+'/csv_files/'\n load_movies_details()\n load_movies_cast()\n load_movies_reviews()", "def load():\n root = Path(__file__).parent\n for path in root.iterdir():\n if path.is_dir() and not path.name.startswith(\"_\"):\n subject = (path / \"subject.txt\").read_text()\n txt = (path / \"body.txt\").read_text()\n html = path / \"body.html\"\n if html.exists():\n html = html.read_text()\n else:\n html = None\n attachment = None\n pymodule = path / \"__init__.py\"\n if pymodule.exists():\n pymodule = importlib.import_module(f\"egapro.emails.{path.name}\")\n attachment = pymodule.attachment\n globals()[path.name] = Email(subject, txt, html, attachment)", "def load_templates(self):\n TemplateHandler.templates = []\n for template in os.listdir(TemplateHandler.templates_path):\n template_config 
= self.load_template_conf(template)\n if template_config is None:\n continue\n TemplateHandler.templates.append(template_config)", "def unpack_teachers(self, extra: dict):\n self.teacher_policies.extend(extra[\"teacher_policies\"])\n self.teacher_envs.extend(extra[\"teacher_envs\"])\n self.teacher_expl_strats.extend(extra[\"teacher_expl_strats\"])\n self.teacher_critics.extend(extra[\"teacher_critics\"])\n self.teacher_ex_dirs.extend(extra[\"teacher_ex_dirs\"])", "def LoadAllSubElements(self, recursive=False):\n pass", "def load_templates(self):\n\n self.templates = []\n\n if os.path.exists(\"question_templates.txt\"):\n for line in open(\"question_templates.txt\", \"r\"):\n self.templates.append(line.replace(\"\\n\", \"\"))", "def load(datasets, treemakers='Basics', force_reload=False):\n if isinstance(datasets, str):\n datasets = [datasets]\n if isinstance(treemakers, (type, str)):\n treemakers = [treemakers]\n\n combined_dataframes = []\n\n for treemaker in treemakers:\n\n dataframes = []\n for dataset in datasets:\n minitree_path = get(dataset, treemaker, force_reload=force_reload)\n new_df = pd.DataFrame.from_records(root_numpy.root2array(minitree_path).view(np.recarray)) \n dataframes.append(new_df)\n\n # Concatenate mini-trees of this type for all datasets\n combined_dataframes.append(pd.concat(dataframes))\n\n # Concatenate mini-trees of all types\n if not len(combined_dataframes):\n raise RuntimeError(\"No data was extracted? What's going on??\")\n return pd.concat(combined_dataframes, axis=1)", "def load_tamper(self, dataset_dir, subset):\n # Add classes. We have one class.\n # Naming the dataset nucleus, and the class nucleus\n self.add_class(\"tampers\", 1, \"tampers\")\n\n # Which subset?\n # \"val\": use hard-coded list above\n # \"train\": use data from stage1_train minus the hard-coded list above\n # else: use the data from the specified sub-directory\n # assert subset in [\"train\", \"val\", \"stage1_train\", \"stage1_test\", \"stage2_test\"]\n # subset_dir = \"stage1_train\" if subset in [\"train\", \"val\"] else subset\n dataset_dir = os.path.join(dataset_dir, subset, 'images')\n if subset == \"val\" or subset == \"test\":\n image_ids = next(os.walk(dataset_dir))[2]\n else:\n # Get image ids from directory names\n image_ids = next(os.walk(dataset_dir))[2]\n \n\n # dircopy_move = '/data/twj/copy-move/data_zoo/dataset/images/train'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # dirnew_splicing = '/data/tamper'\n # image_ids_new_splicing = next(os.walk(os.path.join(dirnew_splicing, 'images')))[2]\n\n # dircopy_move = '/home/as/deeplab/wpmrcnn/ca2new/test'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n \n # dircopy_move = '/data/gy/ca2att/train3'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # # # dirtxt_sp = '/data/gy/tamperpre/train'\n # # # image_ids_txt_sp = next(os.walk(os.path.join(dirtxt_sp, 'images')))[2]\n\n # dirnew_sp = '/data/gy/c2newsp/train'\n # image_ids_new_sp = next(os.walk(os.path.join(dirnew_sp, 'images')))[2]\n\n # Add images\n for image_id in image_ids:\n self.add_image(\n \"tampers\",\n image_id=image_id[:-4],\n path=os.path.join(dataset_dir, image_id))\n\n # for image_id in image_ids_copy_move:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dircopy_move, 'images', image_id))\n\n # for image_id in image_ids_new_splicing:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # 
path=os.path.join(dirnew_splicing, 'images', image_id))\n\n # # for image_id in image_ids_txt_sp:\n # # self.add_image(\n # # \"tampers\",\n # # image_id=image_id[:-4],\n # # path=os.path.join(dirtxt_sp, 'images', image_id))\n\n # for image_id in image_ids_new_sp:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_sp, 'images', image_id))", "def _find_fixtures(self, start_dir):\r\n fixtures = []\r\n def _find(arg, dirname, names):\r\n if (dirname.endswith('fixtures')) and (dirname.find('unit_test')==-1):\r\n for name in names:\r\n if (name.endswith(FIXTUERS_EXT)) and (name.find('initial_data')==-1):\r\n fixtures.append(name.replace(FIXTUERS_EXT, ''))\r\n os.path.walk(start_dir, _find, None)\r\n \r\n return fixtures", "def _load(self, directory):\n pass", "def load_all_from_path(self, path):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\t#111: handle expanded paths\n\t\tpath = os.path.abspath(path)\n\t\t#http://stackoverflow.com/questions/301134/dynamic-module-import-in-python\n\t\tif os.path.abspath(path) == self.shutit_main_dir:\n\t\t\treturn\n\t\tif not os.path.exists(path):\n\t\t\treturn\n\t\tif os.path.exists(path + '/STOPBUILD') and not self.build['ignorestop']:\n\t\t\tself.log('Ignoring directory: ' + path + ' as it has a STOPBUILD file in it. Pass --ignorestop to shutit run to override.',level=logging.DEBUG)\n\t\t\treturn\n\t\tfor sub in glob.glob(os.path.join(path, '*')):\n\t\t\tsubpath = os.path.join(path, sub)\n\t\t\tif os.path.isfile(subpath):\n\t\t\t\tself.load_mod_from_file(subpath)\n\t\t\telif os.path.isdir(subpath):\n\t\t\t\tself.load_all_from_path(subpath)", "def load_experiment(self):\n load_dir = select_dir(os.getcwd())\n if load_dir is not None:\n if os.path.isfile(os.path.join(load_dir, 'conf', 'config')):\n self.load_main(load_dir)\n else:\n msg_window('missing conf/config file, not experiment directory')\n return\n\n if self.t is None:\n self.t = Tabs(self)\n self.vbox.addWidget(self.t)\n self.t.clear_configs()\n self.t.load_conf(load_dir)\n\n self.set_experiment(True)\n else:\n msg_window('please select valid conf directory')", "def load_tree_files(tree_dir):\r\n tree_file_names = os.listdir(tree_dir)\r\n # ignore invisible files like .DS_Store\r\n tree_file_names = [fname for fname in tree_file_names if not\r\n fname.startswith('.')]\r\n\r\n # try to warn user if using multiple types of trees {\r\n try:\r\n base_names = []\r\n for fname in tree_file_names:\r\n base_names.append(qiime.parse.parse_rarefaction_fname(fname)[0])\r\n except ValueError:\r\n pass\r\n else:\r\n if len(set(base_names)) > 1:\r\n warnstr = \"\"\"\r\nwarning: trees are named differently, please be sure you're not\r\ncomparing trees generated in different manners, unless you're quite sure\r\nthat's what you intend to do. 
types: \"\"\" + str(set(base_names)) + \"\"\"\r\ncontinuing anyway...\"\"\"\r\n warn(warnstr)\r\n # }\r\n trees = []\r\n for fname in tree_file_names:\r\n try:\r\n f = open(os.path.join(tree_dir, fname), 'U')\r\n tree = parse_newick(f, PhyloNode)\r\n tree.filepath = fname\r\n trees.append(tree)\r\n f.close()\r\n except IOError as err:\r\n sys.stderr.write('error loading tree ' + fname + '\\n')\r\n exit(1)\r\n if len(trees) == 0:\r\n raise RuntimeError('Error: no trees loaded' +\r\n ', check that tree directory has has valid trees')\r\n return trees", "def find_main_tasks(self):\n for role_path in self.role_paths:\n for root, dirnames, filenames in os.walk(role_path):\n # WHAT OTHER DIRECTORIES SHOULD I LOOK IN???\n # handlers/main.yml??\n for filename in fnmatch.filter(filenames, '*.yml'):\n # Absolute path to file\n fullpath = os.path.join(root, filename)\n m = re.match(\"^.*?/tasks/main.yml$\", fullpath)\n if m:\n self.main_tasks.append(fullpath)", "def load_specifications(specification_dir):\n assert E(specification_dir), \"Specification directory {} does not exist\".format(specification_dir)\n\n specification_jsons = glob.glob(J(specification_dir, '*.json'))\n\n logger.info(\"Loading experiment specificaitons...\")\n if not specification_jsons:\n logger.warning(\"Could not find any experiment specifications in {}\".format(specification_dir))\n\n specs = []\n for spec_path in specification_jsons:\n with open(spec_path, 'r') as f:\n specs.append(json.load(f))\n logger.info(\"Found {} experiment specifications\".format(len(specs)))\n\n return specs", "def test_examples():\n tests = [d for d in listdir(ex) if path.isdir(path.join(ex, d))]\n for d in tests:\n yield check_examples, d", "def get_all_best_trials(self, data_indir):\n directoryFiles = glob.glob(data_indir + '\\\\*\\\\')\n bestTrials = []\n for file in directoryFiles:\n l = self.get_best_trial(file)\n bestTrials.append(l)\n return bestTrials", "def getImmediateSubdirectories(dir):", "def load_tests(self):\n base_path = os.path.split(os.path.dirname(\n os.path.abspath(valet.__file__)))[0]\n test_dir = \"valet/tests/tempest\"\n full_test_dir = os.path.join(base_path, test_dir)\n return full_test_dir, base_path", "def file_loader(self):\n\n for folder in self.config[\"data_folders\"]:\n f = os.path.join(folder, self.data_file)\n yield jsonlist.load_file(f)", "def load_people(self, file_path):\n pass", "def _load_personas(self, names, is_custom=False):\n names = names or [path.stem for path in\n self.persona_dir[is_custom].iterdir()\n if path.is_dir()]\n for name in names:\n try:\n self.update_persona_dicts(self.process_name(name),\n is_custom=is_custom)\n except:\n warnings.warn(f'Could not load files for {name}.')", "def load_all_experiments(cls, apps=settings.INSTALLED_APPS):\n if not cls.__loaded:\n cls.__loaded = True\n for app_name in apps:\n application_path = os.path.dirname(sys.modules[app_name].__file__)\n application_experiment_file_path = (\n application_path +\n ExperimentLoader.APPLICATION_RELATIVE_EXPERIMENT_FILE)\n if os.access(application_experiment_file_path, os.F_OK):\n ExperimentLoader.load_experiments(application_experiment_file_path)", "def load_training_data(vocab, directory):\n top_level = os.listdir(directory)\n dataset = []\n for d in top_level:\n if d[-1] == '/':\n label = d[:-1]\n subdir = d\n else:\n label = d\n subdir = d+\"/\"\n files = os.listdir(directory+subdir)\n for f in files:\n bow = create_bow(vocab, directory+subdir+f)\n dataset.append({'label': label, 'bow': bow})\n return dataset", "def 
load_trajs(trajects, topology, PELE_order=True):\n trajectories = [x for x in glob.glob(trajects)]\n topologies = [x for x in glob.glob(topology)]\n if len(topologies) > 1:\n topology_extension = len(topologies[0].split(\".\")[-1])+1\n topologies = sorted(topologies, key=lambda x: int(x.split(\"_\")[-1][:-topology_extension]))\n if PELE_order:\n topologies = topologies[1:] + topologies[:1]\n extension_len = len(trajectories[0].split(\".\")[-1])+1\n trajectories = sorted(trajectories, key=lambda x: int(x.split(\"_\")[-1][:-extension_len]))\n for file_pair in zip(trajectories, itertools.cycle(topologies)):\n yield file_pair", "def load_directory(self, parser, path):\n call_back = parser.call_back\n parser.call_back = None\n if call_back is not None:\n call_back('Finding files...')\n call_back(0, 0)\n file_tuples = []\n for root, subdirs, files in os.walk(path, followlinks=True):\n for filename in files:\n if parser.stop_check is not None and parser.stop_check():\n return\n if not parser.match_extension(filename):\n continue\n file_tuples.append((root, filename))\n if len(file_tuples) == 0:\n raise (ParseError(\n 'No files in the specified directory matched the parser. '\n 'Please check to make sure you have the correct parser.'))\n if call_back is not None:\n call_back('Parsing types...')\n call_back(0, len(file_tuples))\n cur = 0\n speakers = set()\n types = defaultdict(set)\n type_headers = None\n token_headers = None\n subannotations = None\n could_not_parse = {}\n for i, t in enumerate(file_tuples):\n if parser.stop_check is not None and parser.stop_check():\n return\n if call_back is not None:\n call_back('Parsing types from file {} of {}...'.format(i + 1, len(file_tuples)))\n call_back(i)\n root, filename = t\n path = os.path.join(root, filename)\n try:\n information = parser.parse_information(path, self.corpus_name)\n if not information['type_headers']:\n raise ParseError('There was an issue using this parser to parse the file {}.'.format(path))\n speakers.update(information['speakers'])\n type_headers = information['type_headers']\n token_headers = information['token_headers']\n subannotations = information['subannotations']\n except ParseError as e:\n could_not_parse[path] = str(e)\n continue\n for k, v in information['types'].items():\n types[k].update(v)\n if could_not_parse:\n error_template = '{}: {}'\n errors = [error_template.format(k, v) for k,v in could_not_parse.items()]\n raise ParseError('There were issues parsing the following files with {} parser: {}'.format(\n parser.name, '\\n\\n'.join(errors)))\n if call_back is not None:\n call_back('Importing types...')\n self.initialize_import(speakers, token_headers, subannotations)\n self.add_types(types, type_headers)\n\n if call_back is not None:\n call_back('Parsing files...')\n call_back(0, len(file_tuples))\n cur = 0\n for i, t in enumerate(file_tuples):\n if parser.stop_check is not None and parser.stop_check():\n return\n root, filename = t\n name = os.path.splitext(filename)[0]\n if call_back is not None:\n call_back('Parsing file {} of {} ({})...'.format(i + 1, len(file_tuples), name))\n call_back(i)\n path = os.path.join(root, filename)\n try:\n data = parser.parse_discourse(path)\n except ParseError:\n continue\n self.add_discourse(data)\n self.finalize_import(speakers, token_headers, parser.hierarchy, call_back, parser.stop_check)\n parser.call_back = call_back", "def load_all(self, root_dir, file_list=None, pattern=None):\n # each file name corresponds to another date. 
Also tools (A, B) and others.\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(WeldData.load_single, input_paths))\n else: # read 1 file at a time\n all_df = pd.concat(WeldData.load_single(path) for path in input_paths)\n\n return all_df", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def load_users(everyone):\n if user_list.loaded:\n return\n for user in iteritems(everyone):\n user_list.load(user[1])", "def loadFolder(self, path):\n for file_name in os.listdir(path):\n if (file_name.split(\".\")[-1] == \"txt\"):\n file_path = path + \"/\" + file_name\n self.loadFile(file_path)", "def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)", "def check_samples_structure(directory: Path) -> None:\n if not directory.is_dir():\n raise ValueError(f\"{directory} is not a directory\")\n\n for pyfile in directory.rglob('*.py'):\n if pyfile.is_file():\n try:\n scan_sample_directory(pyfile.parent)\n except Exception:\n traceback.print_exc()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def load_tests(loader, filter):\n suite = unittest.TestSuite()\n for r, d, f in os.walk(test_path):\n module_path = os.path.relpath(r, os.path.dirname(test_path)).replace(os.path.sep, \".\")\n for file in f:\n filename, ext = os.path.splitext(file)\n if ext == \".py\" and not file.startswith(\"_\"):\n if not filter or module_path + \".\" + filename in filter:\n print(\"[ADD ] \" + module_path + \".\" + filename)\n test = loader.loadTestsFromName(module_path + \".\" + filename)\n suite.addTest(test)\n else:\n print(\"[SKIP] \" + module_path + \".\" + filename)\n return suite", "def all_present_experiments(self):\n return _yield_subdir_names(self.exp_configs)", "def train_teacher (nb_teachers, teacher_id):\n # Load the dataset\n X_train, X_test, y_train, y_test = models.get_dataset()\n\n print(X_train.shape)\n print(y_train.shape)\n print(X_test.shape)\n print(y_test.shape)\n \n # Retrieve subset of data for this teacher\n data, labels = partition.partition_dataset(X_train,\n y_train,\n nb_teachers,\n 
teacher_id)\n\n print(\"Length of training data: \" + str(len(labels)))\n\n # Define teacher checkpoint filename and full path\n\n filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.hdf5'\n filename2 = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.h5'\n \n # Perform teacher training need to modify \n \n\n # Create teacher model\n model, opt = models.create_two_layer_mlp(46) # num of cols\n model.compile(loss='binary_crossentropy',\n optimizer=\"Adam\",\n metrics=['accuracy'])\n model, hist = models.training(model, data, X_test, labels, y_test,filename)\n\n #modify\n model_json = model.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\n model.save_weights(filename2)\n print(\"Saved model to disk\")\n return True", "def get_all_model_dirs(experiment_dir):\r\n model_dirs = glob.glob(os.path.join(experiment_dir,\"*\"), recursive=False)\r\n return None if len(model_dirs) == 0 else model_dirs", "def load_all_playbooks_from_directory(self, path=None):\n if path is None:\n path = core.config.paths.workflows_path\n for playbook in locate_playbooks_in_directory(path):\n self.load_playbook_from_file(os.path.join(path, playbook))", "def scan_fixtures(path):\n results = list()\n for root, dirs, files in os.walk(path):\n relative_path = root.replace(path + \"/\", \"\")\n if relative_path.startswith(\"static\") or relative_path.startswith(\"theme\"):\n continue\n\n for f in files:\n if not f.endswith(\".json\"):\n continue\n\n app_name = os.path.basename(os.path.dirname(relative_path))\n\n results.append((app_name, f, relative_path))\n\n return results", "def generate_teachers(G, school_type, N_classes, family_member_counter, \n\t\t\t\t\t family_counter, teacher_p_adults, teacher_p_children):\n\tN_teachers = get_N_teachers(school_type, N_classes)\n\tteacher_nodes = ['t{:04d}'.format(i) for i in range(1, N_teachers + 1)]\n\tG.add_nodes_from(teacher_nodes)\n\t\n\tfor t in teacher_nodes:\n\t\tfamily_nodes = [t]\n\t\t# draw a random number of children and adults for the family\n\t\tages, N_adults = generate_teacher_family(teacher_p_adults, teacher_p_children)\n\t\t\n\t\tages = list(ages)\n\t\tfor adult in range(N_adults - 1):\n\t\t\tages.append(20.5) # default age for adults\n\t\t\n\t\t# add the family member nodes and their attributes to the graph\n\t\tfor age in ages:\n\t\t\tfamily_member_ID = 'f{:04d}'.format(family_member_counter)\n\t\t\tfamily_nodes.append(family_member_ID)\n\t\t\tG.add_node(family_member_ID)\n\t\t\tfamily_member_counter += 1\n\t\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t\t{family_member_ID:{'type':'family_member',\n\t\t\t\t\t\t\t\t\t\t 'age':age,\n\t\t\t\t\t\t\t\t\t\t 'family':family_counter,\n\t\t\t\t\t\t\t\t\t\t 'unit':'family'}})\n\t\t\t\t\t\n\t\t# finally, also set the teacher's node attributes\n\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t{t:{'type':'teacher', \n\t\t\t\t\t\t# Note: 20.5 is the age at which\n\t\t\t\t\t\t# the symptom and transmission risk\n\t\t\t\t\t\t# is that of an adult\n\t\t\t\t\t\t'age':20.5,\n\t\t\t\t\t\t'unit':'faculty_room',\n\t\t\t\t\t\t'family':family_counter}})\n\t\tfamily_counter += 1", "def load_generic_trfiles_fi(stimuli, subject, root=\"data/trfiles\"):\n trdict = dict()\n\n for stimulus in stimuli:\n try:\n fname = \"{0}_{1}.report\".format(stimulus, subject)\n trf = TRFile(os.path.join(root, fname))\n trdict[stimulus] = [trf]\n except Exception, e:\n print e\n\n return trdict", "def load(self):\n\n if not os.path.isdir(self.path):\n raise TreeError('Not a 
directory: {}'.format(self.path))\n\n self.log.debug('{} load tree'.format(self.path))\n start = int(time.mktime(time.localtime()))\n\n super(Tree, self).load()\n self.paths = {}\n self.empty_dirs = []\n self.relative_dirs = []\n\n for (root, dirs, files) in os.walk(self.path, topdown=True):\n if os.path.basename(root) in IGNORED_TREE_FOLDER_NAMES:\n continue\n\n if files:\n self.files.extend((root, filename) for filename in files if filename != '')\n for filename in files:\n self.paths[os.path.join(root, filename)] = True\n\n elif not dirs:\n self.empty_dirs.append(root)\n\n self.relative_dirs = set(self.relative_path(x[0]) for x in self.files)\n self.files.sort(key=self.__cmp_file_path__())\n\n stop = int(time.mktime(time.localtime()))\n self.log.debug('loaded {:d} files in {:d} seconds'.format(\n len(self.files),\n (stop-start)\n ))", "def ReadRecipesFromDirectory(self, path: str) -> None:\n for file_path in glob.glob(os.path.join(path, '*.json')):\n self.ReadRecipeFromFile(file_path)", "def _load_files(self):\n for filedoc in self._docset.get_files():\n path = filedoc.get_path()\n if not path:\n # In case of only partially loaded file information,\n # the path information is not set for unloaded files.\n continue\n if not os.path.isabs(path):\n path = os.path.join(self._source_root, path)\n extension = os.path.splitext(path)[1]\n # We don't care about Markdown files that only produce pages\n # (and fail the directory check below).\n if extension == '.md':\n continue\n dirdoc = filedoc.get_directory()\n if not dirdoc:\n self._reporter.xml_assert(filedoc.get_xml_path(),\n \"file is not in any directory in Doxygen\")\n continue\n relpath = self._get_rel_path(path)\n fileobj = self._files.get(relpath)\n if not fileobj:\n fileobj = File(path, relpath, self._docmap[dirdoc])\n self._files[relpath] = fileobj\n fileobj.set_doc_xml(filedoc, self)\n self._docmap[filedoc] = fileobj", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def __init__(self, theme_dir):\n\n if not os.path.isdir(theme_dir): \n print(\"%s not a valid directory, please check!\" % theme_dir, file=sys.stderr)\n sys.exit(1)\n for dirname, dirnames, filenames in os.walk(theme_dir):\n for subdirname in dirnames:\n full_path = os.path.join(dirname, subdirname)\n self.theme_dir.append(full_path)\n print(\"read theme %s\" % full_path, file=sys.stdout)\n print(\"all themes loaded!\", file=sys.stdout)", "def load_files(self):\n # Needs to be implemented by child class\n raise NotImplementedError", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def _recurse_load_profile(self, text, profile_path):\n try:\n inherited_profile = Profile()\n cwd = os.path.dirname(os.path.abspath(profile_path)) if profile_path 
else None\n profile_parser = _ProfileParser(text)\n # Iterate the includes and call recursive to get the profile and variables\n # from parent profiles\n for include in profile_parser.includes:\n # Recursion !!\n profile = self._load_profile(include, cwd)\n inherited_profile.compose_profile(profile)\n\n # Current profile before update with parents (but parent variables already applied)\n inherited_profile = _ProfileValueParser.get_profile(profile_parser.profile_text,\n inherited_profile)\n return inherited_profile\n except ConanException:\n raise\n except Exception as exc:\n raise ConanException(\"Error parsing the profile text file: %s\" % str(exc))", "def load_all_templates(dataset, template_dir: str) -> Dict[str, NexusTemplate]:\n template_set = {\n template_name\n for template_name in os.listdir(template_dir)\n if not template_name.endswith(\".json\")\n }\n template_set.add(\"linear\")\n\n template_ord = []\n for template_name in TEMPLATE_PREFERRED_ORDER:\n try:\n template_set.remove(template_name)\n except KeyError:\n pass\n else:\n template_ord.append(template_name)\n template_ord.extend(sorted(template_set))\n\n return {\n template_name: load_template(dataset, template_dir, template_name)\n for template_name in template_ord\n }", "def load_bodies(directory):\n\n files = glob.glob(os.path.join(directory, \"*.json\"))\n bodies = {}\n for file in files:\n print(\"Loading body \" + file)\n with open(file) as data_file:\n internal_name = splitext(basename(file))[0]\n bodies[internal_name] = load_body(json.load(data_file))\n for key in bodies:\n body = bodies[key]\n if body.parent_internal_name is not None:\n body.parent = bodies[body.parent_internal_name]\n del body.parent_internal_name\n\n for body in bodies.values():\n print(\"Executing post_init for \" + body.name)\n body.post_init()\n\n return bodies.values()", "def load_data_from_dir(self,\n dir_list=[],\n exclude=[]):\n dir_list_ = dir_list[:]\n\n if len(dir_list) == 0:\n eprint(\"CANNOT load data generator with an empty list of directories: {}\".format(dir_list))\n return\n\n for directory in dir_list_:\n if not os.path.isdir(directory):\n eprint(\"\\t\\t {}: {} is not a directory\".format(self.load_data_from_dir.__name__, directory))\n return\n\n # Read Data from current directory\n while dir_list_:\n # Pop first directory name and create dataloader if its a valid folder\n current_dir = dir_list_.pop(0)\n valid_dir = True\n for name in exclude:\n if name in current_dir and valid_dir:\n valid_dir = False\n data_file = current_dir + \"/data.mat\"\n if os.path.isfile(data_file) and \"takktile_\" in current_dir and valid_dir:\n self.dataloaders.append(takktile_dataloader(data_dir=current_dir,\n config=self.config,\n augment=self.augment))\n\n # Find all child directories of current directory and recursively load them\n data_dirs = [os.path.join(current_dir, o) for o in os.listdir(current_dir)\n if os.path.isdir(os.path.join(current_dir, o))]\n for d in data_dirs:\n dir_list_.append(d)\n\n self.num_dl = len(self.dataloaders)\n\n if self.transform_type:\n self.__calculate_data_transforms()\n\n # Create Eval Data\n if self.create_eval_data:\n self.eval_len = (self.__len__())//10\n self.create_eval_data = False\n\n # Calculate class number and ratios\n # Also calculate class diffs\n if not self.config['label_type'] == 'value':\n self.__class_nums = self.dataloaders[0].get_data_class_numbers(self.__get_data_idx(0))\n for i, dl in enumerate(self.dataloaders[1:]):\n self.__class_nums += dl.get_data_class_numbers(self.__get_data_idx(i+1))\n 
self.__class_ratios = self.__class_nums / float(np.mean(self.__class_nums))\n self.__class_diff = np.max(self.__class_nums) - self.__class_nums\n self.__class_diff = [d if n > 0 else 0 for n,d in zip(self.__class_nums, self.__class_diff)]\n\n # Reset and prepare data\n self.on_epoch_end()", "def _load_dyne(self):\n if not self._hub._dscan:\n self._hub._scan_dynamic()\n for path in self._hub._dynamic.get(self._dyne_name, {}).get(\"paths\", []):\n self._dirs.append(path)", "def loadFiles(root=\"data/TAIWAN_RAW_DATA/ADHD\"):\n\tdata_rt = [] # realtime.csv\n\tdata_trial = [] # trialdata.csv\n\tdata_id = [] # caseid/subjectid\n\tRealTime = \"A2RealTime_\"\n\tTrialData = \"A2TrialData_\"\n\tfolder_list = os.listdir(root) # list of subfolders in the root\n\tfor folders in folder_list:\n\t\tfolders_path = os.path.join(root,folders)\n\t\tif folders.find(\"pass\") != -1:\n\t\t\tcontinue\n\t\t\t\n\t\ttry:\n\t\t\tdata_rt.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t RealTime+folders[3:]+\".csv\")))\n\t\t\tdata_trial.append(pd.read_csv(os.path.join\n\t\t\t\t\t\t\t\t\t (folders_path,\n\t\t\t\t\t\t\t\t\t TrialData+folders[3:]+\".csv\")))\n\t\t\tdata_id.append(int(folders.split('_')[1]))\n\t\texcept:\n\t\t\tprint(os.path.join(folders_path,TrialData+folders[3:]+\".csv\"))\n\t\t\t\n\treturn data_rt,data_trial,data_id,folder_list", "def load_files(directory):\n import os\n import re\n\n files = dict()\n\n for file in os.scandir(directory):\n if re.search(\".txt$\", file.name):\n with open(file.path, \"r\", encoding=\"utf8\") as f:\n # re.sub(\".txt$\", \"\", file.name)\n files[file.name] = f.read()\n\n return files", "def load_test_subjects_names(self):\n files = os.listdir(os.path.join(self.db_path, self.test_batch))\n for f in files:\n if f.startswith('test-volume'):\n s_name = str.split(str.split(f, '.')[0], '-')[-1]\n self.testing_subjects.append(s_name)\n self.n_test = len(self.testing_subjects)", "def setUp(self):\n cltk_data_dir = '~/cltk_data/sanskrit/model/sanskrit_models_cltk'\n INDIC_RESOURCES_PATH = os.path.expanduser(cltk_data_dir)\n\n resources_present = os.path.isdir(INDIC_RESOURCES_PATH)\n if not resources_present:\n corpus_importer = CorpusImporter('sanskrit')\n corpus_importer.import_corpus('sanskrit_models_cltk')" ]
[ "0.66310847", "0.59174895", "0.5706464", "0.56006503", "0.5449008", "0.5424291", "0.54034406", "0.53829217", "0.5337041", "0.53273314", "0.53095233", "0.5307464", "0.52964854", "0.5285679", "0.52551943", "0.5226202", "0.5178625", "0.5178625", "0.51691693", "0.5146963", "0.51436335", "0.513781", "0.5132956", "0.51197994", "0.50782996", "0.50781155", "0.506339", "0.50611657", "0.5055892", "0.5051056", "0.5050649", "0.50433254", "0.50433254", "0.50433254", "0.50433254", "0.50433254", "0.50409627", "0.5033975", "0.50261146", "0.5023396", "0.5007168", "0.5001379", "0.49969667", "0.49958917", "0.49942803", "0.49821046", "0.49758598", "0.49732223", "0.494691", "0.49390018", "0.49357983", "0.49355197", "0.49330914", "0.49328667", "0.4930095", "0.4906435", "0.49035755", "0.4901464", "0.48974264", "0.48872483", "0.4879129", "0.4870847", "0.48698303", "0.48635843", "0.48635843", "0.48608184", "0.48558706", "0.4855671", "0.4855337", "0.4854289", "0.48472106", "0.48457322", "0.48439473", "0.48437947", "0.48397505", "0.4828225", "0.48127553", "0.48119432", "0.4803304", "0.47946241", "0.4792878", "0.47553888", "0.47553888", "0.47553888", "0.47553888", "0.47553888", "0.47553888", "0.47553888", "0.47531518", "0.47527847", "0.47478", "0.47461495", "0.47439265", "0.47400233", "0.47327346", "0.4725157", "0.47183686", "0.47133836", "0.47104988", "0.47043326" ]
0.7492254
0
Load teachers from a PDDRTeachers experiment.
def load_teacher_experiment(self, exp: Experiment): _, _, extra = load_experiment(exp) self.unpack_teachers(extra)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_teachers(self):\n # Get the experiment's directory to load from\n ex_dir = ask_for_experiment(max_display=10, env_name=self.env_real.name, perma=False)\n self.load_teacher_experiment(ex_dir)\n if len(self.teacher_policies) < self.num_teachers:\n print(\n f\"You have loaded {len(self.teacher_policies)} teachers - load at least {self.num_teachers - len(self.teacher_policies)} more!\"\n )\n self.load_teachers()", "def load_tamper(self, dataset_dir, subset):\n # Add classes. We have one class.\n # Naming the dataset nucleus, and the class nucleus\n self.add_class(\"tampers\", 1, \"tampers\")\n\n # Which subset?\n # \"val\": use hard-coded list above\n # \"train\": use data from stage1_train minus the hard-coded list above\n # else: use the data from the specified sub-directory\n # assert subset in [\"train\", \"val\", \"stage1_train\", \"stage1_test\", \"stage2_test\"]\n # subset_dir = \"stage1_train\" if subset in [\"train\", \"val\"] else subset\n dataset_dir = os.path.join(dataset_dir, subset, 'images')\n if subset == \"val\" or subset == \"test\":\n image_ids = next(os.walk(dataset_dir))[2]\n else:\n # Get image ids from directory names\n image_ids = next(os.walk(dataset_dir))[2]\n \n\n # dircopy_move = '/data/twj/copy-move/data_zoo/dataset/images/train'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # dirnew_splicing = '/data/tamper'\n # image_ids_new_splicing = next(os.walk(os.path.join(dirnew_splicing, 'images')))[2]\n\n # dircopy_move = '/home/as/deeplab/wpmrcnn/ca2new/test'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n \n # dircopy_move = '/data/gy/ca2att/train3'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # # # dirtxt_sp = '/data/gy/tamperpre/train'\n # # # image_ids_txt_sp = next(os.walk(os.path.join(dirtxt_sp, 'images')))[2]\n\n # dirnew_sp = '/data/gy/c2newsp/train'\n # image_ids_new_sp = next(os.walk(os.path.join(dirnew_sp, 'images')))[2]\n\n # Add images\n for image_id in image_ids:\n self.add_image(\n \"tampers\",\n image_id=image_id[:-4],\n path=os.path.join(dataset_dir, image_id))\n\n # for image_id in image_ids_copy_move:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dircopy_move, 'images', image_id))\n\n # for image_id in image_ids_new_splicing:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_splicing, 'images', image_id))\n\n # # for image_id in image_ids_txt_sp:\n # # self.add_image(\n # # \"tampers\",\n # # image_id=image_id[:-4],\n # # path=os.path.join(dirtxt_sp, 'images', image_id))\n\n # for image_id in image_ids_new_sp:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_sp, 'images', image_id))", "def test_pytd_teacher(self):\n defaults = parser_defaults.copy()\n defaults['datatype'] = 'train:stream'\n defaults['image_mode'] = 'ascii'\n\n with testing_utils.capture_output():\n # Get processed act from agent\n parser = display_setup_args()\n defaults['pytorch_teacher_dataset'] = 'flickr30k'\n del defaults['pytorch_teacher_task']\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n teacher = create_task_agent_from_taskname(opt)[0]\n pytorch_teacher_act = teacher.act()\n\n parser = display_setup_args()\n defaults['task'] = 'flickr30k'\n del defaults['pytorch_teacher_dataset']\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n teacher = create_task_agent_from_taskname(opt)[0]\n 
regular_teacher_act = teacher.act()\n\n keys = set(pytorch_teacher_act.keys()).intersection(\n set(regular_teacher_act.keys()))\n self.assertTrue(len(keys) != 0)\n for key in keys:\n self.assertTrue(pytorch_teacher_act[key] == regular_teacher_act[key],\n 'PytorchDataTeacher does not have the same value '\n 'as regular teacher for act key: {}'.format(key))", "def train_teacher (nb_teachers, teacher_id):\n # Load the dataset\n X_train, X_test, y_train, y_test = models.get_dataset()\n\n print(X_train.shape)\n print(y_train.shape)\n print(X_test.shape)\n print(y_test.shape)\n \n # Retrieve subset of data for this teacher\n data, labels = partition.partition_dataset(X_train,\n y_train,\n nb_teachers,\n teacher_id)\n\n print(\"Length of training data: \" + str(len(labels)))\n\n # Define teacher checkpoint filename and full path\n\n filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.hdf5'\n filename2 = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.h5'\n \n # Perform teacher training need to modify \n \n\n # Create teacher model\n model, opt = models.create_two_layer_mlp(46) # num of cols\n model.compile(loss='binary_crossentropy',\n optimizer=\"Adam\",\n metrics=['accuracy'])\n model, hist = models.training(model, data, X_test, labels, y_test,filename)\n\n #modify\n model_json = model.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\n model.save_weights(filename2)\n print(\"Saved model to disk\")\n return True", "def unpack_teachers(self, extra: dict):\n self.teacher_policies.extend(extra[\"teacher_policies\"])\n self.teacher_envs.extend(extra[\"teacher_envs\"])\n self.teacher_expl_strats.extend(extra[\"teacher_expl_strats\"])\n self.teacher_critics.extend(extra[\"teacher_critics\"])\n self.teacher_ex_dirs.extend(extra[\"teacher_ex_dirs\"])", "def get_test_loader(id_list = './data/sample_submission.csv', root_dir = './data/test/'):\n data = HumanProteinDataset(id_list, root_dir, transform = transforms.Compose([\n Rescale((256, 256)), \n ToTensor()\n ]))\n\n indices = np.arange(len(data))\n dataloader_test = DataLoader(data, batch_size=10, num_workers=5)\n\n return dataloader_test", "def prune_teachers(self):\n self.teacher_policies = self.teacher_policies[: self.num_teachers]\n self.teacher_envs = self.teacher_envs[: self.num_teachers]\n self.teacher_expl_strats = self.teacher_expl_strats[: self.num_teachers]\n self.teacher_critics = self.teacher_critics[: self.num_teachers]\n self.teacher_ex_dirs = self.teacher_ex_dirs[: self.num_teachers]", "def _load_trials(self) -> hyperopt.Trials:\n if os.path.isfile(self.trials_path):\n trials = pickle.load(open(self.trials_path, \"rb\"))\n else:\n trials = hyperopt.Trials()\n return trials", "def load_primers(tsv_filename):\n answer = []\n with open(tsv_filename) as handle:\n for line in handle:\n if line.startswith(\"#\"):\n continue\n parts = line.rstrip(\"\\n\").split(\"\\t\")\n if len(parts) == 2:\n left, right = parts\n name = f\"P{len(answer)}\"\n else:\n name, left, right = parts[:3]\n answer.append((name, left, right))\n return answer", "def get_teacher_assign():\n assignment_data = query_db(\n \"SELECT assignments.id, assignments.name, assignments.due_date \"\n \"FROM assignments JOIN topics ON assignments.topic_id=topics.id \"\n \"JOIN classes ON topics.class_id=classes.id WHERE teacher_id=?;\",\n [flask.session[\"id\"]],\n )\n assignments = []\n for assignment in assignment_data:\n assignment_dict_teach = {}\n assignment_dict_teach[\"id\"] = 
assignment[0]\n assignment_dict_teach[\"name\"] = assignment[1]\n assignment_dict_teach[\"due_date\"] = assignment[2]\n assignments.append(assignment_dict_teach)\n return assignments", "def load_people(self, file_path):\n pass", "def get_train(self, even=None):\n\n #self.images, self.labels, self.traces = trace_data.get_my_teacher()\n _, self.images, self.labels, self.traces, _ = trace_data.get_my_teacher()\n #print(self.labels)\n self.length = len(self.images)\n self.create_teacher()", "def LoadTeacherModels(lang):\n\n # load Trained teacher model parameters\n log_dir = 'data/logs'\n with open(log_dir + '/' + lang + '_model_params', 'rb') as fp:\n params = pickle.load(fp)\n\n model_args = params['args']\n\n if model_args.use_colab is None:\n OUTPUT_DIR = 'ckpts/' + model_args.lang\n if not os.path.isdir(OUTPUT_DIR): os.mkdir(OUTPUT_DIR)\n else:\n from google.colab import drive\n\n drive.mount('/content/gdrive')\n OUTPUT_DIR = '/content/gdrive/My Drive/ckpts/' + model_args.lang\n if not os.path.isdir(OUTPUT_DIR): os.mkdir(OUTPUT_DIR)\n\n if model_args.enc_type == 'gat' and model_args.dec_type == 'transformer':\n models = {}\n OUTPUT_DIR += '/' + model_args.enc_type + '_' + model_args.dec_type\n\n # Load the vocabs\n with open('vocabs/' + model_args.model + '/' +\n lang + '/' + model_args.opt + '_src_vocab', 'rb') as fp:\n src_vocab = pickle.load(fp)\n # loading the target vocab\n model_args.sentencepiece = 'False'\n if model_args.sentencepiece == 'True':\n sp = spm.SentencePieceProcessor()\n sp.load('vocabs/' + model_args.model + '/' +\n lang + '/' + 'train_tgt.model')\n tgt_vocab = sp\n else:\n tgt_vocab = src_vocab\n\n print('Loaded ' + lang + ' Parameters..')\n model = TransGAT(params['args'], params['src_vocab_size'], src_vocab,\n params['tgt_vocab_size'], tgt_vocab)\n # Load the latest checkpoints\n optimizer = tf.train.AdamOptimizer(beta1=0.9, beta2=0.98,\n epsilon=1e-9)\n\n ckpt = tf.train.Checkpoint(\n model=model,\n optimizer=optimizer\n )\n\n ckpt_manager = tf.train.CheckpointManager(ckpt, OUTPUT_DIR, max_to_keep=5)\n if ckpt_manager.latest_checkpoint:\n ckpt.restore(ckpt_manager.latest_checkpoint).expect_partial()\n\n print('Loaded ' + lang + ' Teacher model !')\n\n return model", "def get_teachers(self):\n query = Teacher.all().order('teacher')\n return query.fetch()", "def load_topics():\n\n print \"Importing topics...\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate retailers\n Topic.query.delete()\n\n # Read CSV file\n with open(\"seed_data/topics.csv\") as source_file:\n example_data = list(csv.reader(source_file))\n\n # skip header row for populating db\n for list_item in example_data[1:]:\n topic = Topic(topic_title=list_item[1])\n\n # Add the current retailer to the session\n db.session.add(topic)\n\n # Commit the db.session changes to the database\n db.session.commit()", "def ensemble_preds(dataset, nb_teachers, stdnt_data):\n\n # Compute shape of array that will hold probabilities produced by each\n # teacher, for each training point, and each output class\n result_shape = (nb_teachers, len(stdnt_data), FLAGS.nb_labels)\n\n # Create array that will hold result\n result = np.zeros(result_shape, dtype=np.float32)\n\n # Get predictions from each teacher\n for teacher_id in xrange(nb_teachers):\n # Compute path of checkpoint file for teacher model with ID teacher_id\n if FLAGS.deeper:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + 
'_deep.ckpt-' + str(FLAGS.teachers_max_steps - 1) #NOLINT(long-line)\n else:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '.ckpt-' + str(FLAGS.teachers_max_steps - 1) # NOLINT(long-line)\n\n # Get predictions on our training data and store in result array\n result[teacher_id] = deep_cnn.softmax_preds(stdnt_data, ckpt_path)\n\n # This can take a while when there are a lot of teachers so output status\n print(\"Computed Teacher \" + str(teacher_id) + \" softmax predictions\")\n\n return result", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def read_triplets(seed_candidates):\n if \"pickle\" in seed_candidates:\n if \"*\" in seed_candidates:\n all_files = glob.glob(seed_candidates)\n new_data = []\n for file_name in all_files:\n with open(file_name, 'rb') as f:\n data = pickle.load(f)\n for dd in data:\n new_data.append((dd[0], dd[1], dd[2], dd[3]))\n df_seed = pd.DataFrame(new_data, columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)\n else:\n with open(seed_candidates, 'rb') as f:\n data = pickle.load(f)\n new_data = []\n for dd in data:\n new_data.append((dd[0], dd[1], dd[2], dd[3]))\n # idx = int(dd[0][10:])\n # new_data.append((idx, dd[1], dd[2], dd[3]))\n df_seed = pd.DataFrame(new_data, columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)\n else:\n column_names = ['evtid', 'h1', 'h2', 'h3']\n if \"*\" in seed_candidates:\n all_files = glob.glob(seed_candidates)\n new_data = []\n for file_name in all_files:\n df_seed_tmp = pd.read_csv(file_name, header=None, names=column_names,)\n new_data.append(df_seed_tmp)\n df_seed = pd.concat(new_data)\n else:\n df_seed = pd.read_csv(seed_candidates, header=None,\n names=column_names)\n return df_seed", "def test_pyt_multitask(self):\n\n def run_display_test(defaults, ep_and_ex_counts):\n with testing_utils.capture_output() as f:\n parser = display_setup_args()\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n display_data(opt)\n str_output = f.getvalue()\n self.assertTrue(\n '[ loaded {} episodes with a total of {} examples ]'.format(\n ep_and_ex_counts[0], ep_and_ex_counts[1]\n ) in str_output,\n 'PytorchDataTeacher multitasking failed with '\n 'following args: {}'.format(opt)\n )\n\n task1 = 'babi:task1k:1'\n task2 = 'babi:task1k:2'\n dataset1 = 'flickr30k'\n dataset2 = 'vqa_v1'\n\n # Expected example and episode counts\n eps_and_exs_counts = [\n (1800, 1800),\n (1080, 1800),\n (29900, 29900),\n (29180, 29900),\n (277349, 277349)\n ]\n defaults = parser_defaults.copy()\n\n # 1.\n defaults['pytorch_teacher_task'] = '{},{}'.format(task1, task2)\n run_display_test(defaults, eps_and_exs_counts[0])\n\n # 2.\n defaults['pytorch_teacher_task'] = task1\n defaults['task'] = task2\n run_display_test(defaults, eps_and_exs_counts[1])\n\n # 3.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = dataset1\n run_display_test(defaults, eps_and_exs_counts[2])\n\n # 4.\n del defaults['pytorch_teacher_task']\n defaults['task'] = task1\n run_display_test(defaults, eps_and_exs_counts[3])\n\n # 5.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = '{},{}'.format(dataset1, dataset2)\n 
run_display_test(defaults, eps_and_exs_counts[4])", "def load_ptb_dataset(name='ptb', path='raw_data'):\n path = os.path.join(path, name)\n logging.info(\"Load or Download Penn TreeBank (PTB) dataset > {}\".format(path))\n\n # Maybe dowload and uncompress tar, or load exsisting files\n maybe_download_and_extract(PTB_FILENAME, path, PTB_URL, extract=True)\n\n data_path = os.path.join(path, 'simple-examples', 'data')\n train_path = os.path.join(data_path, \"ptb.train.txt\")\n valid_path = os.path.join(data_path, \"ptb.valid.txt\")\n test_path = os.path.join(data_path, \"ptb.test.txt\")\n\n word_to_id = nlp.build_vocab(nlp.read_words(train_path))\n\n train_data = nlp.words_to_word_ids(nlp.read_words(train_path), word_to_id)\n valid_data = nlp.words_to_word_ids(nlp.read_words(valid_path), word_to_id)\n test_data = nlp.words_to_word_ids(nlp.read_words(test_path), word_to_id)\n vocab_size = len(word_to_id)\n\n # logging.info(nlp.read_words(train_path)) # ... 'according', 'to', 'mr.', '<unk>', '<eos>']\n # logging.info(train_data) # ... 214, 5, 23, 1, 2]\n # logging.info(word_to_id) # ... 'beyond': 1295, 'anti-nuclear': 9599, 'trouble': 1520, '<eos>': 2 ... }\n # logging.info(vocabulary) # 10000\n # exit()\n return train_data, valid_data, test_data, vocab_size", "def loadTrainer(path):\n\tray.shutdown()\n\tray.init()\n\tconfig = createConfig()\n\ttrainer = dqn.DQNTrainer(config=config, env=HiLoPricingEnv)\n\ttrainer.restore(path)\n\treturn trainer", "def get_data_loaders():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2sentences_finalgenerated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+5):]\n \n #history_complete.append(history)\n if len(persona) == 4:\n if len(history) > (len(persona)+3):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def TestTeacher(self, model_name, **kwargs):\n batch_size = kwargs.pop(\"batch_size\", 256)\n model_save_path = kwargs.pop(\"model_save_path\", \"./checkpoints/teacher/\")\n record_save_path = kwargs.pop(\"record_save_path\", \"./records/teacher\")\n verbose = kwargs.pop(\"verbose\", False)\n\n # Do some check\n if not os.path.exists(model_save_path):\n raise RuntimeError(\"No pretrained model exists in '{}'\".format(model_save_path))\n if not os.path.exists(record_save_path):\n os.makedirs(record_save_path)\n model_save_path = os.path.join(model_save_path, \"{}.ckpt\".format(model_name))\n\n tf.reset_default_graph()\n\n # Get dataset\n test_data, test_label = 
self.data_manager.test_data, self.data_manager.test_label\n num_test_data = test_data.shape[0]\n\n X = tf.placeholder(test_data.dtype, shape=[None]+list(test_data.shape[1:]), name=\"input_data\")\n y = tf.placeholder(test_label.dtype, shape=[None]+list(test_label.shape[1:]), name=\"input_label\")\n is_train = tf.placeholder(tf.bool, name=\"is_train\")\n\n dataset = tf.data.Dataset.from_tensor_slices((X, y))\n batched_dataset = dataset.batch(batch_size)\n \n iterator = batched_dataset.make_initializable_iterator()\n batch_data, batch_label = iterator.get_next()\n\n # Get the teacher model\n with tf.variable_scope('teacher_model'):\n logits, probs = self.teacher_model(batch_data, is_train=is_train)\n result = tf.argmax(logits, axis=1)\n correct_num = tf.reduce_sum(tf.cast(tf.equal(result, tf.argmax(batch_label, axis=1)), tf.float32))\n saver = tf.train.Saver()\n\n # Test process\n with tf.Session() as sess:\n sess.run(iterator.initializer, feed_dict={X:test_data, y:test_label})\n saver.restore(sess, model_save_path)\n total_correct_cnt = 0\n while True:\n try:\n right_num = sess.run([correct_num], feed_dict={is_train:False})\n total_correct_cnt += right_num[0]\n except tf.errors.OutOfRangeError:\n acc = total_correct_cnt * 1.0 / num_test_data\n if verbose:\n print(\"Test accuracy: {}\".format(acc))\n break\n acc_hist = [acc]\n self._writeRecord(record_save_path, \"{}_test_accuracy\".format(model_name), acc_hist)", "def load_examples_data(dataset_name):\n dataset_name = dataset_name.strip().lower()\n if dataset_name.lower() not in ['pokemon', 'hanzi', 'animals', 'nsfw', 'simpsons', 'horse2zebra', 'people',\n 'autodrive', 'superresolution', 'anpr', 'beauty','antisproofing','facelandmarks','dogs-vs-cats','chinese']:\n raise ValueError('Not a valid dataset_name.')\n dataset_name = 'examples_' + dataset_name\n dirname = os.path.join(_trident_dir, dataset_name)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n is_internet_ok = is_connected()\n if dataset_name == 'examples_pokemon':\n is_download=download_file_from_google_drive('1U-xc54fX9j9BcidvRa0ow6qjssMlSF2A', dirname, 'pokemon.tar')\n tar_file_path = os.path.join(dirname, 'pokemon.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n extract_path = os.path.join(dirname, 'pokemon')\n dataset = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n print('get pokemon images :{0}'.format(len(dataset)))\n return dataset\n\n\n elif dataset_name == 'examples_hanzi':\n download_file_from_google_drive('13UEzSG0az113gpRPKPyKrIE2HDaA2P4H', dirname, 'hanzi.tar')\n tar_file_path = os.path.join(dirname, 'hanzi.tar')\n extract_path = os.path.join(dirname, 'hanzi')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, os.path.join(dirname, 'train'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset_test = load_folder_images(dataset_name, os.path.join(dirname, 'test'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset.testdata = dataset_test.traindata\n dataset.class_names['zh-cn'] = dataset.class_names['en-us']\n return dataset\n\n elif dataset_name == 'examples_animals':\n download_file_from_google_drive('19Cjq8OO6qd9k9TMZxlPjDpejDOdiHJoW', dirname, 'animals.tar')\n tar_file_path = os.path.join(dirname, 'animals.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = 
load_folder_images(dataset_name, dirname, folder_as_label=True)\n return dataset\n elif dataset_name == 'examples_nsfw':\n tar_file_path = os.path.join(dirname, 'nsfw.tar')\n if os.path.exists(tar_file_path) and get_file_create_time(tar_file_path)<datetime.datetime(2021, 2, 20, 0, 0, 0).timestamp():\n os.remove(tar_file_path)\n if os.path.exists(os.path.join(dirname,'porn_detection_data.pkl')):\n os.remove(os.path.join(dirname,'porn_detection_data.pkl'))\n _delete_h(dirname)\n download_file_from_google_drive('1EXpV2QUrSFJ7zJn8NqtqFl1k6HvXsUzp', dirname, 'nsfw.tar')\n\n extract_path = os.path.join(dirname, 'nsfw')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n folders = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']\n data=unpickle(os.path.join(dirname,'porn_detection_data.pkl'))\n\n trainData = []\n testData = []\n trainLabel = []\n testLabel = []\n for n in range(5):\n folder=folders[n]\n trainData.extend(data[folder]['train'])\n trainLabel.extend([n]*len(data[folder]['train']))\n testData.extend(data[folder]['test'])\n testLabel.extend([n] * len(data[folder]['test']))\n\n trainarray = ImageDataset(trainData,object_type=ObjectType.rgb)\n trainlabel = LabelDataset(trainLabel,object_type=ObjectType.classification_label)\n train_iter = Iterator(data=trainarray, label=trainlabel)\n\n testarray = ImageDataset(testData,object_type=ObjectType.rgb)\n testlabel = LabelDataset(testLabel,object_type=ObjectType.classification_label)\n test_iter = Iterator(data=testarray, label=testlabel)\n print('training images: {0} test images:{1}'.format(len(trainarray), len(testarray)))\n\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n dataset.binding_class_names(['drawing', 'hentai', 'neutral', 'porn', 'sexy'], 'en-us')\n dataset.binding_class_names(['绘画', '色情漫画', '中性', '色情', '性感'], 'zh-cn')\n dataset.binding_class_names(['繪畫', '色情漫畫', '中性', '色情', '性感'], 'zh-tw')\n dataset.scenario = 'train'\n return dataset\n elif dataset_name == 'examples_simpsons':\n download_file_from_google_drive('1hGNFbfBv3EZ4nx4Qod6PtSYzO8H4QIxC', dirname, 'simpsons.tar')\n tar_file_path = os.path.join(dirname, 'simpsons.tar')\n extract_path = os.path.join(dirname, 'simpsons')\n extract_archive(tar_file_path, extract_path, archive_format='tar')\n data_provider = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n data_provider.traindata.unpair = RandomNoiseDataset(shape=(100), random_mode='normal')\n print('get simpsons images :{0}'.format(len(data_provider.traindata.data.items)))\n return data_provider\n elif dataset_name == 'examples_horse2zebra':\n download_file_from_google_drive('1pqj-T90Vh4wVNBV09kYZWgVPsZUA2f7U', dirname, 'horse2zebra.tar')\n tar_file_path = os.path.join(dirname, 'horse2zebra.tar')\n extract_path = os.path.join(dirname, 'horse2zebra')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n trainA = ImageDataset(list_images(os.path.join(dirname, 'trainA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n trainB = ImageDataset(list_images(os.path.join(dirname, 'trainB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testA = ImageDataset(list_images(os.path.join(dirname, 'testA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testB = ImageDataset(list_images(os.path.join(dirname, 'testB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n train_iter = Iterator(data=trainA, unpair=trainB)\n test_iter = Iterator(data=testA, unpair=testB)\n 
dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n print('get horse2zebra images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_people':\n download_file_from_google_drive('1H7mJJfWpmXpRxurMZQqY4N_UXWLbQ2pT', dirname, 'people.tar')\n tar_file_path = os.path.join(dirname, 'people.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'imgs', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs=list(sorted(imgs))\n masks = list(sorted(masks))\n # make_dir_if_need(os.path.join(dirname, 'trimap'))\n # for i in range(len(masks)):\n # mask=mask2array(masks[i])\n # trimap=mask2trimap(mask)\n # save_mask(trimap,masks[i].replace('masks','trimap'))\n # print('trimap',len(masks))\n\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb)\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.binary_mask)\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get people images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_autodrive':\n download_file_from_google_drive('1JqPPeHqhWLqnI6bD8nuHcVx-Y56oIZMK', dirname, 'autodrive.tar')\n tar_file_path = os.path.join(dirname, 'autodrive.tar')\n extract_path = os.path.join(dirname, 'autodrive')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'images', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs = list(sorted(imgs))\n masks = list(sorted(masks))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb,symbol='image')\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.color_mask,symbol='mask')\n\n def parse_code(l):\n if len(l.strip().split(\"\\t\")) == 2:\n a, b = l.replace('\\t\\t', '\\t').strip().split(\"\\t\")\n return tuple(int(i) for i in b.split(' ')), a\n\n label_codes, label_names = zip(\n *[parse_code(l) for l in open(os.path.join(dirname, \"label_colors.txt\")).readlines()])\n for i in range(len(label_codes)):\n mskdata.palette[label_names[i]] = label_codes[i]\n\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get autodrive images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_superresolution':\n download_file_from_google_drive('1v1uoymrWI_MLSiGvSGW7tWJYSnzzXpEQ', dirname, 'superresolution.tar')\n tar_file_path = os.path.join(dirname, 'superresolution.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs.extend(glob.glob(os.path.join(dirname, '*.bmp')))\n imgs = list(sorted(imgs))\n\n print('get super resolution images :{0}'.format(len(imgs)))\n\n imgdata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='lr')\n labeldata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='hr')\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=labeldata))\n return dataset\n elif dataset_name == 'examples_beauty':\n download_file_from_google_drive('1aJhxN9IqsxuayhRTm-gmxk6PiLe5wm9X', dirname, 'beauty.tar')\n tar_file_path = os.path.join(dirname, 'beauty.tar')\n\n extract_archive(tar_file_path, dirname, archive_format='tar')\n # 讀取圖片數據\n images_dict = {}\n with open(os.path.join(dirname, 'images_dict.pkl'), 'rb') as fp:\n images_dict = pickle.load(fp)\n\n f = open(os.path.join(dirname, 
'All_Ratings.txt'), encoding='utf-8-sig').readlines()\n imgs = []\n landmarks = []\n ratings = []\n for row in f:\n data = row.strip().split('\\t')\n if 'images\\\\' + data[0] in images_dict:\n img = images_dict['images\\\\' + data[0]][0]\n img = img.transpose([2, 0, 1])[::-1].transpose([1, 2, 0])\n imgs.append(img)\n landmark = images_dict['images\\\\' + data[0]][1].astype(np.float32)\n landmarks.append(landmark)\n rating = (float(data[1])) / 5.00\n ratings.append(rating)\n print('{0} faces loaded...'.format(len(imgs)))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb, symbol='faces')\n landmarkdata = LandmarkDataset(landmarks=landmarks, object_type=ObjectType.landmarks, symbol='target_landmarks')\n labeldata = LabelDataset(data=ratings,object_type=ObjectType.classification_label, symbol='target_beauty')\n data_provider = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=Dataset.zip(landmarkdata,labeldata)))\n return data_provider\n\n elif dataset_name == 'examples_facelandmarks':\n download_file_from_google_drive('1GtswQBAHPa_bXaB4tW2uOOQ8Lxfz2L5B', dirname, 'ibug_300W.tar')\n tar_file_path = os.path.join(dirname, 'ibug_300W.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n root_dir=os.path.join(dirname, 'ibug_300W_large_face_landmark_dataset')\n image_paths = {}\n landmarks = {}\n crops = {}\n\n for mode in ['train','test']:\n make_dir_if_need(os.path.join(dirname, 'crops',mode))\n tree = ElementTree.parse(os.path.join(root_dir, 'labels_ibug_300W_{0}.xml'.format(mode)))\n root = tree.getroot()\n image_paths[mode]=[]\n landmarks[mode] = []\n crops[mode] = []\n\n offset=5\n for j in tqdm(range(len(root[2]))):\n try:\n filename=root[2][j]\n landmark = []\n for num in range(68):\n x_coordinate = int(filename[0][num].attrib['x'])\n y_coordinate = int(filename[0][num].attrib['y'])\n landmark.append([x_coordinate, y_coordinate])\n landmark=np.asarray(landmark)\n\n crop = filename[0].attrib\n for k in crop.keys():\n crop[k] = int(crop[k]) if isinstance(crop[k], str) else crop[k]\n for k in crop.keys():\n if k=='top' and int(landmark[:,1].min())<int(crop[k]):\n crop[k] = int( landmark[:,1].min())\n crop[ 'height']+=crop[k]-int(landmark[:,1].min())\n elif k=='left' and int(landmark[:,0].min())<int(crop[k]):\n crop[k] = int( landmark[:,0].min())\n crop['width']+= crop[k] - int(landmark[:, 0].min())\n elif k == 'width' and int(landmark[:, 0].max()-landmark[:, 0].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 0].max()-landmark[:, 0].min())\n elif k == 'height' and int(landmark[:, 1].max()-landmark[:, 1].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 1].max()-landmark[:, 1].min())\n\n crop['left']-=offset\n crop['top'] -= offset\n crop['width'] += 2*offset\n crop['height'] += 2*offset\n\n\n landmark[:,0]-=crop['left']\n landmark[:, 1] -= crop['top']\n\n\n if not os.path.exists(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j))):\n im=image2array(os.path.join(root_dir, filename.attrib['file']))\n if im.ndim==2:\n im=cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)\n im=im[crop['top']:min(crop['top']+crop['height'],im.shape[0]),crop['left']:min(crop['left']+crop['width'],im.shape[1]),:]\n\n if max(im.shape[:2])/max(min(im.shape[:2]),0)<=5:\n\n array2image(im).save(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n del im\n else:\n #im = image2array(os.path.join(dirname, 
'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n\n if j%100==0:\n gc.collect()\n except Exception as e:\n pass\n\n print('ibug 300w train dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['train']),len(landmarks['train'])))\n print('ibug 300w test dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['test']), len(landmarks['test'])))\n imdata=ImageDataset(images=image_paths['train'],symbol='faces',object_type=ObjectType.rgb)\n landmarkdata = LandmarkDataset(landmarks=landmarks['train'], symbol='landmarks',object_type=ObjectType.landmarks)\n imtestdata = ImageDataset(images=image_paths['test'], symbol='faces',object_type=ObjectType.rgb)\n landmarktestdata = LandmarkDataset(landmarks=landmarks['test'], symbol='landmarks',object_type=ObjectType.landmarks)\n data_provider=DataProvider(traindata=Iterator(data=imdata,label=landmarkdata),testdata=Iterator(data=imtestdata,label=landmarktestdata))\n return data_provider\n\n elif dataset_name == 'examples_antisproofing':\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name,os.path.join(dirname, 'antisproofing'))\n return data_provider\n elif dataset_name == 'examples_anpr':\n download_file_from_google_drive('1uGBd8tXlP0TZAXNgrR6H0jl5MXj7VPbN', dirname, 'anpr.tar')\n tar_file_path = os.path.join(dirname, 'anpr.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs = list(sorted(imgs))\n\n # CCPD (Chinese City Parking Dataset, ECCV) and PDRC (license Plate Detection and Recognition Challenge)\n # https://github.com/detectRecog/CCPD\n provinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\",\n \"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\n alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'O']\n ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\n 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\n\n def lp2char(lp):\n cols = lp.split('_')\n charstring = ''\n for i in range(len(cols)):\n if i == 0:\n charstring += provinces[int(cols[i])]\n elif i == 1:\n charstring += alphabets[int(cols[i])]\n else:\n charstring += ads[int(cols[i])]\n return charstring\n\n width = 720\n height = 1160\n for im_path in imgs:\n lbl = im_path.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\n charstring = lp2char(lbl)\n iname = im_path.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\n box = [leftUp[0], leftUp[1], rightDown[0], rightDown[1]]\n ori_w, ori_h = [float(int(el)) for el in [width, height]]\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n 
tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, os.path.join(dirname, 'antisproofing'))\n return data_provider\n\n\n\n elif dataset_name == 'examples_dogs-vs-cats':\n download_file_from_google_drive('10czW0On7eIXkPP-MuQ-IRxMWdTizWjNC', dirname, 'dogs-vs-cats.tar')\n tar_file_path = os.path.join(dirname, 'dogs-vs-cats.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, dirname)\n return data_provider\n elif dataset_name == 'examples_chinese':\n to_half=ToHalfWidth()\n to_sc=ChineseConvert(convert_to='simplified')\n download_file_from_google_drive('1yzRzXpLuhSUxnixqCgpbdTk16ajnTEWF', dirname, 'chinese.tar')\n tar_file_path = os.path.join(dirname, 'chinese.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n\n as_train = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_training.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000' ,'|'))).splitlines()\n cityu_train =remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_training.utf8'), encoding='utf-8-sig').read().strip().replace(' ','|'))).splitlines()\n\n as_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_testing_gold.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000', '|'))).splitlines()\n cityu_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_test_gold.utf8'), encoding='utf-8-sig').read().strip().replace(' ', '|'))).splitlines()\n\n\n data = as_train + cityu_train # 把兩個語料合併\n test_data=as_test + cityu_test # 把兩個語料合併\n\n\n raw_data_train = [row.strip('\\n').strip('\\r') for row in data] # 移除分行字元\n raw_data_test = [row.strip('\\n').strip('\\r') for row in test_data] # 移除分行字元\n\n process_data_train=[]\n process_seg_label_train = []\n process_simplifided_label_train = []\n process_traditional_label_train = []\n\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n for k in tqdm(range(len(raw_data_train))):\n row=raw_data_train[k]\n if row.startswith('∥'):\n row=row[1:]\n words=row.replace('||','|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_train.append(word[i])\n #tmp_simplifided_label_train.append(to_half(to_sc(word[i])))\n #轉換為BMES\n\n if i==0 and len(word)>1: #B 是一個詞的開始\n tmp_seg_label_train.append('B')\n elif i==len(word)-1 and len(word)>=2 and tmp_seg_label_train[-1] in ['B','M']: #E 是一個詞的結束\n tmp_seg_label_train.append('E')\n elif len(word)==1 and i==0: #S 自己就是一個單詞\n tmp_seg_label_train.append('S')\n elif len(word)>=3 and tmp_seg_label_train[-1] in ['B','M']: #M 是一個詞的中間\n tmp_seg_label_train.append('M')\n\n if len(tmp_seg_label_train)>0 and tmp_seg_label_train[-1] in ['E','S']:\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)) and k+1<len(words):\n if word in [ '。','﹖']:\n pass\n\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_train.append(' ')\n tmp_seg_label_train.append('S')\n\n if (k+1<len(raw_data_train) and not raw_data_train[k+1].startswith( '」')) and words[-1] in [ '。','﹖']:\n #process_traditional_label_train.append(tmp_data_train)\n\n tmp_data_train=to_half(''.join(tmp_data_train))\n tmp_seg_label_train = ''.join(tmp_seg_label_train)\n # if len(tmp_data_train)!=len(tmp_seg_label_train):\n # print('')\n 
tmp_simplifided_label_train =to_sc(tmp_data_train)\n\n process_data_train.append(tmp_data_train)\n process_seg_label_train.append(tmp_seg_label_train)\n process_simplifided_label_train.append(tmp_simplifided_label_train)\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n # else:\n # tmp_data_train.append('\\n')\n # tmp_simplifided_label_train.append('\\n')\n # tmp_seg_label_train.append('\\n')\n corpus=process_data_train\n seg_corpus=process_seg_label_train\n simplifided_corpus =process_simplifided_label_train\n\n process_data_test = []\n process_seg_label_test = []\n process_simplifided_label_test = []\n process_traditional_label_test = []\n print('generate test labels')\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n for k in tqdm(range(len(raw_data_test))):\n row=raw_data_test[k]\n if row.startswith('∥'):\n row=row[1:]\n words = row.replace('||', '|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_test.append(word[i])\n # tmp_simplifided_label_test.append(to_half(to_sc(word[i])))\n # 轉換為BMES\n\n if i == 0 and len(word) > 1: # B 是一個詞的開始\n tmp_seg_label_test.append('B')\n elif i == len(word) - 1 and len(word) >= 2 and tmp_seg_label_test[-1] in ['B', 'M']: # E 是一個詞的結束\n tmp_seg_label_test.append('E')\n elif len(word) == 1 and i == 0: # S 自己就是一個單詞\n tmp_seg_label_test.append('S')\n elif len(word) >= 3 and tmp_seg_label_test[-1] in ['B', 'M']: # M 是一個詞的中間\n tmp_seg_label_test.append('M')\n\n if len(tmp_seg_label_test) > 0 and tmp_seg_label_test[-1] in ['E', 'S'] and k+1<len(words):\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)):\n if word in ['。', '﹖']:\n pass\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_test.append(' ')\n tmp_seg_label_test.append('S')\n\n if (k + 1 < len(raw_data_test) and not raw_data_test[k + 1].startswith('」')) and words[-1] in ['。', '﹖']:\n # process_traditional_label_test.append(tmp_data_test)\n\n tmp_data_test = to_half(''.join(tmp_data_test))\n tmp_seg_label_test = ''.join(tmp_seg_label_test)\n # if len(tmp_data_test)!=len(tmp_seg_label_test):\n # print('')\n tmp_simplifided_label_test = to_sc(tmp_data_test)\n\n process_data_test.append(tmp_data_test)\n process_seg_label_test.append(tmp_seg_label_test)\n process_simplifided_label_test.append(tmp_simplifided_label_test)\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n # else:\n # tmp_data_test.append('\\n')\n # tmp_simplifided_label_test.append('\\n')\n # tmp_seg_label_test.append('\\n')\n test_corpus = process_data_test\n test_seg_corpus = process_seg_label_test\n test_simplifided_corpus = process_simplifided_label_test\n\n\n data=TextSequenceDataset(corpus=corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_label = TextSequenceDataset(corpus=seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_label = TextSequenceDataset(corpus=simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_label = TextSequenceDataset(corpus= copy.deepcopy(corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n 
data_test=TextSequenceDataset(corpus=test_corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_test_label = TextSequenceDataset(corpus=test_seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_test_label = TextSequenceDataset(corpus=test_simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_test_label = TextSequenceDataset(corpus= copy.deepcopy(test_corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n\n chars = list(sorted(set(list( ''.join(corpus) +bpmf_phonetic+'\\n\\r\\t∥'+ ''.join(simplifided_corpus)+''.join(test_data)))))\n chars.insert(0, '[CLS]')\n chars.insert(1, '[SEP]')\n chars.insert(2, '[UNK]')\n chars.insert(3, '[PAD]')\n chars.insert(4, '[MASK]')\n\n data.vocabs =data_test.vocabs=simplifided_label.vocabs=simplifided_test_label.vocabs = chars\n data.text2index=data_test.text2index =simplifided_label.text2index=simplifided_test_label.text2index = dict((c, i) for i, c in enumerate(chars))\n data.index2text =data_test.index2text =simplifided_label.index2text=simplifided_test_label.index2text= dict((i, c) for i, c in enumerate(chars))\n traditional_label = copy.deepcopy(data)\n traditional_test_label = copy.deepcopy(data_test)\n traditional_label.object_type =traditional_test_label.object_type = ObjectType.sequence_label\n traditional_label.symbol =traditional_test_label.symbol = 'traditional_label'\n\n mask_label = copy.deepcopy(data)\n mask_test_label = copy.deepcopy(data_test)\n #mask_label.object_type =mask_test_label.object_type= ObjectType.corpus\n mask_label.symbol = mask_test_label.symbol = 'mask_label'\n\n\n\n nextword=copy.deepcopy(data)\n nextword_test = copy.deepcopy(data_test)\n nextword.object_type=nextword_test.object_type=ObjectType.sequence_label\n nextword.symbol=nextword_test.symbol='nextword_label'\n nextword.sequence_offset=nextword_test.sequence_offset=1\n\n label=ZipDataset(seg_label,nextword,simplifided_label,traditional_label,mask_label)\n label_test = ZipDataset(seg_test_label, nextword_test, simplifided_test_label, traditional_test_label, mask_test_label)\n provider=TextSequenceDataProvider(\n traindata=Iterator(data=data,label=label),\n testdata=Iterator(data=data_test,label=label_test))\n return provider\n #,sample_filter=lambda x:x[0][-1]==3\n else:\n return None", "def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def 
get_train_examples(self, data_dir):\r\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n\t\traise NotImplementedError()", "def get_all_profesors(self) -> List[Teacher]:\n self.cursor.execute(\n f\"SELECT * FROM {self.table_name}\")\n \n teachers = []\n for teacher in self.cursor.fetchall():\n teacher_parsed = list(teacher[0:8]) + [json.loads(t) for t in teacher[8:]]\n teachers.append(Teacher.parse_tuple(teacher_parsed))\n \n return teachers", "def load_testset_predictions(all, weighted=False):\n if weighted:\n return load_data_from_CSV('results/testset_results_weighted_{}.csv'.format(all))\n else:\n return load_data_from_CSV('results/testset_results_{}.csv'.format(all))", "def load_dataset():\n\n\n train_dd_loader = DailyDialogLoader(PATH_TO_TRAIN_DATA, load=False)\n train_dataloader = DataLoader(train_dd_loader, batch_size=16, shuffle=True, num_workers=0,\n collate_fn=PadCollate())\n\n test_dd_loader = DailyDialogLoader(PATH_TO_TEST_DATA, load=True)\n test_dataloader = DataLoader(test_dd_loader, batch_size=1, shuffle=False, num_workers=0,\n collate_fn=PadCollate())\n\n assert train_dd_loader.vocabulary.n_words == test_dd_loader.vocabulary.n_words\n\n return train_dd_loader, train_dataloader, test_dataloader", "def loadTrips(analyzer, tripsfile):\n tripsfile = cf.data_dir + tripsfile\n input_file = csv.DictReader(open(tripsfile, encoding=\"utf-8\"),\n delimiter=\",\")\n for trip in input_file:\n model.addTrip(analyzer, trip)\n return analyzer", "def get_teacher(self, **fields):\n existing_fields = [i.name for i in self._db.get_columns('teachers')]\n teacher_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n teacher_fields[key] = value\n teachers = [i for i in Teachers.select().filter(**teacher_fields)]\n # Expect single value if search by unique fields, list if by non-unique\n return teachers if len(teachers) > 1 else teachers[0] if len(teachers) == 1 else None", "def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def load_train_subjects_names(self):\n for tf in self.train_db_batches:\n files = ns.natsorted(os.listdir(os.path.join(self.db_path, tf)))\n for f in files:\n if f.startswith('volume'):\n s_name = str.split(str.split(f, '.')[0], '-')[-1]\n self.training_subjects.append(s_name)\n np.random.seed(1)\n 
np.random.shuffle(self.training_subjects)\n self.n_train = len(self.training_subjects)", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n \n raise NotImplementedError()", "def get_data_loader(batch_size=10, num_workers=2):\n \n data_loader = torch.utils.data.DataLoader(dataset=TempuckeyDataSet(),\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True,\n num_workers=num_workers,\n collate_fn=collate)\n return data_loader", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def get_train_examples(self, data_dir):\n raise NotImplementedError()", "def preload_all(self):\n for tp in self.tps:\n for f in self.featurefiles + self.maskfiles:\n file = os.path.join(tp, f)\n print('preloading {}'.format(file))\n self.load(file, lazy=False)", "def get_test_loader(test_dataset,\n batch_size,\n num_workers=4,\n pin_memory=False):\n data_loader = torchutils.DataLoader(\n test_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers, pin_memory=pin_memory)\n return data_loader", "def load_pedestrian(self, dataset_dir, subset):\n # Add classes. We have only one class to add.\n self.add_class(\"pedestrian\", 1, \"pedestrian\")\n\n # Train or validation dataset?\n assert subset in [\"train\", \"val\"]\n dataset_dir = os.path.join(dataset_dir, subset)\n print(dataset_dir)\n # Load annotations\n # VGG Image Annotator (up to version 1.6) saves each image in the form:\n # { 'filename': '28503151_5b5b7ec140_b.jpg',\n # 'regions': {\n # '0': {\n # 'region_attributes': {},\n # 'shape_attributes': {\n # 'all_points_x': [...],\n # 'all_points_y': [...],\n # 'name': 'polygon'}},\n # ... more regions ...\n # },\n # 'size': 100202\n # }\n # We mostly care about the x and y coordinates of each region\n # Note: In VIA 2.0, regions was changed from a dict to a list.\n annotations = json.load(open(os.path.join(dataset_dir, \"via_region_data.json\")))\n annotations = list(annotations.values()) # don't need the dict keys\n\n # The VIA tool saves images in the JSON even if they don't have any\n # annotations. Skip unannotated images.\n annotations = [a for a in annotations if a['regions']]\n\n # Add images\n for a in annotations:\n # Get the x, y coordinaets of points of the polygons that make up\n # the outline of each object instance. 
These are stores in the\n # shape_attributes (see json format above)\n # The if condition is needed to support VIA versions 1.x and 2.x.\n if type(a['regions']) is dict:\n polygons = [r['shape_attributes'] for r in a['regions'].values()]\n else:\n polygons = [r['shape_attributes'] for r in a['regions']] \n\n # load_mask() needs the image size to convert polygons to masks.\n # Unfortunately, VIA doesn't include it in JSON, so we must read\n # the image. This is only managable since the dataset is tiny.\n image_path = os.path.join(dataset_dir, a['filename'])\n image = skimage.io.imread(image_path)\n height, width = image.shape[:2]\n\n self.add_image(\n \"pedestrian\",\n image_id=a['filename'], # use file name as a unique image id\n path=image_path,\n width=width, height=height,\n polygons=polygons)", "def train(self, train_loader):\n pass", "def load_premiership_teams():\n # list of PremTeams to add\n team_list = [\n {'name': 'Arsenal', 'code': 'ARS', 'is_prem': True},\n {'name': 'Aston Villa', 'code': 'AVL', 'is_prem': True},\n {'name': 'Brighton and Hove Albion', 'code': 'BTN', 'is_prem': True},\n {'name': 'Brentford', 'code': 'BRE', 'is_prem': True},\n {'name': 'Bournemouth', 'code': 'BOU', 'is_prem': False},\n {'name': 'Burnley', 'code': 'BUR', 'is_prem': True},\n {'name': 'Cardiff City', 'code': 'CAR', 'is_prem': False},\n {'name': 'Chelsea', 'code': 'CHE', 'is_prem': True},\n {'name': 'Crystal Palace', 'code': 'CRY', 'is_prem': True},\n {'name': 'Everton', 'code': 'EVE', 'is_prem': True},\n {'name': 'Fulham', 'code': 'FUL', 'is_prem': False},\n {'name': 'Hull', 'code': 'HUL', 'is_prem': False},\n {'name': 'Huddersfield Town', 'code': 'HUD', 'is_prem': False},\n {'name': 'Leeds United', 'code': 'LEE', 'is_prem': True},\n {'name': 'Leicester City', 'code': 'LEI', 'is_prem': True},\n {'name': 'Liverpool', 'code': 'LIV', 'is_prem': True},\n {'name': 'Manchester City', 'code': 'MCY', 'is_prem': True},\n {'name': 'Manchester United', 'code': 'MUN', 'is_prem': True},\n {'name': 'Middlesbrough', 'code': 'MID', 'is_prem': False},\n {'name': 'Newcastle United', 'code': 'NEW', 'is_prem': True},\n {'name': 'Norwich City', 'code': 'NOR', 'is_prem': True},\n {'name': 'Queens Park Rangers', 'code': 'QPR', 'is_prem': False},\n {'name': 'Sheffield United', 'code': 'SHF', 'is_prem': False},\n {'name': 'Southampton', 'code': 'SOT', 'is_prem': True},\n {'name': 'Stoke City', 'code': 'STO', 'is_prem': False},\n {'name': 'Sunderland', 'code': 'SUN', 'is_prem': False},\n {'name': 'Swansea City', 'code': 'SWA', 'is_prem': False},\n {'name': 'Tottenham Hotspur', 'code': 'TOT', 'is_prem': True},\n {'name': 'Watford', 'code': 'WAT', 'is_prem': True},\n {'name': 'West Bromwich Albion', 'code': 'WBA', 'is_prem': False},\n {'name': 'West Ham United', 'code': 'WHM', 'is_prem': True},\n {'name': 'Wolverhampton Wanderers', 'code': 'WLV', 'is_prem': True},\n ]\n\n for team in team_list:\n print(PremTeam.objects.update_or_create(\n name=team['name'],\n code=team['code'],\n defaults={'is_prem': team['is_prem']}\n ))\n # print(pt, created)", "def load_data(batch_size=batch_size):\n trainset = LibriSpeechDataset(training_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds))\n testset = LibriSpeechDataset(validation_set, int(LIBRISPEECH_SAMPLING_RATE * n_seconds), stochastic=False)\n\n train_loader = DataLoader(trainset, batch_size=batch_size, num_workers=1, shuffle=True, drop_last=True)\n test_loader = DataLoader(testset, batch_size=1, num_workers=1, drop_last=True)\n\n return train_loader, test_loader", "def 
get_test_examples(self, data_dir):\n raise NotImplementedError()", "def get_test_examples(self, data_dir):\n raise NotImplementedError()", "def load_meetings():\n\n print \"Importing meetings...\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate retailers\n Meeting.query.delete()\n\n # Read CSV file\n with open(\"seed_data/meetings.csv\") as source_file:\n example_data = list(csv.reader(source_file))\n\n # skip header row for populating db\n for list_item in example_data[1:]:\n meeting = Meeting(meeting_title=list_item[1],\n meeting_time=list_item[2],\n attendees=list_item[3],\n length=list_item[4],\n topic_id=list_item[5])\n\n # Add the current retailer to the session\n db.session.add(meeting)\n\n # Commit the db.session changes to the database\n db.session.commit()", "def load_trial_data(filelist_path, feature_dir, **kwargs):\n filelist = readlines_and_split_spaces(filelist_path)\n\n\n if (len(filelist[0]) == 4 and (\"spoof\" in filelist[0] or \"target\" in filelist[0])):\n # Four items per line: speaker_id, utterance, system, target/nontarget/spoof\n # ASVSpoof2019 logical access trial list\n return _load_trial_data_asvspoof(filelist, feature_dir, **kwargs)\n elif len(filelist[0]) == 3 and filelist[0][0] in (\"0\", \"1\"):\n # Three items per line, and first is 0 or 1 (target or nontarget)\n return _load_trial_data_voxceleb(filelist, feature_dir)\n else:\n raise RuntimeError(\"Unknown filelist type (file {})\".format(filelist_path))", "def test_practitioner_6(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"practitioner-example-f002-pv.json\"\n inst = practitioner.Practitioner.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Practitioner\" == inst.resource_type\n\n impl_practitioner_6(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Practitioner\" == data[\"resourceType\"]\n\n inst2 = practitioner.Practitioner(**data)\n impl_practitioner_6(inst2)", "def PapersFile(filename, researchers=None):\n try:\n with open(filename, \"r\") as file:\n data = json.load(file)\n except FileNotFoundError:\n data = {}\n return Papers(data, filename=filename, researchers=researchers)", "def test_practitioner_4(base_settings):\n filename = (\n base_settings[\"unittest_data_dir\"] / \"practitioner-example-xcda-author.json\"\n )\n inst = practitioner.Practitioner.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Practitioner\" == inst.resource_type\n\n impl_practitioner_4(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Practitioner\" == data[\"resourceType\"]\n\n inst2 = practitioner.Practitioner(**data)\n impl_practitioner_4(inst2)", "def test_practitioner_7(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"practitioner-example.json\"\n inst = practitioner.Practitioner.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Practitioner\" == inst.resource_type\n\n impl_practitioner_7(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Practitioner\" == data[\"resourceType\"]\n\n inst2 = practitioner.Practitioner(**data)\n impl_practitioner_7(inst2)", "def teacher(self):\n if \"teacher\" in self._prop_dict:\n if isinstance(self._prop_dict[\"teacher\"], OneDriveObjectBase):\n return 
self._prop_dict[\"teacher\"]\n else :\n self._prop_dict[\"teacher\"] = EducationTeacher(self._prop_dict[\"teacher\"])\n return self._prop_dict[\"teacher\"]\n\n return None", "def test_train_on_the_fly(self):\r\n input_seqs_file = NamedTemporaryFile(\r\n prefix='RdpTaxonAssignerTest_', suffix='.fasta')\r\n input_seqs_file.write(test_seq_coll.to_fasta())\r\n input_seqs_file.seek(0)\r\n\r\n exp_assignments = rdp_trained_test1_expected_dict\r\n\r\n app = RdpTaxonAssigner({\r\n 'id_to_taxonomy_fp': self.id_to_taxonomy_file.name,\r\n 'reference_sequences_fp': self.reference_seqs_file.name,\r\n })\r\n obs_assignments = app(self.tmp_seq_filepath)\r\n\r\n key = 'X67228 some description'\r\n self.assertEqual(obs_assignments[key], exp_assignments[key])", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def load_tas_lookup():\n logger.info('Loading TAS')\n load_tas()", "def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def get_data_loaders_4sentence():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_1generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"]\n #history_complete.append(history)\n if len(history_splitted) > (len(persona)-1):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = 
build_input_from_segments_faiss(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def fixture_example_data():\n import_example_data()", "def get_data_loaders(args, tokenizer):\n personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache, args.train_lang)\n _ = personachat.pop(\"test\", None)\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": [], \"valid\": []}\n\n if args.train_lang in [\"En\", \"Fr\", \"It\", \"Id\", \"Jp\", \"Ko\", \"Zh\"]: #monolingual data\n for dataset_name, dataset in personachat.items():\n for dial in dataset[args.train_lang]: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lm_labels = True)\n datasets[dataset_name].append(instance) \n else: #multilingual data\n for dataset_name, dataset in personachat.items():\n for lang, dials in dataset.items():\n for dial in dials: #dial: {\"persona\":[], \"history\":[], \"response\":str}\n instance = build_input_from_segments(dial[\"persona\"], dial[\"history\"][-args.max_turns:], dial[\"response\"], tokenizer, lang_id=\"<{}>\".format(lang.lower()), lm_labels = True)\n datasets[dataset_name].append(instance) #all langs together\n\n\n logger.info(\"Build train and validation dataloaders\")\n train_dataset = DatasetTrain(datasets[\"train\"])\n valid_dataset = DatasetTrain(datasets[\"valid\"])\n\n #logger.info(\"Build train and validation dataloaders\")\n #train_dataset, valid_dataset = TensorDataset(*tensor_datasets[\"train\"]), TensorDataset(*tensor_datasets[\"valid\"])\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if args.distributed else None\n valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset) if args.distributed else None\n train_loader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, shuffle=(not args.distributed), collate_fn=collate_fn)\n valid_loader = DataLoader(valid_dataset, sampler=valid_sampler, batch_size=args.valid_batch_size, shuffle=False, collate_fn=collate_fn)\n\n # logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[0].shape))\n # #logger.info(\"Train dataset (Batch, Candidates, Seq length): {}\".format(train_dataset.tensors[1].shape))\n # logger.info(\"Valid dataset (Batch, Candidates, Seq length): {}\".format(valid_dataset.tensors[0].shape))\n logger.info(\"Train dataset length: {}\".format(len(train_dataset)))\n logger.info(\"Valid dataset length: {}\".format(len(valid_dataset)))\n return train_loader, valid_loader, train_sampler, valid_sampler", "def load_trec(loc=DATA_DIR):\n train, test = [], []\n with open(os.path.join(loc, 'TREC', 'train_5500.label'), 'rb') as f:\n for line in f:\n train.append(line.strip())\n with open(os.path.join(loc, 'TREC', 'TREC_10.label'), 'rb') as f:\n for line in f:\n test.append(line.strip())\n return train, test", "def load_extractor_engines(reload=False):\r\n\r\n global youtube_dl, youtube_dlc\r\n\r\n # youtube-dl ----------------------------------------------------------------------------------------------------\r\n start = time.time()\r\n\r\n if reload and youtube_dl:\r\n importlib.reload(youtube_dl)\r\n else:\r\n import youtube_dl\r\n\r\n config.youtube_dl_version = youtube_dl.version.__version__\r\n\r\n # calculate 
loading time\r\n load_time = time.time() - start\r\n log(f'youtube_dl version: {config.youtube_dl_version}, load_time= {int(load_time)} seconds')\r\n\r\n # youtube-dlc ----------------------------------------------------------------------------------------------------\r\n start = time.time()\r\n\r\n if reload and youtube_dlc:\r\n importlib.reload(youtube_dlc)\r\n else:\r\n import youtube_dlc\r\n\r\n config.youtube_dlc_version = youtube_dlc.version.__version__\r\n\r\n # calculate loading time\r\n load_time = time.time() - start\r\n log(f'youtube_dlc version: {config.youtube_dlc_version}, load_time= {int(load_time)} seconds')\r\n\r\n # set interrupt / kill switch\r\n set_interrupt_switch()\r\n\r\n # set default extractor\r\n set_default_extractor()\r\n\r\n # get a random user agent and update headers\r\n if not config.custom_user_agent:\r\n config.http_headers['User-Agent'] = youtube_dl.utils.random_user_agent()", "def test_practitioner_1(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"practitioner-example-f203-jvg.json\"\n inst = practitioner.Practitioner.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Practitioner\" == inst.resource_type\n\n impl_practitioner_1(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Practitioner\" == data[\"resourceType\"]\n\n inst2 = practitioner.Practitioner(**data)\n impl_practitioner_1(inst2)", "def _load_training_data(self):\n self._save_training_data()", "def load(tweets_file, rtt_file):\n try:\n tw_df = pd.read_csv(tweets_file)\n rtt_df = pd.read_csv(rtt_file)\n except:\n print('one or several files were not found')\n sys.exit()\n\n return tw_df, rtt_df", "def test_practitioner_9(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"practitioner-example-f204-ce.json\"\n inst = practitioner.Practitioner.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Practitioner\" == inst.resource_type\n\n impl_practitioner_9(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Practitioner\" == data[\"resourceType\"]\n\n inst2 = practitioner.Practitioner(**data)\n impl_practitioner_9(inst2)", "def load_inst(self):\n self.sanity_check()\n\n fname_pub_auth_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_auth_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_auth_top, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_all = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_all, '_',\n self.config.experiment_id, '.pk'])\n fname_pub_inst_top = ''.join([self.config.dir_data, '/',\n self.config.fname_pub_inst_top, '_',\n self.config.experiment_id, '.pk'])\n self.pub_auth_all = pickle.load(open(fname_pub_auth_all, 'rb'))\n self.pub_auth_top = pickle.load(open(fname_pub_auth_top, 'rb'))\n self.pub_inst_all = pickle.load(open(fname_pub_inst_all, 'rb'))\n self.pub_inst_top = pickle.load(open(fname_pub_inst_top, 'rb'))\n\n fname_pub_history = ''.join([self.config.dir_data, '/history_',\n self.config.experiment_id, '.pk'])\n self.history = pickle.load(open(fname_pub_history, 'rb'))\n\n fname_pub_staff = ''.join([self.config.dir_data, '/staff_',\n self.config.experiment_id, '.pk'])\n self.staff = pickle.load(open(fname_pub_staff, 'rb'))", "def load_campers():\n\n for row in open(\"static/campers.csv\"):\n row = row.rstrip()\n\n 
email, password, first_name, last_name, camper_photo, camper_photo_url = row.split(\",\")\n\n camper = Camper(\n camper_email=email,\n password=password,\n first_name=first_name,\n last_name=last_name,\n camper_photo=camper_photo,\n camper_photo_url=camper_photo_url)\n\n db.session.add(camper)\n\n db.session.commit()", "def get_teacher(teacher_account_id):\n query = 'SELECT * FROM teacher JOIN person ON teacher.teacher_account_id=person.account_id WHERE teacher.teacher_account_id=%s;'\n args = (teacher_account_id,)\n return database.connection.get_data(query, args)", "def load_passengers(self, passengers):\r\n if len(passengers) > self.available_capacity():\r\n raise Exception(\"The elevator cannot handle this many passengers \"\r\n \"and will be over capacity\")\r\n\r\n self.passengers += passengers\r\n\r\n self._update_destinations()", "def load_data(y_name='Species'):\n train_path = tf.keras.utils.get_file(args.TRAIN_URL.split('/')[-1], args.TRAIN_URL)\n test_path = tf.keras.utils.get_file(args.TEST_URL.split('/')[-1], args.TEST_URL)\n\n train = pd.read_csv(train_path, names=args.CSV_COLUMN_NAMES, header=0)\n train_x, train_y = train, train.pop(y_name)\n\n test = pd.read_csv(test_path, names=args.CSV_COLUMN_NAMES, header=0)\n test_x, test_y = test, test.pop(y_name)\n\n return (train_x, train_y), (test_x, test_y)", "def test_pert_file(self):\n path, case = os.path.split(self.ieee14)\n\n # --- with pert file ---\n ss = andes.run('ieee14.raw', pert='pert.py',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNotNone(ss.TDS.callpert)\n\n # --- without pert file ---\n ss = andes.run('ieee14.raw',\n input_path=path, no_output=True, default_config=True,\n )\n ss.TDS.init()\n self.assertIsNone(ss.TDS.callpert)", "def load_all_traj():\n pdb='/bpti/bpti-prot/bpti-prot.pdb'\n dcd = lambda x: '/bpti/bpti-prot/bpti-prot-%02d.dcd' % x\n tr = []\n for i in range(11):\n print ('loading ', i)\n start = dt.datetime.now()\n tr.append(md.load(DCD_ALL(i), top=PDB_ALL))\n end = dt.datetime.now()\n print((end-start).total_seconds())\n return tr" ]
[ "0.7358244", "0.5859265", "0.5856855", "0.5826998", "0.5742936", "0.552974", "0.5405307", "0.5320931", "0.5187403", "0.5179637", "0.5173516", "0.5170551", "0.51504564", "0.51137877", "0.50919735", "0.5080042", "0.5077611", "0.50590116", "0.5048102", "0.5040496", "0.50286186", "0.50239044", "0.501151", "0.50064164", "0.4995266", "0.49907285", "0.498819", "0.498819", "0.498819", "0.498819", "0.498819", "0.498819", "0.498819", "0.49867862", "0.4977609", "0.49725476", "0.4960578", "0.49600258", "0.49580115", "0.49505138", "0.49497566", "0.49497566", "0.49497566", "0.49497566", "0.49497566", "0.4946359", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.49343553", "0.4921987", "0.49175972", "0.49165758", "0.49165758", "0.49123266", "0.49118224", "0.48832676", "0.4846203", "0.48442024", "0.48340437", "0.48252752", "0.48252752", "0.4820665", "0.4817235", "0.48166952", "0.48131245", "0.480317", "0.47975397", "0.47958085", "0.47892544", "0.47885394", "0.47799644", "0.47752997", "0.4770364", "0.47659707", "0.47515628", "0.4747951", "0.47440135", "0.4740878", "0.4723391", "0.47195834", "0.47044918", "0.46988875", "0.46988472", "0.4698844", "0.4696753", "0.4691431", "0.46901733", "0.46894738", "0.46887138" ]
0.7384021
0
Unpack teachers from PDDRTeachers experiment.
def unpack_teachers(self, extra: dict):
    self.teacher_policies.extend(extra["teacher_policies"])
    self.teacher_envs.extend(extra["teacher_envs"])
    self.teacher_expl_strats.extend(extra["teacher_expl_strats"])
    self.teacher_critics.extend(extra["teacher_critics"])
    self.teacher_ex_dirs.extend(extra["teacher_ex_dirs"])
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def load_teacher_experiment(self, exp: Experiment):\n _, _, extra = load_experiment(exp)\n self.unpack_teachers(extra)", "def prune_teachers(self):\n self.teacher_policies = self.teacher_policies[: self.num_teachers]\n self.teacher_envs = self.teacher_envs[: self.num_teachers]\n self.teacher_expl_strats = self.teacher_expl_strats[: self.num_teachers]\n self.teacher_critics = self.teacher_critics[: self.num_teachers]\n self.teacher_ex_dirs = self.teacher_ex_dirs[: self.num_teachers]", "def get_teacher_assign():\n assignment_data = query_db(\n \"SELECT assignments.id, assignments.name, assignments.due_date \"\n \"FROM assignments JOIN topics ON assignments.topic_id=topics.id \"\n \"JOIN classes ON topics.class_id=classes.id WHERE teacher_id=?;\",\n [flask.session[\"id\"]],\n )\n assignments = []\n for assignment in assignment_data:\n assignment_dict_teach = {}\n assignment_dict_teach[\"id\"] = assignment[0]\n assignment_dict_teach[\"name\"] = assignment[1]\n assignment_dict_teach[\"due_date\"] = assignment[2]\n assignments.append(assignment_dict_teach)\n return assignments", "def test_pytd_teacher(self):\n defaults = parser_defaults.copy()\n defaults['datatype'] = 'train:stream'\n defaults['image_mode'] = 'ascii'\n\n with testing_utils.capture_output():\n # Get processed act from agent\n parser = display_setup_args()\n defaults['pytorch_teacher_dataset'] = 'flickr30k'\n del defaults['pytorch_teacher_task']\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n teacher = create_task_agent_from_taskname(opt)[0]\n pytorch_teacher_act = teacher.act()\n\n parser = display_setup_args()\n defaults['task'] = 'flickr30k'\n del defaults['pytorch_teacher_dataset']\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n teacher = create_task_agent_from_taskname(opt)[0]\n regular_teacher_act = teacher.act()\n\n keys = set(pytorch_teacher_act.keys()).intersection(\n set(regular_teacher_act.keys()))\n self.assertTrue(len(keys) != 0)\n for key in keys:\n self.assertTrue(pytorch_teacher_act[key] == regular_teacher_act[key],\n 'PytorchDataTeacher does not have the same value '\n 'as regular teacher for act key: {}'.format(key))", "def load_teachers(self):\n # Get the experiment's directory to load from\n ex_dir = ask_for_experiment(max_display=10, env_name=self.env_real.name, perma=False)\n self.load_teacher_experiment(ex_dir)\n if len(self.teacher_policies) < self.num_teachers:\n print(\n f\"You have loaded {len(self.teacher_policies)} teachers - load at least {self.num_teachers - len(self.teacher_policies)} more!\"\n )\n self.load_teachers()", "def load_primers(tsv_filename):\n answer = []\n with open(tsv_filename) as handle:\n for line in handle:\n if line.startswith(\"#\"):\n continue\n parts = line.rstrip(\"\\n\").split(\"\\t\")\n if len(parts) == 2:\n left, right = parts\n name = f\"P{len(answer)}\"\n else:\n name, left, right = parts[:3]\n answer.append((name, left, right))\n return answer", "def test_pyt_multitask(self):\n\n def run_display_test(defaults, ep_and_ex_counts):\n with testing_utils.capture_output() as f:\n parser = display_setup_args()\n parser.set_defaults(**defaults)\n opt = parser.parse_args()\n display_data(opt)\n str_output = f.getvalue()\n self.assertTrue(\n '[ loaded {} episodes with a total of {} examples ]'.format(\n ep_and_ex_counts[0], ep_and_ex_counts[1]\n ) in str_output,\n 'PytorchDataTeacher multitasking failed with '\n 'following args: {}'.format(opt)\n )\n\n task1 = 'babi:task1k:1'\n task2 = 'babi:task1k:2'\n dataset1 = 'flickr30k'\n dataset2 = 
'vqa_v1'\n\n # Expected example and episode counts\n eps_and_exs_counts = [\n (1800, 1800),\n (1080, 1800),\n (29900, 29900),\n (29180, 29900),\n (277349, 277349)\n ]\n defaults = parser_defaults.copy()\n\n # 1.\n defaults['pytorch_teacher_task'] = '{},{}'.format(task1, task2)\n run_display_test(defaults, eps_and_exs_counts[0])\n\n # 2.\n defaults['pytorch_teacher_task'] = task1\n defaults['task'] = task2\n run_display_test(defaults, eps_and_exs_counts[1])\n\n # 3.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = dataset1\n run_display_test(defaults, eps_and_exs_counts[2])\n\n # 4.\n del defaults['pytorch_teacher_task']\n defaults['task'] = task1\n run_display_test(defaults, eps_and_exs_counts[3])\n\n # 5.\n del defaults['task']\n defaults['pytorch_teacher_dataset'] = '{},{}'.format(dataset1, dataset2)\n run_display_test(defaults, eps_and_exs_counts[4])", "def get_teacher(self, **fields):\n existing_fields = [i.name for i in self._db.get_columns('teachers')]\n teacher_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n teacher_fields[key] = value\n teachers = [i for i in Teachers.select().filter(**teacher_fields)]\n # Expect single value if search by unique fields, list if by non-unique\n return teachers if len(teachers) > 1 else teachers[0] if len(teachers) == 1 else None", "def get_all_profesors(self) -> List[Teacher]:\n self.cursor.execute(\n f\"SELECT * FROM {self.table_name}\")\n \n teachers = []\n for teacher in self.cursor.fetchall():\n teacher_parsed = list(teacher[0:8]) + [json.loads(t) for t in teacher[8:]]\n teachers.append(Teacher.parse_tuple(teacher_parsed))\n \n return teachers", "def ensemble_preds(dataset, nb_teachers, stdnt_data):\n\n # Compute shape of array that will hold probabilities produced by each\n # teacher, for each training point, and each output class\n result_shape = (nb_teachers, len(stdnt_data), FLAGS.nb_labels)\n\n # Create array that will hold result\n result = np.zeros(result_shape, dtype=np.float32)\n\n # Get predictions from each teacher\n for teacher_id in xrange(nb_teachers):\n # Compute path of checkpoint file for teacher model with ID teacher_id\n if FLAGS.deeper:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '_deep.ckpt-' + str(FLAGS.teachers_max_steps - 1) #NOLINT(long-line)\n else:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '.ckpt-' + str(FLAGS.teachers_max_steps - 1) # NOLINT(long-line)\n\n # Get predictions on our training data and store in result array\n result[teacher_id] = deep_cnn.softmax_preds(stdnt_data, ckpt_path)\n\n # This can take a while when there are a lot of teachers so output status\n print(\"Computed Teacher \" + str(teacher_id) + \" softmax predictions\")\n\n return result", "def multiple_matlab_csv_to_teacher_data(short_runs_dirname):\n subdirname = 'Run-'\n data = None\n data_length = 0\n for i in range(10):\n dirname = os.path.join(short_runs_dirname, subdirname+str(i+1))\n run_data = matlab_csv_to_teacher_data(dirname)\n if i == 0:\n data = run_data\n else:\n for i, phoneme_data in enumerate(run_data):\n data[i] = np.vstack((data[i], phoneme_data))\n\n data_length += run_data[0].shape[0]\n\n for i, phoneme_data in enumerate(data):\n assert phoneme_data.shape[0] == data_length\n\n return data", "def get_teachers(self):\n query = Teacher.all().order('teacher')\n return query.fetch()", "def load_unpacker_dataset(sentences):\n return 
TFRecordDataset([path.join(TFRUDIR, sentence+'.tfr')\n for sentence in sentences])\\\n .map(\n lambda record: \\\n tf.parse_single_example(\n record,\n features={\n 's': tf.FixedLenFeature([], tf.string),\n 'l': tf.FixedLenFeature([NL], tf.float32),\n 't': tf.FixedLenFeature([NT], tf.float32)\n }\n )\n )\\\n .map(\n lambda feature: (feature['l'], feature['s'], feature['t'])\n )", "def train_teacher (nb_teachers, teacher_id):\n # Load the dataset\n X_train, X_test, y_train, y_test = models.get_dataset()\n\n print(X_train.shape)\n print(y_train.shape)\n print(X_test.shape)\n print(y_test.shape)\n \n # Retrieve subset of data for this teacher\n data, labels = partition.partition_dataset(X_train,\n y_train,\n nb_teachers,\n teacher_id)\n\n print(\"Length of training data: \" + str(len(labels)))\n\n # Define teacher checkpoint filename and full path\n\n filename = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.hdf5'\n filename2 = str(nb_teachers) + '_teachers_' + str(teacher_id) + '.h5'\n \n # Perform teacher training need to modify \n \n\n # Create teacher model\n model, opt = models.create_two_layer_mlp(46) # num of cols\n model.compile(loss='binary_crossentropy',\n optimizer=\"Adam\",\n metrics=['accuracy'])\n model, hist = models.training(model, data, X_test, labels, y_test,filename)\n\n #modify\n model_json = model.to_json()\n with open(\"model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\n model.save_weights(filename2)\n print(\"Saved model to disk\")\n return True", "def extract_tubelets(dname, gpu=-1, redo=False):\n d = GetDataset(dname)\n\n if gpu >= 0:\n caffe.set_mode_gpu()\n caffe.set_device(gpu)\n\n model_dir = os.path.join(os.path.dirname(__file__), '../models/ACT-detector/', dname)\n output_dir = os.path.join(os.path.dirname(__file__), '../results/ACT-detector/', dname)\n \n # load the RGB network\n rgb_proto = os.path.join(model_dir, \"deploy_RGB.prototxt\")\n rgb_model = os.path.join(model_dir, \"../generated_AVA_iter_118662.caffemodel\")\n net_rgb = caffe.Net(rgb_proto, caffe.TEST, weights=rgb_model)\n \n # load the FLOW5 network\n flo_proto = os.path.join(model_dir, \"deploy_FLOW5.prototxt\")\n flo_model = os.path.join(model_dir, \"../generated_AVA_iter_59463.caffemodel\")\n net_flo = caffe.Net(flo_proto, caffe.TEST, weights=flo_model)\n\n vlist = d.test_vlist()\n for iv, v in enumerate(vlist):\n print(\"Processing video {:d}/{:d}: {:s}\".format( iv+1, len(vlist), v))\n h, w = d.resolution(v)\n \n # network output is normalized between 0,1 ; so we will multiply it by the following array\n resolution_array = np.array([w,h,w,h]*K, dtype=np.float32)\n \n # now process each frame\n for i in xrange(1, 1 + d.nframes(v) - K + 1):\n outfile = os.path.join(output_dir, d.frame_format(v,i) + \".pkl\")\n \n # skip if already computed\n if os.path.isfile(outfile) and not redo:\n continue\n \n # read the frames for the forward\n kwargs_rgb = {}\n kwargs_flo = {}\n for j in xrange(K):\n cap = cv2.VideoCapture(d.vidfile(v,0))\n #print(frame)\n #print(int(cap.get(7)))\n cap.set(1,i + j - 1)\n im = cap.read()[1]\n cap.release()\n #im = cv2.imread(d.imfile(v, i + j))\n if im is None:\n print \"Image {:s} does not exist\".format(d.imfile(v, i+j))\n return\n imscale = cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR)\n kwargs_rgb['data_stream' + str(j)] = np.transpose(imscale-MEAN, (2, 0, 1))[None, :, :, :]\n imf = [cv2.imread(d.flowfile(v.split(\".\")[0], min(d.nframes(v), i + j + iflow))) for iflow in xrange(NFLOWS)]\n if 
np.any(imf) is None:\n print \"Flow image {:s} does not exist\".format(d.flowfile(v, i+j))\n return\n imscalef = [cv2.resize(im, (IMGSIZE, IMGSIZE), interpolation=cv2.INTER_LINEAR) for im in imf]\n timscale = [np.transpose(im-MEAN, (2, 0, 1))[None, :, :, :] for im in imscalef]\n kwargs_flo['data_stream' + str(j) + 'flow'] = np.concatenate(timscale, axis=1)\n \n # compute rgb and flow scores\n # two forward passes: one for the rgb and one for the flow \n net_rgb.forward(end=\"mbox_conf_flatten\", **kwargs_rgb) # forward of rgb with confidence and regression\n net_flo.forward(end=\"mbox_conf_flatten\", **kwargs_flo) # forward of flow5 with confidence and regression\n \n # compute late fusion of rgb and flow scores (keep regression from rgb)\n # use net_rgb for standard detections, net_flo for having all boxes\n scores = 0.5 * (net_rgb.blobs['mbox_conf_flatten'].data + net_flo.blobs['mbox_conf_flatten'].data)\n net_rgb.blobs['mbox_conf_flatten'].data[...] = scores\n net_flo.blobs['mbox_conf_flatten'].data[...] = scores\n net_flo.blobs['mbox_loc'].data[...] = net_rgb.blobs['mbox_loc'].data\n \n # two forward passes, only for the last layer \n # dets is the detections after per-class NMS and thresholding (stardard)\n # dets_all contains all the scores and regressions for all tubelets \n dets = net_rgb.forward(start='detection_out')['detection_out'][0, 0, :, 1:]\n dets_all = net_flo.forward(start='detection_out_full')['detection_out_full'][0, 0, :, 1:]\n \n # parse detections with per-class NMS\n if dets.shape[0] == 1 and np.all(dets == -1):\n dets = np.empty((0, dets.shape[1]), dtype=np.float32)\n\n dets[:, 2:] *= resolution_array # network output was normalized in [0..1]\n dets[:, 0] -= 1 # label 0 was background, come back to label in [0..nlabels-1]\n dets[:, 2::2] = np.maximum(0, np.minimum(w, dets[:, 2::2]))\n dets[:, 3::2] = np.maximum(0, np.minimum(h, dets[:, 3::2]))\n\n # parse detections with global NMS at 0.7 (top 300)\n # coordinates were normalized in [0..1]\n dets_all[:, 0:4*K] *= resolution_array \n dets_all[:, 0:4*K:2] = np.maximum(0, np.minimum(w, dets_all[:, 0:4*K:2]))\n dets_all[:, 1:4*K:2] = np.maximum(0, np.minimum(h, dets_all[:, 1:4*K:2]))\n idx = nms_tubelets(np.concatenate((dets_all[:, :4*K], np.max(dets_all[:, 4*K+1:], axis=1)[:, None]), axis=1), 0.7, 300)\n dets_all = dets_all[idx, :]\n \n # save file\n if not os.path.isdir(os.path.dirname(outfile)):\n os.system('mkdir -p ' + os.path.dirname(outfile))\n\n with open(outfile, 'wb') as fid:\n pickle.dump((dets, dets_all), fid)", "def test_teacher_role():\n response = test_app.post(\"/bkt_service/unwind\", params='''[{\n \"event\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/OutcomeEvent\",\n \"actor\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"student-1462300421838-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/Person\",\n \"roles\": [\n \"urn:lti:instrole:ims/lis/Teacher\"\n ]\n },\n \"action\": \"http://purl.imsglobal.org/vocab/caliper/v1/action#Graded\",\n \"object\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"attempt-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Attempt\",\n \"extensions\": {\n \"assessmentType\": \"Diagnostic Assessment\",\n \"assessmentId\": \"assessment-1462300421838-4\"\n },\n \"count\": 1,\n \"startedAtTime\": \"2016-05-03T21:33:41.844Z\",\n \"endedAtTime\": \"2016-05-03T22:03:41.844Z\"\n },\n 
\"generated\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"result-1462300421838-4\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"assignableId\": \"assessment-1462300421838-4\",\n \"normalScore\": 80,\n \"totalScore\": 100,\n \"itemResults\": [\n {\n \"@id\": \"item-result-1462300421838-4-1\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/Result\",\n \"question_type\": \"mcq\",\n \"automarkable\": 1,\n \"score\": 7,\n \"max_score\": 10,\n \"question_reference\": \"c0a3f0c8-eac7-4795-8c7a-adf98e336a7b\",\n \"item_reference\": \"Adaptive_Item2_extract_USMOs\",\n \"sequenceNumber\": 1\n }\n ]\n },\n \"group\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"class-01\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/lis/CourseOffering\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {\n \"pageNumber\": null,\n \"courseCode\": \"course-01\",\n \"calmsCourseId\": \"calms-course-01\",\n \"lessonId\": \"lesson-01\",\n \"platform\": \"D2L\",\n \"classroomTypeId\": \"3500.0\",\n \"activityId\": \"10\",\n \"gradeLevel\": \"8\",\n \"CourseOfferingId\": \"1200.0\",\n \"adaptivewrapperId\": \"\",\n \"schoolYear\": \"2015-20116\",\n \"unitId\": \"3201.0\",\n \"moduleId\": \"1110.0\",\n \"courseId\": \"2550.0\",\n \"assessmentId\": \"4520.0\",\n \"originSystemId\": \"sams\",\n \"businessLineId\": \"1300.0\",\n \"contextId\": \"587279312bf9a9afd947ddab\"\n },\n \"dateCreated\": null,\n \"dateModified\": null,\n \"courseNumber\": null,\n \"academicSession\": null,\n \"subOrganizationOf\": {\n \"@context\": \"http://purl.imsglobal.org/ctx/caliper/v1/Context\",\n \"@id\": \"1001.0\",\n \"@type\": \"http://purl.imsglobal.org/caliper/v1/w3c/Organization\",\n \"name\": null,\n \"description\": null,\n \"extensions\": {},\n \"dateCreated\": null,\n \"dateModified\": null,\n \"subOrganizationOf\": null\n }\n },\n \"eventTime\": \"2017-01-09T14:21:00Z\"\n }\n }\n ]''')\n assert response.status == '200 OK'\n assert len(response.json) == 1\n #assert response.json[0][\"error\"][\"code\"] == 21\n #assert \"role\" in response.json[0][\"error\"][\"message\"]", "def extract_trpt_data(udp_packet):\n logger.debug('UDP packet sport [%s], dport [%s], len [%s]',\n udp_packet.sport, udp_packet.dport, udp_packet.len)\n\n trpt_pkt = TelemetryReport(_pkt=udp_packet.payload)\n trpt_eth = EthInt(trpt_pkt.payload)\n logger.debug('TRPT ethernet dst - [%s], src - [%s], type - [%s]',\n trpt_eth.dst, trpt_eth.src, trpt_eth.type)\n return extract_int_data(trpt_eth)", "def extract(soup):\r\n table = soup.find('div', id='dnn_ctr11396_TimeTableView_PlaceHolder').find('table')\r\n rows = table.findChildren('tr', recursive=False)\r\n return [[col.findAll('div', {'class': 'TTLesson'}) for col in row.findChildren('td', recursive=False)[1:]]\r\n for row in rows[1:]]", "def read_triplets(seed_candidates):\n if \"pickle\" in seed_candidates:\n if \"*\" in seed_candidates:\n all_files = glob.glob(seed_candidates)\n new_data = []\n for file_name in all_files:\n with open(file_name, 'rb') as f:\n data = pickle.load(f)\n for dd in data:\n new_data.append((dd[0], dd[1], dd[2], dd[3]))\n df_seed = pd.DataFrame(new_data, columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)\n else:\n with open(seed_candidates, 'rb') as f:\n data = pickle.load(f)\n new_data = []\n for dd in data:\n new_data.append((dd[0], dd[1], dd[2], dd[3]))\n # idx = int(dd[0][10:])\n # new_data.append((idx, dd[1], dd[2], dd[3]))\n df_seed = pd.DataFrame(new_data, 
columns=['evtid', 'h1', 'h2', 'h3'], dtype=np.int64)\n else:\n column_names = ['evtid', 'h1', 'h2', 'h3']\n if \"*\" in seed_candidates:\n all_files = glob.glob(seed_candidates)\n new_data = []\n for file_name in all_files:\n df_seed_tmp = pd.read_csv(file_name, header=None, names=column_names,)\n new_data.append(df_seed_tmp)\n df_seed = pd.concat(new_data)\n else:\n df_seed = pd.read_csv(seed_candidates, header=None,\n names=column_names)\n return df_seed", "def get_teacher_career_results(self, teacher, career):\n data = []\n\n # Get the active exams of the career.\n exams = EvaluationsExam.objects.filter(\n type__exact=career.type, status=\"ACTIVE\")\n\n # Get the results for each exam.\n for exam in exams:\n\n # Get the signatures of the teacher for the career in the exam.\n signatures_dtl = EvaluationsTeacherSignature.objects.filter(\n fk_teacher__exact=teacher.id, fk_period__exact=exam.fk_period, status=\"ACTIVE\").select_related('fk_signature')\n\n signatures_results = []\n for signature_dtl in signatures_dtl:\n \n # If it raise an exception, it means that the signature isn't evaluated yet or other error.\n try:\n # Get the results of the signature.\n signature_results = EvaluationsSignatureResult.objects.get(\n group=signature_dtl.group,\n fk_signature=signature_dtl.fk_signature.id,\n fk_exam=exam.id,\n status=\"ACTIVE\"\n )\n\n # Get the results for each question in the exam for the signature.\n questions_results = EvaluationsSignatureQuestionResult.objects.filter(\n group=signature_dtl.group,\n fk_signature=signature_dtl.fk_signature.id,\n fk_exam=exam.id,\n fk_question__optional='NO',\n status=\"ACTIVE\"\n ).values_list('fk_question__description', 'result')\n\n # Get the comments of the signature/group.\n comments_result = EvaluationsSignatureQuestionResult.objects.get(\n group=signature_dtl.group,\n fk_signature=signature_dtl.fk_signature.id,\n fk_exam=exam.id,\n fk_question__optional='YES',\n status=\"ACTIVE\"\n ).result\n\n # Split the comments and add them to a list, only the ones that are not empty.\n comments = list(filter(None, comments_result.split('|')))\n\n # Crate a dictionary with the results of the signature and the questions.\n signatures_results.append({\n 'teacher': teacher.name + ' ' + teacher.last_name + ' ' + teacher.last_name_2,\n 'signature': signature_dtl.fk_signature.description,\n 'group': signature_dtl.group,\n 'average': signature_results.average,\n 'comments': comments,\n 'total_evaluated': signature_results.total_evaluated,\n 'questions': questions_results\n })\n except Exception:\n pass\n\n # Add the results to the exam dictionary.\n exam_results = {\n 'exam': exam.description,\n 'career': career.description,\n 'signatures_results': signatures_results,\n 'period': exam.fk_period.period\n }\n\n # Add the exam results to the list that will be returned at the end.\n data.append(exam_results)\n\n return data", "def extractDDE(self, lang, username, screenname, description, tweets):\n if isinstance(tweets, list):\n tweets = ' '.join(tweets)\n form = {\n 'lang': lang,\n 'username': username,\n 'screenname': screenname,\n 'description': description,\n 'tweet': tweets\n }\n return self.POST('extract', {}, form)", "def get_quiz_teacher():\n quiz_data = query_db(\n \"SELECT id, name FROM quizzes WHERE creator_id=?;\", [flask.session[\"id\"]]\n )\n quizzes = []\n for quiz in quiz_data:\n quiz_dict = {}\n quiz_dict[\"id\"] = quiz[0]\n quiz_dict[\"name\"] = quiz[1]\n quizzes.append(quiz_dict)\n return quizzes", "def decode(self, session, dev_example):\n 
unzipped_dev_example = list(zip(*dev_example))\n input_feed = self.create_feed_dict(unzipped_dev_example[0:4], dropout = 1)\n output_feed = [self.h_s, self.h_e, self.relevence]\n outputs = session.run(output_feed, input_feed)\n h_s = outputs[0]\n h_e = outputs[1]\n rel = outputs[2]\n return h_s, h_e, rel", "def extract_unpack(self, args):\n return self.extract_features(args)", "def load_tamper(self, dataset_dir, subset):\n # Add classes. We have one class.\n # Naming the dataset nucleus, and the class nucleus\n self.add_class(\"tampers\", 1, \"tampers\")\n\n # Which subset?\n # \"val\": use hard-coded list above\n # \"train\": use data from stage1_train minus the hard-coded list above\n # else: use the data from the specified sub-directory\n # assert subset in [\"train\", \"val\", \"stage1_train\", \"stage1_test\", \"stage2_test\"]\n # subset_dir = \"stage1_train\" if subset in [\"train\", \"val\"] else subset\n dataset_dir = os.path.join(dataset_dir, subset, 'images')\n if subset == \"val\" or subset == \"test\":\n image_ids = next(os.walk(dataset_dir))[2]\n else:\n # Get image ids from directory names\n image_ids = next(os.walk(dataset_dir))[2]\n \n\n # dircopy_move = '/data/twj/copy-move/data_zoo/dataset/images/train'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # dirnew_splicing = '/data/tamper'\n # image_ids_new_splicing = next(os.walk(os.path.join(dirnew_splicing, 'images')))[2]\n\n # dircopy_move = '/home/as/deeplab/wpmrcnn/ca2new/test'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n \n # dircopy_move = '/data/gy/ca2att/train3'\n # image_ids_copy_move = next(os.walk(os.path.join(dircopy_move, 'images')))[2]\n\n # # # dirtxt_sp = '/data/gy/tamperpre/train'\n # # # image_ids_txt_sp = next(os.walk(os.path.join(dirtxt_sp, 'images')))[2]\n\n # dirnew_sp = '/data/gy/c2newsp/train'\n # image_ids_new_sp = next(os.walk(os.path.join(dirnew_sp, 'images')))[2]\n\n # Add images\n for image_id in image_ids:\n self.add_image(\n \"tampers\",\n image_id=image_id[:-4],\n path=os.path.join(dataset_dir, image_id))\n\n # for image_id in image_ids_copy_move:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dircopy_move, 'images', image_id))\n\n # for image_id in image_ids_new_splicing:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_splicing, 'images', image_id))\n\n # # for image_id in image_ids_txt_sp:\n # # self.add_image(\n # # \"tampers\",\n # # image_id=image_id[:-4],\n # # path=os.path.join(dirtxt_sp, 'images', image_id))\n\n # for image_id in image_ids_new_sp:\n # self.add_image(\n # \"tampers\",\n # image_id=image_id[:-4],\n # path=os.path.join(dirnew_sp, 'images', image_id))", "def inference(self):\r\n\t\tfor partition, loader in self.loaders.items():\r\n\t\t\tavg_loss, (y, y_hat), post, attentions, tags = self.eval_loader(\r\n\t\t\t\tloader)\r\n\t\t\tself.preds[partition] = {\r\n\t\t\t\t'tag': tags,\r\n\t\t\t\t'y': y,\r\n\t\t\t\t'y_hat': y_hat,\r\n\t\t\t\t# 'posteriors': post,\r\n\t\t\t\t# 'attentions': attentions\r\n\t\t\t}", "def deepscore(teacher):\n if teacher == None:\n print(\"Not a valid teacher\")\n return\n if teacher.get('rmpdata') == None: snc.rateThisProfessor(teacher,instructors)\n print(\"# \" + teacher['name'])\n scoreTeacherlegacy(teacher)\n scoreTeacherues(teacher)\n scoreTeacherrmp(teacher)", "def partition_dataset(data, labels, nb_teachers, teacher_id):\n\n # Sanity check\n assert (int(teacher_id) < 
int(nb_teachers))\n\n # This will floor the possible number of batches\n batch_len = int(len(data) / nb_teachers)\n\n # Compute start, end indices of partition\n start = teacher_id * batch_len\n end = (teacher_id + 1) * batch_len\n\n # Slice partition off\n partition_data = data[start:end]\n if labels is not None:\n partition_labels = labels[start:end]\n else:\n partition_labels = None\n\n return partition_data, partition_labels", "def get_train(self, even=None):\n\n #self.images, self.labels, self.traces = trace_data.get_my_teacher()\n _, self.images, self.labels, self.traces, _ = trace_data.get_my_teacher()\n #print(self.labels)\n self.length = len(self.images)\n self.create_teacher()", "def legacydatapoints(teacher):\n points = 0\n for surveytype in ['2','1','H']:\n for criteria in range(0,22):\n for degree in range(0,6):\n try:\n points += teacher['data'][surveytype][criteria]['degree' + str(degree)]\n except:\n pass\n for criteria in range(22,27):\n for degree in range(0,7):\n try:\n points += teacher['data'][surveytype][criteria]['degree' + str(degree)]\n except:\n pass\n return points", "def get_teacher(teacher_account_id):\n query = 'SELECT * FROM teacher JOIN person ON teacher.teacher_account_id=person.account_id WHERE teacher.teacher_account_id=%s;'\n args = (teacher_account_id,)\n return database.connection.get_data(query, args)", "def subjects_parser(root, study, outdir):\n # Welcome\n print(\"Starting subjects parsing...\")\n\n # Parameters\n subjects = defaultdict(\n lambda: defaultdict(dict))\n participants_file = os.path.join(root, \"participants.tsv\")\n\n # Parse the paticipants tsv file\n with open(participants_file) as csvfile:\n reader = csv.DictReader(csvfile, delimiter=\"\\t\")\n for row in reader:\n subject = row[\"participant_id\"].replace(\"sub-\", \"\")\n center = row.get(\"site\", DEFAULT_CENTER)\n subjects[center][subject] = {\n \"identifier\": md5_sum(subject),\n \"code_in_study\": subject,\n \"handedness\": row.get(\"handedness\", \"unknown\"),\n \"gender\": row.get(\"gender\", \"unknown\")}\n\n # Save the results\n print(\"Saving data in '{0}'...\".format(outdir))\n save_parsing(subjects, outdir, study, \"subjects\")\n\n # Goodbye\n print(\"Done.\")\n\n return subjects", "def extract_testsuite(self, testsuite, info):\n\n for testcase in testsuite.getchildren():\n\n self.extract_testcase(testcase, info, testsuite.get(\"name\"))", "def parse_pitcher(self, response):\n\n params = parse_qs(\n urlparse(response.url).query,\n keep_blank_values=True)\n\n data = {}\n data['year'] = params['year'][0]\n data['month'] = params['month'][0]\n data['day'] = params['day'][0]\n data['pitcher_id'] = params['pitchSel'][0]\n data['game_id'] = params['game'][0]\n\n # Find the raw html table link.\n xpath = '//a[text()=\"Get Expanded Tabled Data\"]'\n\n # Generate the url to open the raw html table page.\n sel = response.xpath(xpath)[0]\n\n link = urlparse(sel.xpath('@href').extract()[0])\n params = parse_qs(link.query, keep_blank_values=True)\n\n for key, value in params.items():\n params[key] = value[0]\n # Override the rows/column size just in case.\n params['h_size'] = 3000\n params['v_size'] = 3000\n\n url = self.base_url + link.path + \"?\" + urlencode(params)\n\n req = Request(url, callback=self.parse_pitcher_stats, meta={'data': data})\n\n yield req", "def get_teacher_topic_all():\n topic_data = query_db(\n \"SELECT topics.id, topics.name, classes.name FROM topics JOIN classes \"\n \"ON topics.class_id=classes.id WHERE teacher_id=?;\",\n [flask.session[\"id\"]],\n )\n 
topics = []\n for topic in topic_data:\n topic_dict_teacher = {}\n topic_dict_teacher[\"id\"] = topic[0]\n topic_dict_teacher[\"name\"] = flask.escape(str(topic[1]))\n topic_dict_teacher[\"class\"] = flask.escape(str(topic[2]))\n topics.append(topic_dict_teacher)\n return topics", "def get_data_loaders():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2sentences_finalgenerated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+5):]\n \n #history_complete.append(history)\n if len(persona) == 4:\n if len(history) > (len(persona)+3):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def test_practitioner_6(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"practitioner-example-f002-pv.json\"\n inst = practitioner.Practitioner.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Practitioner\" == inst.resource_type\n\n impl_practitioner_6(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Practitioner\" == data[\"resourceType\"]\n\n inst2 = practitioner.Practitioner(**data)\n impl_practitioner_6(inst2)", "def teacher(self):\n if \"teacher\" in self._prop_dict:\n if isinstance(self._prop_dict[\"teacher\"], OneDriveObjectBase):\n return self._prop_dict[\"teacher\"]\n else :\n self._prop_dict[\"teacher\"] = EducationTeacher(self._prop_dict[\"teacher\"])\n return self._prop_dict[\"teacher\"]\n\n return None", "def extract(src_dir,feat_file,ivectors_dir,num_gselect):\n os.system(\"./extract_ivectors.sh --num-gselect \"+str(num_gselect)+ \" \" + src_dir + \" \" + feat_file + \" \" + ivectors_dir)\n keys=[]\n ivectors=np.empty((0,0))\n for key,mat in kaldi_io.read_vec_flt_scp(ivectors_dir+'/ivector.scp'):\n if ivectors.shape[1] != mat.shape[0]:\n ivectors=ivectors.reshape((0,mat.shape[0]))\n ivectors=np.vstack((ivectors,mat))\n keys.append(key)\n\n ivectors=np.asarray(ivectors)\n keys=np.asarray(keys)\n return ivectors,keys", "def extract_table_data(path):\n document = zipfile.ZipFile(path)\n xml_content = document.read('word/document.xml')\n document.close()\n tree = XML(xml_content)\n table = tree.find(BODY).find(TABLE)\n rows = table.findall(ROW)\n\n day = \"\"\n dates = \"\"\n teachers = {}\n\n for row in rows[1:]:\n cols = row.findall(COL)\n\n #Code\n code = extract_text(cols[0])\n\n #course\n course = extract_text(cols[1])\n\n #day\n dayTemp = extract_text(cols[2])\n if dayTemp != \"\":\n day = 
dayTemp\n\n #Calendar/Date\n temp_date = extract_date(cols[3])\n if len(temp_date) != 0:\n dates = temp_date\n\n #hour\n times = extract_time(cols[4])\n\n #Teachers\n teacher = extract_text(cols[5])\n\n if teacher == \"\":\n continue\n\n if teacher not in teachers:\n teachers[teacher] = Teacher(teacher)\n\n segment = Segment(code, course, day, dates, times)\n\n teachers[teacher].add_segment(segment)\n\n return teachers", "def decomposeSeed( self, bSeed ):\n\n\t\ttry:\n\t\t\tbSeed = long( bSeed )\n\t\t\t\n\t\t\t# This is a copy of the functions in product-key/ck-common.c\n\t\t\t# They are rewritten here to save on shelling out with each\n\t\t\t# rda-backend that checks in.\n\t\t\tbSerial = self._getSerial( bSeed )\n\t\t\tsVersion = self._getVersion( bSeed )\n\t\t\tbNumcam = self._getNumcam( bSeed )\n\t\t\tsMac = self._getMac( bSeed )\n\n\t\t\treturn ( bSerial, sVersion, bNumcam, sMac )\n\n\t\texcept Exception, e:\n\t\t\terrMsg( 'error while decomposing key' )\n\t\t\traise", "def extract_speaker_data(self, X, y):\n\n speaker_names = []\n global_idx = 0\n curr_speaker_num = -1\n old_speaker = ''\n\n # Crawl the base and all sub folders\n for root, directories, filenames in os.walk(self.base_folder):\n # Ignore crp and DOC folder\n if self.valid_speakers and os.path.split(root)[1] not in self.valid_speakers:\n continue\n\n # Check files\n for filename in [filename for filename in filenames if self.file_match_regex.search(filename)]:\n # Extract speaker\n speaker = os.path.split(root)[1]\n if speaker != old_speaker:\n curr_speaker_num += 1\n old_speaker = speaker\n speaker_names.append(speaker)\n print('Extraction progress: %d/%d' % (curr_speaker_num + 1, self.max_speakers))\n\n if curr_speaker_num < self.max_speakers:\n full_path = os.path.join(root, filename)\n global_idx += extract_mel_spectrogram(full_path, X, y, global_idx, curr_speaker_num)\n\n return X[0:global_idx], y[0:global_idx], speaker_names", "def get_topics_for_alt(alt_list, pgm_dict):\n for epv in alt_list:\n name = epv.get('package', {}).get('name', [''])[0]\n if name:\n for pgm_pkg_key, pgm_list in pgm_dict.items():\n for pgm_epv in pgm_list:\n if name == pgm_epv.get('package_name', ''):\n epv['package']['pgm_topics'] = pgm_epv.get('topic_list', [])\n\n return alt_list", "def parse_identifying_data(path, passwd, embedded_file='participants.txt'):\n if path is not None:\n zf = zipfile.ZipFile(path)\n zf.setpassword(passwd)\n\n participants = {}\n for l in zf.read(embedded_file).splitlines():\n if l.startswith('#'):\n continue\n\n bc, name = l.strip().split('\\t')[:2]\n participants[bc] = name.replace(\",\", \"\")\n\n print \"Using identified data!\"\n else:\n participants = None\n\n return participants", "def get_teacher():\n\n rows = db.engine.execute(f\"SELECT * FROM teacher_login WHERE loginid = {g.user.loginid}\")\n res = []\n for row in rows:\n res.append(dict(row))\n return jsonify(res)", "def extract_pp(self):\n build([srna.ExtractPPW(fastq_dic=self.fastq_dic, num_cpus=self.num_cpus,\n indexfile=self.hisat_index, workdir=self.workdir,\n kingdom=self.kingdom)],\n local_scheduler=self.local_scheduler)", "def extract(self, data):", "def extract_speaker_data(self, X, y):\r\n\r\n speaker_names = []\r\n global_idx = 0\r\n curr_speaker_num = -1\r\n old_speaker = ''\r\n\r\n # Crawl the base and all sub folders\r\n for root, directories, filenames in os.walk(self.base_folder):\r\n\r\n # Ignore crp and DOC folder\r\n if root[-5:] not in self.valid_speakers:\r\n continue\r\n\r\n # Check files\r\n for filename in filenames:\r\n\r\n 
# Can't read the other wav files\r\n if '_RIFF.WAV' not in filename:\r\n continue\r\n\r\n # Extract speaker\r\n speaker = root[-5:]\r\n if speaker != old_speaker:\r\n curr_speaker_num += 1\r\n old_speaker = speaker\r\n speaker_names.append(speaker)\r\n print('Extraction progress: %d/%d' % (curr_speaker_num + 1, self.max_speakers))\r\n\r\n if curr_speaker_num < self.max_speakers:\r\n full_path = os.path.join(root, filename)\r\n global_idx += extract_mel_spectrogram(full_path, X, y, global_idx, curr_speaker_num)\r\n\r\n return X[0:global_idx], y[0:global_idx], speaker_names", "def extract(self):\n pass", "def get_test_loader(id_list = './data/sample_submission.csv', root_dir = './data/test/'):\n data = HumanProteinDataset(id_list, root_dir, transform = transforms.Compose([\n Rescale((256, 256)), \n ToTensor()\n ]))\n\n indices = np.arange(len(data))\n dataloader_test = DataLoader(data, batch_size=10, num_workers=5)\n\n return dataloader_test", "def unpack(self) -> Tuple[list, list, list, list, float, int, list]:\n # (nice to have) todo:refactor --> as a namedtuple\n unpacked_super = super().unpack()\n\n observations, actions, rewards, Q_values, trajectory_return, _trajectory_lenght = unpacked_super\n\n return observations, actions, rewards, Q_values, trajectory_return, _trajectory_lenght, self.V_estimates", "def _init_extractors(self):\n @self.extractors_wrapper(\"networkx\")\n def get_nx_extractor(graph):\n \"\"\"\n :param graph: networkx.Graph\n :returns: projx.nx_extractor\n \"\"\"\n return nx_xtrct.nx_extractor(\n self.extractor_json[self.extractor_name], graph\n )\n\n @self.extractors_wrapper(\"neo4j\")\n def get_neo4j_extractor(graph):\n \"\"\"\n :returns: projx.nx_extractor\n \"\"\"\n return neo4j_xtrct.neo4j_extractor(\n self.extractor_json[self.extractor_name], graph\n )\n\n @self.extractors_wrapper(\"edgelist\")\n def get_edgelist_extractor(graph):\n \"\"\"\n :returns: projx.nx_extractor\n \"\"\"\n return edgelist_xtrct.edgelist_extractor(\n self.extractor_json[self.extractor_name], graph\n )", "def setup_experiment(testruns, droplist=\"\"):\n ex = Experiment()\n ex.addSoluFile(ALL_SOLU)\n\n regexlist = []\n for x in droplist.split(\",\"):\n # defaultvalue, if empty we don't want to exclude everything\n if x == \"\":\n continue\n try:\n y = re.compile(x)\n regexlist.append(y)\n except:\n pass\n\n excluded_inst = []\n # get data\n for t in testruns:\n # update representation\n additional_data = {\"RubberbandId\": get_rbid_representation(t, \"extended\")}\n\n # collect data and pass to ipet\n ipettestrun = TestRun()\n tr_raw_data = t.get_data(add_data=additional_data)\n\n tr_data = {}\n for i in tr_raw_data.keys():\n for r in regexlist:\n if r.match(i):\n excluded_inst.append(i)\n break\n else:\n tr_data[i] = tr_raw_data[i]\n\n ipettestrun.data = pd.DataFrame(tr_data).T\n\n ex.testruns.append(ipettestrun)\n return ex, excluded_inst", "def extract_all_puzzles(self) -> List[Puzzle]:\n pages_urls = self.extract_pages()\n print(f\"\\n{pages_urls}\\n\")\n\n # Extract all puzzles\n puzzles = []\n for path in pages_urls:\n full_path = f\"{DOMAIN}{path}\"\n extracted_puzzle = self.extract_puzzle(full_path)\n puzzles.append(extracted_puzzle)\n # print(extracted_puzzle.title, extracted_puzzle.url, extracted_puzzle.description, extracted_puzzle.clues)\n print(extracted_puzzle.title)\n return puzzles", "def unpackRecords(self,unpackTypes):\n for record in self.records:\n if record.name in unpackTypes:\n record.load(unpack=True)", "def _forecasters(self):\n return 
self._get_estimator_tuples(self.forecasters, clone_ests=False)", "def generate_teacher_family(p_adults, p_children):\n\tN_adults = np.random.choice(list(p_adults.keys()), p=list(p_adults.values()))\n\tN_children = np.random.choice(list(p_children[N_adults].keys()),\n\t\t\t\t\t\t\t\tp=list(p_children[N_adults].values()))\n\t\n\tages = np.random.randint(0, 18, N_children)\n\treturn ages, N_adults", "def getTelemarketers():\n callers = getAllKnownCallers()\n callees = getAllKnownCallees()\n texters = getAllKnownTexters()\n textees = getAllKnownTextees()\n telemarketers = getKnownTelemarketers()\n\n # Remove known telemarketers from list\n t = set(callers).difference(telemarketers)\n # Remove known numbers that have received calls\n t.difference_update(callees)\n # Remove those who have sent a text\n t.difference_update(texters)\n # Remove those who have received a text\n t.difference_update(textees)\n # Join with known telemarketers\n t = set(telemarketers).union(t)\n\n return sorted(t)", "def get_data_loaders_2sentences():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+1):]\n #history_complete.append(history)\n if len(history) > 4:\n history_chatbot = history[1::2]\n\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def test_practitioner_4(base_settings):\n filename = (\n base_settings[\"unittest_data_dir\"] / \"practitioner-example-xcda-author.json\"\n )\n inst = practitioner.Practitioner.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Practitioner\" == inst.resource_type\n\n impl_practitioner_4(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Practitioner\" == data[\"resourceType\"]\n\n inst2 = practitioner.Practitioner(**data)\n impl_practitioner_4(inst2)", "def get_data_loaders_4sentence():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_1generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if 
num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"]\n #history_complete.append(history)\n if len(history_splitted) > (len(persona)-1):\n history_chatbot = history[1::2]\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets", "def load_premiership_teams():\n # list of PremTeams to add\n team_list = [\n {'name': 'Arsenal', 'code': 'ARS', 'is_prem': True},\n {'name': 'Aston Villa', 'code': 'AVL', 'is_prem': True},\n {'name': 'Brighton and Hove Albion', 'code': 'BTN', 'is_prem': True},\n {'name': 'Brentford', 'code': 'BRE', 'is_prem': True},\n {'name': 'Bournemouth', 'code': 'BOU', 'is_prem': False},\n {'name': 'Burnley', 'code': 'BUR', 'is_prem': True},\n {'name': 'Cardiff City', 'code': 'CAR', 'is_prem': False},\n {'name': 'Chelsea', 'code': 'CHE', 'is_prem': True},\n {'name': 'Crystal Palace', 'code': 'CRY', 'is_prem': True},\n {'name': 'Everton', 'code': 'EVE', 'is_prem': True},\n {'name': 'Fulham', 'code': 'FUL', 'is_prem': False},\n {'name': 'Hull', 'code': 'HUL', 'is_prem': False},\n {'name': 'Huddersfield Town', 'code': 'HUD', 'is_prem': False},\n {'name': 'Leeds United', 'code': 'LEE', 'is_prem': True},\n {'name': 'Leicester City', 'code': 'LEI', 'is_prem': True},\n {'name': 'Liverpool', 'code': 'LIV', 'is_prem': True},\n {'name': 'Manchester City', 'code': 'MCY', 'is_prem': True},\n {'name': 'Manchester United', 'code': 'MUN', 'is_prem': True},\n {'name': 'Middlesbrough', 'code': 'MID', 'is_prem': False},\n {'name': 'Newcastle United', 'code': 'NEW', 'is_prem': True},\n {'name': 'Norwich City', 'code': 'NOR', 'is_prem': True},\n {'name': 'Queens Park Rangers', 'code': 'QPR', 'is_prem': False},\n {'name': 'Sheffield United', 'code': 'SHF', 'is_prem': False},\n {'name': 'Southampton', 'code': 'SOT', 'is_prem': True},\n {'name': 'Stoke City', 'code': 'STO', 'is_prem': False},\n {'name': 'Sunderland', 'code': 'SUN', 'is_prem': False},\n {'name': 'Swansea City', 'code': 'SWA', 'is_prem': False},\n {'name': 'Tottenham Hotspur', 'code': 'TOT', 'is_prem': True},\n {'name': 'Watford', 'code': 'WAT', 'is_prem': True},\n {'name': 'West Bromwich Albion', 'code': 'WBA', 'is_prem': False},\n {'name': 'West Ham United', 'code': 'WHM', 'is_prem': True},\n {'name': 'Wolverhampton Wanderers', 'code': 'WLV', 'is_prem': True},\n ]\n\n for team in team_list:\n print(PremTeam.objects.update_or_create(\n name=team['name'],\n code=team['code'],\n defaults={'is_prem': team['is_prem']}\n ))\n # print(pt, created)", "def get_teacher(self) -> str :\n return self.teacher", "def posters(self, imdb_id):\n return self.get_art(imdb_id, \"posters\")", "def generate_teachers(G, school_type, N_classes, family_member_counter, \n\t\t\t\t\t family_counter, teacher_p_adults, teacher_p_children):\n\tN_teachers = get_N_teachers(school_type, N_classes)\n\tteacher_nodes = ['t{:04d}'.format(i) for i in range(1, N_teachers + 1)]\n\tG.add_nodes_from(teacher_nodes)\n\t\n\tfor t in teacher_nodes:\n\t\tfamily_nodes = [t]\n\t\t# draw a random number of children and adults for the family\n\t\tages, N_adults = 
generate_teacher_family(teacher_p_adults, teacher_p_children)\n\t\t\n\t\tages = list(ages)\n\t\tfor adult in range(N_adults - 1):\n\t\t\tages.append(20.5) # default age for adults\n\t\t\n\t\t# add the family member nodes and their attributes to the graph\n\t\tfor age in ages:\n\t\t\tfamily_member_ID = 'f{:04d}'.format(family_member_counter)\n\t\t\tfamily_nodes.append(family_member_ID)\n\t\t\tG.add_node(family_member_ID)\n\t\t\tfamily_member_counter += 1\n\t\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t\t{family_member_ID:{'type':'family_member',\n\t\t\t\t\t\t\t\t\t\t 'age':age,\n\t\t\t\t\t\t\t\t\t\t 'family':family_counter,\n\t\t\t\t\t\t\t\t\t\t 'unit':'family'}})\n\t\t\t\t\t\n\t\t# finally, also set the teacher's node attributes\n\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t{t:{'type':'teacher', \n\t\t\t\t\t\t# Note: 20.5 is the age at which\n\t\t\t\t\t\t# the symptom and transmission risk\n\t\t\t\t\t\t# is that of an adult\n\t\t\t\t\t\t'age':20.5,\n\t\t\t\t\t\t'unit':'faculty_room',\n\t\t\t\t\t\t'family':family_counter}})\n\t\tfamily_counter += 1", "def get_participants_data(self):\n participants = []\n for (email, uid) in self.tokens.items():\n participant = {} \n participant['uid'] = uid\n participant['email'] = email\n response = 0\n questions = 0\n sections = [x for x in self.values() if ISurveySection.providedBy(x)]\n for section in sections:\n response += len(section.responses.get(uid, {}))\n questions += len(section.question_ids)\n if response != 0:\n participant['finished'] = Decimal(response) / Decimal(questions) * 100\n else:\n participant['finished'] = 0 \n participants.append(participant)\n return participants", "def unpack(self):\n return []", "def prepare_student_data(dataset, nb_teachers, save=False):\n assert input.create_dir_if_needed(FLAGS.train_dir)\n\n # Load the dataset\n if dataset == 'svhn':\n test_data, test_labels = input.ld_svhn(test_only=True)\n elif dataset == 'cifar10':\n test_data, test_labels = input.ld_cifar10(test_only=True)\n elif dataset == 'mnist':\n test_data, test_labels = input.ld_mnist(test_only=True)\n else:\n print(\"Check value of dataset flag\")\n return False\n\n # Make sure there is data leftover to be used as a test set\n assert FLAGS.stdnt_share < len(test_data)\n\n # Prepare [unlabeled] student training data (subset of test set)\n stdnt_data = test_data[:FLAGS.stdnt_share]\n\n # Compute teacher predictions for student training data\n teachers_preds = ensemble_preds(dataset, nb_teachers, stdnt_data)\n\n # Aggregate teacher predictions to get student training labels\n if not save:\n stdnt_labels = aggregation.noisy_max(teachers_preds, FLAGS.lap_scale)\n else:\n # Request clean votes and clean labels as well\n stdnt_labels, clean_votes, labels_for_dump = aggregation.noisy_max(teachers_preds, FLAGS.lap_scale, return_clean_votes=True) #NOLINT(long-line)\n\n # Prepare filepath for numpy dump of clean votes\n filepath = FLAGS.data_dir + \"/\" + str(dataset) + '_' + str(nb_teachers) + '_student_clean_votes_lap_' + str(FLAGS.lap_scale) + '.npy' # NOLINT(long-line)\n\n # Prepare filepath for numpy dump of clean labels\n filepath_labels = FLAGS.data_dir + \"/\" + str(dataset) + '_' + str(nb_teachers) + '_teachers_labels_lap_' + str(FLAGS.lap_scale) + '.npy' # NOLINT(long-line)\n\n # Dump clean_votes array\n with tf.gfile.Open(filepath, mode='w') as file_obj:\n np.save(file_obj, clean_votes)\n\n # Dump labels_for_dump array\n with tf.gfile.Open(filepath_labels, mode='w') as file_obj:\n np.save(file_obj, labels_for_dump)\n\n # Print accuracy of aggregated 
labels\n ac_ag_labels = metrics.accuracy(stdnt_labels, test_labels[:FLAGS.stdnt_share])\n print(\"Accuracy of the aggregated labels: \" + str(ac_ag_labels))\n\n # Store unused part of test set for use as a test set after student training\n stdnt_test_data = test_data[FLAGS.stdnt_share:]\n stdnt_test_labels = test_labels[FLAGS.stdnt_share:]\n\n if save:\n # Prepare filepath for numpy dump of labels produced by noisy aggregation\n filepath = FLAGS.data_dir + \"/\" + str(dataset) + '_' + str(nb_teachers) + '_student_labels_lap_' + str(FLAGS.lap_scale) + '.npy' #NOLINT(long-line)\n\n # Dump student noisy labels array\n with tf.gfile.Open(filepath, mode='w') as file_obj:\n np.save(file_obj, stdnt_labels)\n\n return stdnt_data, stdnt_labels, stdnt_test_data, stdnt_test_labels", "def matlab_csv_to_teacher_data(dirname):\n samples = np.genfromtxt(os.path.join(dirname, 'samples.csv'), dtype=float,\n delimiter=\",\")\n labels = np.genfromtxt(os.path.join(dirname, 'labels.csv'), dtype=int,\n delimiter=\",\")\n data = [None]*max(labels) # matlab is 1-indexed, so no need to add 1\n for i, z in enumerate(labels):\n if data[z-1] is None:\n data[z-1] = np.copy(samples[i, :])\n else:\n data[z-1] = np.vstack((data[z-1], np.copy(samples[i, :])))\n return data", "def ParsePassport(arr):\n\t# initialize new array for values to be copied to\n\tparsed_arr = []\n\tfor p in arr:\n\t\tparsed_arr.append(p[4:])\n\treturn parsed_arr", "def applyDemapping(self):\n pass", "def extract(self):\n self.build_path_pairs()\n self.extract_field_blocks()\n self.assert_filenames()", "def extract_thread(self):\n \"TODO: check if hotel seletecd\"\n hotel_name = SelectableLabel.selected_hotel\n hlink = CityListView.hotels_data[hotel_name]\n self.extract_list, self.hotel_address = scrapper.scrap(\"http://www.booking.com/\"\n + scrapper.make_hotel_review_url(hlink))", "def test_gathering_page_pdf_urls(\n lep_dl: LepDL,\n) -> None:\n json_test = \"\"\"\\\n [\n {\n \"episode\": 555,\n \"files\": {\n \"audios\": [],\n \"page_pdf\": [\"https://someurl555.local\"]\n },\n \"index\": 2022011303\n },\n {\n \"episode\": 554,\n \"files\": {\n \"audios\": [],\n \"page_pdf\": [\"https://someurl554.local1\", \"https://someurl554.local2\"]\n },\n \"index\": 2022011302\n },\n {\n \"episode\": 553,\n \"files\": {\n \"audios\": [],\n \"page_pdf\": [\n \"https://someurl553.local1\",\n \"https://someurl553.local2\",\n \"https://someurl553.local3\"\n ]\n },\n \"index\": 2022011302\n }\n ]\n \"\"\" # noqa: E501,B950\n db_episodes = Lep.extract_only_valid_episodes(json_test)\n lep_dl.files = downloader.gather_all_files(db_episodes)\n\n assert len(lep_dl.files) == 3\n\n assert lep_dl.files[0].primary_url == \"https://someurl553.local1\"\n assert lep_dl.files[0].secondary_url == \"https://someurl553.local2\"\n assert lep_dl.files[0].tertiary_url == \"https://someurl553.local3\"\n\n assert lep_dl.files[1].primary_url == \"https://someurl554.local1\"\n assert lep_dl.files[1].secondary_url == \"https://someurl554.local2\"\n\n assert lep_dl.files[2].primary_url == \"https://someurl555.local\"\n assert lep_dl.files[2].secondary_url == \"\"", "def decompose_sample(lfads_hps, z):\n ib_dim = lfads_hps['ib_dim']\n ic_dim = lfads_hps['enc_dim']\n ib_k = z[:ib_dim]\n ic_j = z[ib_dim:(ib_dim+ic_dim)]\n ii_ti = z[(ib_dim+ic_dim):]\n ii_txi = np.reshape(ii_ti, (-1, lfads_hps['ii_dim']))\n return ib_k, ic_j, ii_txi", "def extract_trials(self):\n self.logdir = os.path.expanduser(os.path.expandvars(self.logdir))\n logdir_content = os.listdir(self.logdir)\n # For the 
standard hypo_testing analysis, this logdir_content\n # will contain what we need it to and so we can proceed to\n # extract the trials.\n if self.test_type == 'analysis':\n if 'config_summary.json' in logdir_content:\n self.organise_trials(logdir_content=logdir_content)\n else:\n raise ValueError(\n 'config_summary.json cannot be found in the specified '\n 'logdir. It should have been created as part of the '\n 'output of hypo_testing.py and so this postprocessing '\n 'cannot be performed.'\n )\n elif self.test_type == 'injparamscan':\n pickle_there = self.check_pickle_files(logdir_content)\n if pickle_there:\n self.load_from_pickle()\n else:\n toy_names = []\n scan_variables = []\n for folder in logdir_content:\n if '.pckl' not in folder and 'Plots' not in folder:\n bits = folder.split('toy')[1].split('_')\n toy_name = bits[1]\n toy_names.append(\n toy_name\n )\n scan_variable = None\n add_bit = True\n for bit in bits:\n try:\n float(bit)\n add_bit = False\n except:\n if not (bit == '') and not (bit == toy_name):\n if add_bit:\n if scan_variable is None:\n scan_variable = bit\n else:\n scan_variable += '_%s'%bit\n scan_variables.append(scan_variable)\n toy_names = np.array(toy_names)\n scan_variables = np.array(scan_variables)\n # Require all to be the same injected truth model\n if not np.alltrue(toy_names == toy_names[0]):\n raise ValueError(\n 'Not all output is for the same injected truth '\n 'hypothesis. Got %s'%set(toy_names)\n )\n # Require all to be scanning the same variable\n if not np.alltrue(scan_variables == scan_variables[0]):\n raise ValueError(\n 'Not all output is for the same scanned parameter. '\n 'Got %s'%set(scan_variables)\n )\n self.labels = {}\n self.all_params = {}\n self.data_sets = {}\n self.minimiser_info = {}\n for scandir in logdir_content:\n if '.pckl' not in scandir and 'Plots' not in scandir:\n self.scandir = os.path.join(self.logdir, scandir)\n scandir_content = os.listdir(self.scandir)\n if 'config_summary.json' in scandir_content:\n self.extract_scans()\n else:\n raise ValueError(\n 'config_summary.json cannot be found in the '\n 'specified scandir, %s. It should have been '\n 'created as part of the output of '\n 'hypo_testing.py and so this postprocessing '\n 'cannot be performed.'%self.scandir\n )\n # Pickle at the end so all of the scans are in the output\n self.pickle_data()\n self.organise_scans()\n elif self.test_type == 'systtests':\n pickle_there = self.check_pickle_files(logdir_content)\n if pickle_there:\n self.load_from_pickle()\n else:\n self.labels = {}\n self.all_params = {}\n self.data_sets = {}\n for systdir in logdir_content:\n if '.pckl' not in systdir and 'Plots' not in systdir:\n self.systdir = os.path.join(self.logdir, systdir)\n systdir_content = os.listdir(self.systdir)\n if 'config_summary.json' in systdir_content:\n self.extract_systtests()\n else:\n raise ValueError(\n 'config_summary.json cannot be found in the '\n 'specified directory, %s. 
It should have been '\n 'created as part of the output of '\n 'hypo_testing.py and so this postprocessing '\n 'cannot be performed.'%self.systdir\n )\n # Pickle at the end so all of the truths/systematics\n # are in the output\n self.pickle_data()", "def extract_features(extractor, loader, device, feature_transform=None):\r\n for batch_idx, data in enumerate(loader):\r\n with torch.no_grad():\r\n data = data.to(device).float()\r\n features = extractor(data)\r\n if feature_transform:\r\n features = feature_transform(features)\r\n yield features", "def test_practitioner_7(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"practitioner-example.json\"\n inst = practitioner.Practitioner.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Practitioner\" == inst.resource_type\n\n impl_practitioner_7(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Practitioner\" == data[\"resourceType\"]\n\n inst2 = practitioner.Practitioner(**data)\n impl_practitioner_7(inst2)", "def getPUsers(self):\n model = self.tvPUsers.get_model()\n result = []\n model.foreach(lambda model, path, iter, data:\n result.append(model.get(iter, 0)[0]), None)\n result.sort()\n return result", "def _unpack_ies(buf):\n\t\t# each IE starts with an ID and a length\n\t\ties = []\n\t\toff = 0\n\t\tbuflen = len(buf)\n\t\t# logger.debug(\"lazy dissecting: %s\" % buf)\n\n\t\twhile off < buflen:\n\t\t\tie_id = buf[off]\n\t\t\ttry:\n\t\t\t\tparser = IEEE80211.ie_decoder[ie_id]\n\t\t\texcept KeyError:\n\t\t\t\t# some unknown tag, use standard format\n\t\t\t\tparser = IEEE80211.IE\n\n\t\t\tdlen = buf[off + 1]\n\t\t\t# logger.debug(\"IE parser is: %d = %s = %s\" % (ie_id, parser, buf[off: off+2+dlen]))\n\t\t\tie = parser(buf[off: off + 2 + dlen])\n\t\t\ties.append(ie)\n\t\t\toff += 2 + dlen\n\n\t\treturn ies", "def test_parse_trflp(self):\r\n\r\n data = \\\r\n \"\"\"\tBin (10bp)\tBin (20bp)\tBin (30bp)\tBin (40 bp)\r\nSamp-le 1\t1000\t2000\t3000\t4000\r\nSample 2\t\t2000\t3000\t4000\r\nSample 3\t\t\t3000\t4000\r\nSample 4\t\t\t\t4000\r\nSample 5\t25\t\t\t\"\"\"\r\n samples, otus, data = parse_trflp(data.split('\\n'))\r\n\r\n samples_exp = [\r\n 'Samp.le.1',\r\n 'Sample.2',\r\n 'Sample.3',\r\n 'Sample.4',\r\n 'Sample.5']\r\n otus_exp = ['Bin__10bp_', 'Bin__20bp_', 'Bin__30bp_', 'Bin__40_bp_']\r\n data_exp = array([[1000, 0, 0, 0, 25],\r\n [2000, 2000, 0, 0, 0],\r\n [3000, 3000, 3000, 0, 0],\r\n [4000, 4000, 4000, 4000, 0]])\r\n\r\n self.assertEqual(samples, samples_exp)\r\n self.assertEqual(otus, otus_exp)\r\n assert_almost_equal(data, data_exp)", "def prepare_data(self,d):\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, 
unlabeled_iters", "def challenge23():\n seed = random.randint(1, 2 ** 31)\n orig_mt = MersenneTwister(seed)\n copy_mt = MersenneTwister(0)\n for index, p in enumerate(orig_mt.generate(624)):\n copy_mt.y[index] = untempering(p)\n return orig_mt, copy_mt", "def populate_targets(prog):\n #proto = []\n #for b in prog.args.inputs:\n # for a in b:\n # if a not in proto:\n # proto += [ a ]\n for p in prog.args.inputs:\n t = Prog.Target(p.cert)\n tlsa = Prog.Tlsa(p.params, None, None, None)\n tlsa.publish = False\n # hack the tlsa.publish member to mean that the Tlsa object\n # corresponds to a specific record, as opposed to one set\n # by the config file, where tlsa.publish is set to 'True'.\n t.tlsa += [ tlsa ]\n prog.target_list += [ t ]\n return Prog.RetVal.ok", "def extract(self, data):\n\n # Read authors from the cell with NAME and COLLABORATORS variables\n authors = []\n for cell in data['cells']:\n if cell['cell_type'] != 'code':\n continue\n\n src = cell['source']\n if src and src[0].startswith('NAME'):\n authors = [*self._extract_name(src, 'NAME'),\n *self._extract_name(src, 'COLLABORATORS')]\n break\n\n return [x for x in authors if x.name]", "def parse(self, response):\n\n soup = self.get_soup(response.text)\n try:\n results = soup\\\n .find('table', {'id': 'searchResult'})\\\n .find_all('tr')[1:]\n except AttributeError:\n return\n\n for result in results:\n torrent = items.Torrent(spider=self.name)\n torrent['categories'] = [\n self._category_map.get(\n furl.furl(category.attrs['href']).path.segments[-1],\n items.TorrentCategory.Unknown\n ) for category in result.find(\n 'td', {'class': 'vertTh'}\n ).find_all('a')\n ]\n torrent['magnet'] = result.find(\n 'a', {'href': re.compile('^magnet\\:.*')}\n )['href']\n torrent['hash'] = re.match(\n r'.*magnet:\\?xt=urn:(?:btih)+:([a-zA-Z0-9]+).*',\n torrent['magnet']\n ).groups()[0].lower()\n (torrent['seeders'], torrent['leechers'],) = tuple([\n int(column.contents[0])\n for column in result.find_all('td', {'align': 'right'})\n ])\n\n result_links = result.find('a', {'class': 'detLink'})\n if 'href' in result_links.attrs:\n torrent['source'] = furl.furl(response.url).set(\n path=result_links.attrs['href'], args={}\n ).url\n\n torrent['name'] = result_links.contents[0].strip()\n\n result_desc = result.find('font', {'class': 'detDesc'})\n (time_content, size_content,) = \\\n result_desc.contents[0].split(',')[:2]\n torrent['uploaded'] = self.parse_datetime(\n time_content.split(' ')[-1],\n formats=[\n '%m-%d %Y',\n '%m-%d %H:%M',\n '%H:%M',\n 'Y-day %H:%M'\n ]\n )\n torrent['size'] = self.parse_size(\n size_content.split(' ')[-1]\n )\n\n try:\n torrent['uploader'] = result_desc.find(\n 'a', {'href': re.compile('^/user/.*')}\n ).contents[0]\n except AttributeError:\n pass\n\n yield torrent", "def ptD(self):\n \n p = dict()\n for d, episode in enumerate(self.episodes):\n users, tempsU = episode[:,0], np.unique(episode[:,1]) # List of users of an episode and distinct\n p[d] = np.ones(self.nbUser) # time\n # A voir ******\n for t in range(1, len(tempsU)): # For each time tU of the episode D\n ptd, hasPred = 1., False\n # Peut être parcourir seulement les users seraient plus interessants\n for u, user in enumerate(users):\n predU = episode[episode[:,1] < tempsU[t]][:,0] # List of predecessors of user u at time tU\n for v in predU: # Proba que ça ne soit aucun des predecesseurs \n if v in self.predecessors[user]: # qui l'infectent\n ptd *= (1 - self.theta[v][user])\n hasPred = True\n if hasPred:\n p[d][u] = 1-ptd # Proba que ça soit l'un deux.\n\n 
return p", "def test_read_participants_data():\n bids_root = _TempDir()\n bids_path = _bids_path.copy().update(root=bids_root, datatype='meg')\n raw = _read_raw_fif(raw_fname, verbose=False)\n\n # if subject info was set, we don't roundtrip birthday\n # due to possible anonymization in mne-bids\n subject_info = {\n 'hand': 1,\n 'sex': 2,\n }\n raw.info['subject_info'] = subject_info\n write_raw_bids(raw, bids_path, overwrite=True, verbose=False)\n raw = read_raw_bids(bids_path=bids_path)\n print(raw.info['subject_info'])\n assert raw.info['subject_info']['hand'] == 1\n assert raw.info['subject_info']['sex'] == 2\n assert raw.info['subject_info'].get('birthday', None) is None\n\n # if modifying participants tsv, then read_raw_bids reflects that\n participants_tsv_fpath = op.join(bids_root, 'participants.tsv')\n participants_tsv = _from_tsv(participants_tsv_fpath)\n participants_tsv['hand'][0] = 'n/a'\n _to_tsv(participants_tsv, participants_tsv_fpath)\n raw = read_raw_bids(bids_path=bids_path)\n assert raw.info['subject_info']['hand'] == 0\n assert raw.info['subject_info']['sex'] == 2\n assert raw.info['subject_info'].get('birthday', None) is None\n\n # make sure things are read even if the entries don't make sense\n participants_tsv = _from_tsv(participants_tsv_fpath)\n participants_tsv['hand'][0] = 'righty'\n participants_tsv['sex'][0] = 'malesy'\n _to_tsv(participants_tsv, participants_tsv_fpath)\n with pytest.warns(RuntimeWarning, match='Unable to map'):\n raw = read_raw_bids(bids_path=bids_path)\n assert raw.info['subject_info']['hand'] is None\n assert raw.info['subject_info']['sex'] is None\n\n # make sure to read in if no participants file\n raw = _read_raw_fif(raw_fname, verbose=False)\n write_raw_bids(raw, bids_path, overwrite=True, verbose=False)\n os.remove(participants_tsv_fpath)\n with pytest.warns(RuntimeWarning, match='Participants file not found'):\n raw = read_raw_bids(bids_path=bids_path)\n assert raw.info['subject_info'] is None", "def extractor(self):\n \n if self._extractor is None:\n if self.extractor_type == '29v2':\n self._extractor = self.build_extractor_29layers_v2(name='extract29v2', block=self._res_block, layers=[1, 2, 3, 4])\n elif self.extractor_type == '29':\n self._extractor = self.build_extractor_29layers(name='extract29', block=self._res_block, layers=[1, 2, 3, 4])\n elif self.extractor_type == '9':\n self._extractor = self.build_extractor_9layers(name='extract9')\n \n if self.extractor_weights is not None:\n self._extractor.load_weights(self.extractor_weights)\n \n return self._extractor", "def getDetailedSpeakers(self, speakers):\n\n new_speakers = []\n try:\n for speaker in speakers:\n for user in self.users_data:\n if speaker[\"username\"] == user[\"username\"]:\n new_speakers.append(user)\n return new_speakers\n except KeyError as e:\n print(e)\n return \"Invalid\"", "def test_NeurosynthDecoder_featuregroup(testdata_laird):\n ids = testdata_laird.ids[:5]\n decoder = discrete.NeurosynthDecoder(feature_group=\"Neurosynth_TFIDF\")\n decoder.fit(testdata_laird)\n decoded_df = decoder.transform(ids=ids)\n assert isinstance(decoded_df, pd.DataFrame)", "def test_ipv4v6_secondary_pdn_with_ded_bearer_multi_ue(self):\n num_ues = 4\n ue_ids = []\n default_ip = []\n sec_ip_ipv4 = []\n sec_ip_ipv6 = []\n\n # APN of the secondary PDN\n ims_apn = {\n \"apn_name\": \"ims\", # APN-name\n \"qci\": 5, # qci\n \"priority\": 15, # priority\n \"pre_cap\": 0, # preemption-capability\n \"pre_vul\": 0, # preemption-vulnerability\n \"mbr_ul\": 200000000, # MBR UL\n \"mbr_dl\": 
100000000, # MBR DL\n \"pdn_type\": 2, # PDN Type 0-IPv4,1-IPv6,2-IPv4v6\n }\n # UL Flow description #1\n ulFlow1 = {\n \"ipv4_dst\": \"192.168.129.42\", # IPv4 destination address\n \"tcp_dst_port\": 5002, # TCP dest port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.UPLINK, # Direction\n }\n\n # UL Flow description #2\n ulFlow2 = {\n \"ipv4_dst\": \"192.168.129.42\", # IPv4 destination address\n \"tcp_dst_port\": 5001, # TCP dest port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.UPLINK, # Direction\n }\n\n # UL Flow description #3\n ulFlow3 = {\n \"ipv6_dst\": \"5e90:db7b:b18e::1556\", # IPv6 destination address\n \"tcp_dst_port\": 5003, # TCP dest port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.UPLINK, # Direction\n }\n\n # DL Flow description #1\n dlFlow1 = {\n \"ipv4_src\": \"192.168.129.42\", # IPv4 source address\n \"tcp_src_port\": 5001, # TCP source port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.DOWNLINK, # Direction\n }\n\n # DL Flow description #2\n dlFlow2 = {\n \"ipv4_src\": \"192.168.129.64\", # IPv4 source address\n \"tcp_src_port\": 5002, # TCP source port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.DOWNLINK, # Direction\n }\n\n # DL Flow description #3\n dlFlow3 = {\n \"ipv6_src\": \"6e31:1a95:1e7c::df1\", # IPv6 source address\n \"tcp_src_port\": 5003, # TCP source port\n \"ip_proto\": FlowMatch.IPPROTO_TCP, # Protocol Type\n \"direction\": FlowMatch.DOWNLINK, # Direction\n }\n\n # Flow lists to be configured\n flow_list = [\n ulFlow1,\n ulFlow2,\n ulFlow3,\n dlFlow1,\n dlFlow2,\n dlFlow3,\n ]\n\n # QoS\n qos = {\n \"qci\": 1, # qci value [1 to 9]\n \"priority\": 1, # Range [0-255]\n \"max_req_bw_ul\": 10000000, # MAX bw Uplink\n \"max_req_bw_dl\": 15000000, # MAX bw Downlink\n \"gbr_ul\": 1000000, # GBR Uplink\n \"gbr_dl\": 2000000, # GBR Downlink\n \"arp_prio\": 1, # ARP priority\n \"pre_cap\": 1, # pre-emption capability\n \"pre_vul\": 1, # pre-emption vulnerability\n }\n\n policy_id = \"ims\"\n\n self._s1ap_wrapper.configUEDevice(num_ues)\n for i in range(num_ues):\n req = self._s1ap_wrapper.ue_req\n ue_id = req.ue_id\n\n apn_list = [ims_apn]\n self._s1ap_wrapper.configAPN(\n \"IMSI\" + \"\".join([str(i) for i in req.imsi]), apn_list,\n )\n print(\n \"*********************** Running End to End attach for UE id \",\n ue_id,\n )\n\n print(\"***** Sleeping for 5 seconds\")\n time.sleep(5)\n # Attach\n attach = self._s1ap_wrapper.s1_util.attach(\n ue_id,\n s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,\n s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,\n s1ap_types.ueAttachAccept_t,\n )\n addr = attach.esmInfo.pAddr.addrInfo\n default_ip.append(ipaddress.ip_address(bytes(addr[:4])))\n ue_ids.append(ue_id)\n\n # Wait on EMM Information from MME\n self._s1ap_wrapper._s1_util.receive_emm_info()\n\n print(\"***** Sleeping for 5 seconds\")\n time.sleep(5)\n self._s1ap_wrapper._ue_idx = 0\n for i in range(num_ues):\n req = self._s1ap_wrapper.ue_req\n ue_id = req.ue_id\n\n apn = \"ims\"\n # PDN Type 2 = IPv6, 3 = IPv4v6\n pdn_type = 3\n # Send PDN Connectivity Request\n self._s1ap_wrapper.sendPdnConnectivityReq(\n ue_id, apn, pdn_type=pdn_type,\n )\n # Receive PDN CONN RSP/Activate default EPS bearer context request\n response = self._s1ap_wrapper.s1_util.get_response()\n assert response.msg_type == s1ap_types.tfwCmd.UE_PDN_CONN_RSP_IND.value\n act_def_bearer_req = 
response.cast(s1ap_types.uePdnConRsp_t)\n\n addr = act_def_bearer_req.m.pdnInfo.pAddr.addrInfo\n sec_ip_ipv4.append(ipaddress.ip_address(bytes(addr[8:12])))\n\n print(\n \"********************** Sending Activate default EPS bearer \"\n \"context accept for APN-%s, UE id-%d\" % (apn, ue_id),\n )\n print(\n \"********************** Added default bearer for apn-%s,\"\n \" bearer id-%d, pdn type-%d\"\n % (\n apn,\n act_def_bearer_req.m.pdnInfo.epsBearerId,\n pdn_type,\n ),\n )\n\n # Receive Router Advertisement message\n response = self._s1ap_wrapper.s1_util.get_response()\n assert response.msg_type == s1ap_types.tfwCmd.UE_ROUTER_ADV_IND.value\n routerAdv = response.cast(s1ap_types.ueRouterAdv_t)\n print(\n \"******************* Received Router Advertisement for APN-%s\"\n \" ,bearer id-%d\" % (apn, routerAdv.bearerId),\n )\n\n ipv6_addr = \"\".join([chr(i) for i in routerAdv.ipv6Addr]).rstrip(\n \"\\x00\",\n )\n print(\"******* UE IPv6 address: \", ipv6_addr)\n sec_ip_ipv6.append(ipaddress.ip_address(ipv6_addr))\n\n print(\"***** Sleeping for 5 seconds\")\n time.sleep(5)\n\n # Add dedicated bearer\n print(\"********************** Adding dedicated bearer to ims PDN\")\n print(\n \"********************** Sending RAR for IMSI\",\n \"\".join([str(i) for i in req.imsi]),\n )\n self._sessionManager_util.send_ReAuthRequest(\n \"IMSI\" + \"\".join([str(i) for i in req.imsi]),\n policy_id,\n flow_list,\n qos,\n )\n response = self._s1ap_wrapper.s1_util.get_response()\n assert response.msg_type == s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value\n act_ded_ber_req_ims_apn = response.cast(\n s1ap_types.UeActDedBearCtxtReq_t,\n )\n self._s1ap_wrapper.sendActDedicatedBearerAccept(\n req.ue_id, act_ded_ber_req_ims_apn.bearerId,\n )\n print(\n \"************* Added dedicated bearer\",\n act_ded_ber_req_ims_apn.bearerId,\n )\n\n print(\"***** Sleeping for 10 seconds\")\n time.sleep(10)\n # ipv4 default pdn + ipv4v6(ims) pdn +\n # dedicated bearer for ims pdn for 4 UEs\n num_ul_flows = 12\n for i in range(num_ues):\n dl_flow_rules = {\n default_ip[i]: [],\n sec_ip_ipv4[i]: [flow_list],\n sec_ip_ipv6[i]: [flow_list],\n }\n # Verify if flow rules are created\n self._s1ap_wrapper.s1_util.verify_flow_rules(\n num_ul_flows, dl_flow_rules,\n )\n\n print(\"***** Sleeping for 5 seconds\")\n time.sleep(5)\n for ue in ue_ids:\n print(\n \"******************* Running UE detach (switch-off) for \",\n \"UE id \",\n ue,\n )\n # Now detach the UE\n self._s1ap_wrapper.s1_util.detach(\n ue, s1ap_types.ueDetachType_t.UE_SWITCHOFF_DETACH.value, False,\n )", "def test_parse_trflp_headerless(self):\r\n\r\n data = \\\r\n \"\"\"Samp-le 1\t1000\t2000\t3000\t4000\r\nSample 2\t\t2000\t3000\t4000\r\nSample 3\t\t\t3000\t4000\r\nSample 4\t\t\t\t4000\r\nSample_5__\t25\t\t\t\"\"\"\r\n samples, otus, data = parse_trflp(data.split('\\n'))\r\n\r\n samples_exp = [\r\n 'Samp.le.1',\r\n 'Sample.2',\r\n 'Sample.3',\r\n 'Sample.4',\r\n 'Sample.5..']\r\n otus_exp = ['Bin__0', 'Bin__1', 'Bin__2', 'Bin__3']\r\n data_exp = array([[1000, 0, 0, 0, 25],\r\n [2000, 2000, 0, 0, 0],\r\n [3000, 3000, 3000, 0, 0],\r\n [4000, 4000, 4000, 4000, 0]])\r\n\r\n self.assertEqual(samples, samples_exp)\r\n self.assertEqual(otus, otus_exp)\r\n assert_almost_equal(data, data_exp)", "def get_teacher_class():\n class_data = query_db(\n \"SELECT id, name FROM classes WHERE teacher_id = ?;\", [flask.session[\"id\"]]\n )\n classes = []\n for part in class_data:\n class_dict = {}\n class_dict[\"id\"] = part[0]\n class_dict[\"name\"] = str(part[1])\n classes.append(class_dict)\n 
return classes", "def scrape(self):\n locations = (\n 'Half Moon Bay, California',\n 'Huntington Beach, California',\n 'Providence, Rhode Island',\n 'Wrightsville Beach, North Carolina',\n )\n for location in locations:\n tides = self.extract_tides(location)\n print('\\nLocation:', location)\n for dt, tide_rows in tides.items():\n for tide_row in tide_rows:\n print(dt, tide_row)", "def unpack_data(self, data):\n\n datadict = {'filenames': [], 'features': [], 'labels': [] }\n\n for l in data:\n \n datadict['filenames'].append(l[0])\n datadict['features'].append(l[1])\n datadict['labels'].append(l[2])\n \n return datadict", "def test_practitioner_5(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"practitioner-example-f003-mv.json\"\n inst = practitioner.Practitioner.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Practitioner\" == inst.resource_type\n\n impl_practitioner_5(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Practitioner\" == data[\"resourceType\"]\n\n inst2 = practitioner.Practitioner(**data)\n impl_practitioner_5(inst2)", "def test_practitioner_9(base_settings):\n filename = base_settings[\"unittest_data_dir\"] / \"practitioner-example-f204-ce.json\"\n inst = practitioner.Practitioner.parse_file(\n filename, content_type=\"application/json\", encoding=\"utf-8\"\n )\n assert \"Practitioner\" == inst.resource_type\n\n impl_practitioner_9(inst)\n\n # testing reverse by generating data from itself and create again.\n data = inst.dict()\n assert \"Practitioner\" == data[\"resourceType\"]\n\n inst2 = practitioner.Practitioner(**data)\n impl_practitioner_9(inst2)", "def prep_steps(self, *, yearmon: str) -> List[Step]:\n steps = []\n\n year, month = dates.parse_yearmon(yearmon)\n\n monthly_vars = ['2m_temperature', 'total_precipitation']\n hourly_vars = ['total_precipitation']\n\n steps += era5.download(output_filename=era5.filename(self.source, 'month', yearmon),\n duration='month',\n yearmon=yearmon,\n variables=monthly_vars)\n\n if year >= 1979:\n steps += era5.download(output_filename=era5.filename(self.source, 'hour', yearmon),\n duration='hour',\n yearmon=yearmon,\n variables=hourly_vars)\n steps += era5.calc_wetdays(input_filename=era5.filename(self.source, 'hour', yearmon),\n output_filename=self.p_wetdays(yearmon=yearmon).file,\n threshold_mm=0.1)\n\n return steps", "def _unpack(self, headerBytes):\n xtraH = struct.unpack(self.PACKAGING_FORMAT, headerBytes)\n\n self.qubit_id = xtraH[0]\n self.remote_app_id = xtraH[1]\n self.remote_node = xtraH[2]\n self.datetime = xtraH[3]\n self.remote_port = xtraH[4]\n self.outcome = xtraH[5]" ]
[ "0.6385784", "0.5618853", "0.54759747", "0.54556435", "0.5198481", "0.5048664", "0.49990323", "0.4985974", "0.49433678", "0.4874634", "0.47989118", "0.47896883", "0.4787353", "0.47753018", "0.4730215", "0.46576267", "0.46548298", "0.46495625", "0.4646888", "0.46451658", "0.46388948", "0.454687", "0.4523255", "0.45193264", "0.4513335", "0.449714", "0.44720286", "0.44492343", "0.4447388", "0.44390026", "0.44346026", "0.44097203", "0.43845743", "0.43841347", "0.43805403", "0.43672597", "0.43646434", "0.43463144", "0.43346798", "0.43297213", "0.43160826", "0.43066314", "0.43051037", "0.43034545", "0.42883697", "0.42834306", "0.4271848", "0.4259581", "0.42592984", "0.42537972", "0.4251826", "0.4250439", "0.42376214", "0.42317843", "0.42263317", "0.4221276", "0.42098", "0.42023504", "0.4202325", "0.420112", "0.42002514", "0.41996342", "0.4196491", "0.4195288", "0.41941756", "0.4189602", "0.41756892", "0.41734007", "0.41723236", "0.416866", "0.4167351", "0.41578582", "0.41512817", "0.41453055", "0.41438392", "0.41363174", "0.41303328", "0.4130061", "0.41295213", "0.4114103", "0.41135308", "0.4110586", "0.4109239", "0.40998915", "0.40940896", "0.40899107", "0.4088083", "0.4087046", "0.407948", "0.40701076", "0.4069658", "0.40660182", "0.40649423", "0.40626246", "0.40607655", "0.40518227", "0.40490636", "0.40440974", "0.40414512", "0.40369195" ]
0.67790115
0
Prune teachers to only use the first num_teachers of them.
def prune_teachers(self):
    self.teacher_policies = self.teacher_policies[: self.num_teachers]
    self.teacher_envs = self.teacher_envs[: self.num_teachers]
    self.teacher_expl_strats = self.teacher_expl_strats[: self.num_teachers]
    self.teacher_critics = self.teacher_critics[: self.num_teachers]
    self.teacher_ex_dirs = self.teacher_ex_dirs[: self.num_teachers]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleanOrphanedLearners(self):\n\n # Before deleting Learners, ensure that if any Learners that are about to be\n # deleted point to a Team as their action, then that Team's count of\n # referincing Learners is decremented.\n for learner in self.learner_pop:\n if learner.getNumReferencingTeams() == 0 and not learner.isActionAtomic():\n learner.action.decrementNumReferencingLearners()\n\n # Remove all orphaned Learners from the Learner population\n self.learner_pop = [l for l in self.learner_pop if not l.getNumReferencingTeams() == 0]", "def prune(self, min_count):\n if not self.sorted:\n self.sort()\n for k, count in enumerate(self.Nx):\n if count < min_count:\n self.truncate(k)\n break", "def _prune_candidates(self, beam_width=None):\n if beam_width is None:\n beam_width = self.beam_width\n if len(self.candidates) <= beam_width:\n return\n neg_scores = np.array([-cand.logp_total() for cand in self.candidates])\n parted_indices = np.argpartition(neg_scores, beam_width - 1)\n self.candidates = np.array(self.candidates)[parted_indices[:beam_width]].tolist()", "def prune(self):\n self.sort(key=lambda chunk: chunk.probability)\n del self[:-self.model.num_parses]", "def prune_features(self, verbose=False):\n # Collect all features and prune those occurring only once.\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)\n\n self.remove_features = []\n for k in features:\n if features[k] <= 2:\n self.remove_features.append(k)\n\n if verbose:\n print \"Number of unique features: \", len(self.remove_features)\n\n self.remove_features = set(self.remove_features)\n for k in self.utterance_features:\n self.utterance_features[k].prune(self.remove_features)\n\n features = defaultdict(int)\n for k in self.utterance_features:\n for f in self.utterance_features[k]:\n features[f] += 1\n\n if verbose:\n print \"Total number of features: \", len(features)", "def load_teachers(self):\n # Get the experiment's directory to load from\n ex_dir = ask_for_experiment(max_display=10, env_name=self.env_real.name, perma=False)\n self.load_teacher_experiment(ex_dir)\n if len(self.teacher_policies) < self.num_teachers:\n print(\n f\"You have loaded {len(self.teacher_policies)} teachers - load at least {self.num_teachers - len(self.teacher_policies)} more!\"\n )\n self.load_teachers()", "def init_prune_list(self):\n\t\tfor i in range(1, len(self.elements)+1):\n\t\t\tcount = self.getSupport({i})\n\t\t\tif(count >= self.support):\n\t\t\t\tself.prune_list[(i,)] = count", "def helper(reviewer: Any, graph: Graph) -> set:\n reviewers_so_far = set()\n\n for movie in graph.get_neighbours(reviewer):\n for user in graph.get_neighbours(movie):\n if graph.get_weight(user, movie) >= 8:\n reviewers_so_far.add(user)\n return reviewers_so_far", "def prune_features(self):\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]", "def _remove_experts(self):\n self.experts = [ex for ex in self.experts if np.mean(\n ex.weight) >= self.theta]", "def delete_n_volunteers(app_id):\r\n delete_memoized(n_volunteers, app_id)", "def prune_(self):\n idx = self.factor_lams() > 0\n self.factors = [f[:, idx] for f in 
self.factors]\n self.rank = np.sum(idx)", "def delSpillOppIRunder(trekk, vinnendeSekvenser):\n runder = []\n i = 0\n runde = []\n while i < len(trekk):\n runde.append(trekk[i])\n personX, personO = delOppRunde(runde)\n if len(runde) >= 5 and (sjekkVinnerRunde(personX, vinnendeSekvenser) or sjekkVinnerRunde(personO, vinnendeSekvenser)):\n runder.append(runde)\n runde = []\n if len(runde) == 9:\n runder.append(runde) \n runde = []\n i += 1\n return runder", "def delete_n_anonymous_volunteers(app_id):\r\n delete_memoized(n_anonymous_volunteers, app_id)", "def prune_tree(tree, cutoff, posteriors):\n new_tree = []\n for e in tree:\n try:\n if posteriors[e] > cutoff:\n new_tree.append(e)\n except KeyError:\n if posteriors[e[::-1]] > cutoff:\n new_tree.append(e)\n return new_tree", "def prune(neuron,\n number_of_nodes):\n n = len(neuron.nodes_list)\n for i in range(n - number_of_nodes):\n index = shortest_tips(neuron)\n neuron = remove_node(neuron, index)\n return neuron", "def remove_numbers(self):\n for i in range(len(self.board.board[0])):\n while self.board.board[i].count(0) < 6:\n random_val = random.randint(0, 8)\n self.board.update_board((i, random_val), 0)", "def deepscore(teacher):\n if teacher == None:\n print(\"Not a valid teacher\")\n return\n if teacher.get('rmpdata') == None: snc.rateThisProfessor(teacher,instructors)\n print(\"# \" + teacher['name'])\n scoreTeacherlegacy(teacher)\n scoreTeacherues(teacher)\n scoreTeacherrmp(teacher)", "def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n group_idx = int(layer / self.config.inner_group_num)\n inner_group_idx = int(layer - group_idx * self.config.inner_group_num)\n self.encoder.albert_layer_groups[group_idx].albert_layers[inner_group_idx].attention.prune_heads(heads)", "def generate_teachers(G, school_type, N_classes, family_member_counter, \n\t\t\t\t\t family_counter, teacher_p_adults, teacher_p_children):\n\tN_teachers = get_N_teachers(school_type, N_classes)\n\tteacher_nodes = ['t{:04d}'.format(i) for i in range(1, N_teachers + 1)]\n\tG.add_nodes_from(teacher_nodes)\n\t\n\tfor t in teacher_nodes:\n\t\tfamily_nodes = [t]\n\t\t# draw a random number of children and adults for the family\n\t\tages, N_adults = generate_teacher_family(teacher_p_adults, teacher_p_children)\n\t\t\n\t\tages = list(ages)\n\t\tfor adult in range(N_adults - 1):\n\t\t\tages.append(20.5) # default age for adults\n\t\t\n\t\t# add the family member nodes and their attributes to the graph\n\t\tfor age in ages:\n\t\t\tfamily_member_ID = 'f{:04d}'.format(family_member_counter)\n\t\t\tfamily_nodes.append(family_member_ID)\n\t\t\tG.add_node(family_member_ID)\n\t\t\tfamily_member_counter += 1\n\t\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t\t{family_member_ID:{'type':'family_member',\n\t\t\t\t\t\t\t\t\t\t 'age':age,\n\t\t\t\t\t\t\t\t\t\t 'family':family_counter,\n\t\t\t\t\t\t\t\t\t\t 'unit':'family'}})\n\t\t\t\t\t\n\t\t# finally, also set the teacher's node attributes\n\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t{t:{'type':'teacher', \n\t\t\t\t\t\t# Note: 20.5 is the age at which\n\t\t\t\t\t\t# the symptom and transmission risk\n\t\t\t\t\t\t# is that of an adult\n\t\t\t\t\t\t'age':20.5,\n\t\t\t\t\t\t'unit':'faculty_room',\n\t\t\t\t\t\t'family':family_counter}})\n\t\tfamily_counter += 1", "def tweet_cleaner(tweets):\n n_tweets = {}\n clean = cleaner()\n for tweet in tweets:\n text = clean.clean_text(tweets[tweet][\"text\"])\n if len(text) > 15:\n n_tweets[tweet] = tweets[tweet]\n return n_tweets", "def _prune(self):\n while 
len(self.data) > self.limit:\n self.data.popleft()", "def distribute_reviews(asmt, perStudent):\n\n # Get all submissions for this assignment.\n subs = AssignmentSubmission.objects.filter(submission_for=asmt)\n #print subs\n course = asmt.course_code\n users = ReviewUser.objects.filter(courses=course)\n numUsers = len(users)\n latestSubmissions = get_latest(course, asmt, subs, users)\n # print 'filtered subs are ', latestSubmissions\n numSubs = len(latestSubmissions)\n print 'number of submissions: ', numSubs\n asmt.reviews_per_student = perStudent\n \n for user in users:\n print user\n # Don't want to make staff review stuff.\n if(user.djangoUser.is_staff or user.djangoUser.is_superuser):\n continue\n\n review = AssignmentReview.objects.get_or_create(by=user, assignment=asmt)[0]\n reviewsAssigned = len(review.submissions.all())\n # In case lecturer assigns reviews multiple times, or number of reviews changes. \n print (\"reviewsAssigned\", reviewsAssigned)\n if(reviewsAssigned < perStudent):\n for i in range(perStudent-reviewsAssigned):\n index = random.randint(0, numSubs-1)\n submission = latestSubmissions[index]\n \n # Make sure user isn't assigned to review their own submission\n # NB in the amazing edge case where this user is the only person who \n # submitted the assignment, we get an infinite loopevi\n # Also don't want student to be assigned same submission twice.\n while((submission.by == user) or (submission in review.submissions.all())):\n print submission.by == user\n #print review.submissions.all()\n #print \"in the while loop\"\n index = random.randint(0, numSubs-1)\n print index\n submission = latestSubmissions[index]\n \n review.submissions.add(submission)\n # Lecturer has reduced number of reviews per user \n elif(reviewsAssigned > perStudent):\n # Number of submissions we want to de-assign\n deassign = reviewsAssigned - perStudent \n # Get all the submissions the student has not yet completed.\n incomplete = []\n for sub in review.submissions.all():\n annotationsDone = AssignmentReview.numAnnotations(review, sub)\n if(annotationsDone < asmt.min_annotations):\n incomplete.append((sub, annotationsDone))\n\n # Choose the least-complete deassign submission reviews to remove.\n # Ascending list \n sortedList = sorted(incomplete, key=itemgetter(1))\n removeFromIncomplete = min(deassign, len(sortedList))\n for i in range(removeFromIncomplete):\n review.submissions.remove(sortedList[i][0]) \n \n if deassign > len(sortedList):\n for i in range(deassign):\n review.submissions.remove(reviews.submissions.all()[i])\n else: # nothing to assign or remove\n return \n \n return", "def prune(self, age_hours):\r\n pass", "def remove_promotional_tweets(tweets):\n clean = cleaner()\n n_tweets = {}\n for tweet in tweets:\n if not clean.linkChecker(tweets[tweet][\"text\"]):\n n_tweets[tweet] = tweets[tweet]\n return n_tweets", "def _prunelowestweight(self):\r\n # note: must be called with acquired self._lock!\r\n numentries = len(self._dict)\r\n if numentries >= self.maxentries:\r\n # evict according to entry's weight\r\n items = [(entry.weight, key) for key, entry in self._dict.iteritems()]\r\n items.sort()\r\n index = numentries - self.prunenum\r\n if index > 0:\r\n for weight, key in items[:index]:\r\n del self._dict[key]", "def suggest_movies(self, reviewer: Any, other: Any) -> list[Any]:\n potential_recs = self.get_neighbours(other)\n suggestions_so_far = []\n neighbours = self.get_neighbours(reviewer)\n\n for p_rec in potential_recs:\n if p_rec not in neighbours and 
self.get_weight(other, p_rec) >= 9:\n suggestions_so_far.append(p_rec)\n\n return suggestions_so_far", "def prune(self, upper, lower):\n # max_count = sorted([self.counts[key] for key in self.counts.keys()])[::-1][upper]\n max_count = upper\n\n print('Removed all words that occur less than {} times and more than {} times'.format(lower, upper))\n for i, doc in enumerate(self.docs):\n new_doc = []\n for word in doc:\n if self.counts[word] <= max_count and self.counts[word] > lower:\n new_doc.append(word)\n self.docs[i] = new_doc", "def ensemble_preds(dataset, nb_teachers, stdnt_data):\n\n # Compute shape of array that will hold probabilities produced by each\n # teacher, for each training point, and each output class\n result_shape = (nb_teachers, len(stdnt_data), FLAGS.nb_labels)\n\n # Create array that will hold result\n result = np.zeros(result_shape, dtype=np.float32)\n\n # Get predictions from each teacher\n for teacher_id in xrange(nb_teachers):\n # Compute path of checkpoint file for teacher model with ID teacher_id\n if FLAGS.deeper:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '_deep.ckpt-' + str(FLAGS.teachers_max_steps - 1) #NOLINT(long-line)\n else:\n ckpt_path = FLAGS.teachers_dir + '/' + str(dataset) + '_' + str(nb_teachers) + '_teachers_' + str(teacher_id) + '.ckpt-' + str(FLAGS.teachers_max_steps - 1) # NOLINT(long-line)\n\n # Get predictions on our training data and store in result array\n result[teacher_id] = deep_cnn.softmax_preds(stdnt_data, ckpt_path)\n\n # This can take a while when there are a lot of teachers so output status\n print(\"Computed Teacher \" + str(teacher_id) + \" softmax predictions\")\n\n return result", "def remove_n_nos(self, num_nos):\n for i in range(num_nos):\n elem = random.randint(1, 11 ** 4)\n self.remove(elem)", "def delete_n_registered_volunteers(app_id):\r\n delete_memoized(n_registered_volunteers, app_id)", "def get_teacher_num(self):\n return self.teacher_set.all().count()", "def _next_to_prune(tree, children=None):\n\n if children is None:\n children = tree.children\n\n t_nodes = _get_terminal_nodes(children)\n g_i = tree.init_error[t_nodes] - tree.best_error[t_nodes]\n\n return t_nodes[np.argmin(g_i)]", "def _reweight_and_discard_irrelevant(self, weighted_sample_pool, t):\n tmp = []\n ret = []\n wc = self.classifiers[t]\n theta_a = wc.theta_a\n theta_b = wc.theta_b\n\n norm_factor = 0\n discarded = 0\n for patch, w in weighted_sample_pool:\n response = self.h_t(patch, t)\n # if t > 3:\n # if response < theta_a or response > theta_b: # throw it away\n # discarded += 1\n # continue\n r = self.classify(patch)\n label = patch.label\n new_weight = w * np.exp(-label * r)\n\n tmp.append([patch, new_weight])\n norm_factor += new_weight\n for patch, w in tmp: # normalize weights\n normalized_weight = w / norm_factor\n ret.append([patch, normalized_weight])\n print \"Discarded %d training samples\" % discarded\n return ret", "def get_teachers(self):\n query = Teacher.all().order('teacher')\n return query.fetch()", "def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)", "def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)", "def _prune_heads(self, heads_to_prune):\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)", "def prune_trie(trie, 
threshold):\n\tnode = trie.root\n\tpq = []\n\tfor i in node.children.keys():\n\t\tpq.append((node.children[i],node.children[i].char))\n\twhile len(pq) > 0:\n\t\tcur_node, char = pq.pop()\n\t\tif cur_node.isEnd == False:\n\t\t\tfor i in cur_node.children.keys():\n\t\t\t\tpq.append((cur_node.children[i],char + cur_node.children[i].char))\n\t\telse:\n\t\t\tif cur_node.weight < threshold:\n\t\t\t\tdelete(trie, char)\n\t\t\telse:\n\t\t\t\tcontinue\n\treturn trie", "def _prior_teach(self):\n pass", "def _prune_heads(self, heads_to_prune):\n\n for layer, heads in heads_to_prune.items():\n\n self.encoder.layer[layer].attention.prune_heads(heads)", "def test18_remove_first_student_with_teacher(self):\n first_student = self.students_page.students_table()[0]\n students_list_without_first_student = self.students_page. \\\n click_edit_students_list_button(). \\\n click_delete_first_student_button(). \\\n click_exit_students_list_editor_button(). \\\n students_table()\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertNotIn(first_student,\n students_list_without_first_student)", "def delete_all_teachers(connection):\r\n with connection:\r\n return connection.execute(DELETE_ALL_TEACHERS)", "def remove_prob():\n\n #var for tuple\n weekly_probs = display_problems()\n\n cc_name1 = weekly_probs[0]\n url_link1 = weekly_probs[1]\n cc_name2 = weekly_probs[2]\n url_link2 = weekly_probs[3]\n cc_name3 = weekly_probs[4]\n url_link3 = weekly_probs[5]\n\n names_to_remove = [cc_name1, cc_name2, cc_name3]\n urls_to_remove = [url_link1, url_link2, url_link3]\n\n for name in names_to_remove:\n pass\n\n for url in urls_to_remove:\n pass\n\n return names_to_remove, urls_to_remove", "def wipe_all_topics(self):\n # doc_count = self.posts_read.find({'subreddit':self.subreddit, 'postwise.topic_assignment':{'$exists':True}}).count()\n doc_count = self.posts_write.update({'subreddit':self.subreddit, 'postwise.topic_assignment':{'$exists':True}},\n {'$unset':{'postwise.topic_distro':True,'postwise.topic_assignment':True}}, multi=True)\n\n print 'wiped topics from %i documents' % doc_count['nModified']", "def review(self):\n for player in self.team.players:\n player.career.potentially_retire()", "def filterByReview(minReviewNum, stories):\n\tsortedstories = filter(lambda s: s[\"reviews\"] >= minReviewNum, stories)\n\treturn [x for x in sortedstories]", "def prune(self, alignment_infos):\n alignments = []\n best_score = 0\n\n for alignment_info in alignment_infos:\n score = IBMModel4.model4_prob_t_a_given_s(alignment_info, self)\n best_score = max(score, best_score)\n alignments.append((alignment_info, score))\n\n threshold = IBMModel5.MIN_SCORE_FACTOR * best_score\n alignments = [a[0] for a in alignments if a[1] > threshold]\n return set(alignments)", "def test_with_data(data):\r\n i = 0\r\n tuning_set = []\r\n training_set = []\r\n num_reps = len(data)\r\n for i in range(0, num_reps-1):\r\n if (i % 4 == 0):\r\n tuning_set.append(data[i])\r\n else:\r\n training_set.append(data[i])\r\n\r\n unpruned = induce_node_tree(training_set, original_issues, \"D\", -1)\r\n pruned = prune_tree(unpruned, tuning_set)\r\n\r\n return pruned", "def pickten(divforround):\n seeds = [seed for seed in \"abcdefghij\"]\n players = []\n for seed in seeds:\n k = list(filter(lambda stu: stu.seed == seed, divforround))\n players.append(sample(k, 1)[0])\n for player in players:\n divforround.remove(player)\n return players", "def prune(self):\n target_user_ids = self.get_queryset().values_list('id', flat=True)\n 
exclude_user_ids = SentDrip.objects.filter(date__lt=conditional_now(),\n drip=self.drip_model,\n user__id__in=target_user_ids)\\\n .values_list('user_id', flat=True)\n self._queryset = self.get_queryset().exclude(id__in=exclude_user_ids)", "def trim_texts_by_count(self, min_count=100):\n\n for tid, text in self.graph.nodes(data=True):\n if text['count'] < min_count:\n self.graph.remove_node(tid)", "def _reducedProtToPeps(protToPeps, proteins):\n return {k: v for k, v in viewitems(protToPeps) if k not in proteins}", "def _prune_vocab(self,\n counter,\n n_total_files,\n n_seen_files,\n min_count=None,\n max_count=None,\n trim_rule=None):\n ## Initialize Exclusion List\n if not hasattr(self, \"_exclusion_list\"):\n self._exclusion_list = set()\n ## Compute Thresholds (With Margin)\n if min_count is None:\n min_count = None if self._min_token_freq == 0 else math.ceil(self._min_token_freq * 0.8 * n_seen_files / n_total_files)\n if max_count is None:\n max_count = None if self._max_token_freq is None else math.ceil(self._max_token_freq * 1.2 * n_seen_files / n_total_files)\n ## Get Acceptable Terms\n accept = set()\n for ngram, count in counter.items():\n keep = self._keep_vocab_item(ngram,\n count,\n min_count=min_count,\n max_count=max_count,\n trim_rule=trim_rule)\n if keep:\n accept.add(ngram)\n ## Update and Return Count\n counter_pruned = Counter(dict((n, c) for n, c in counter.items() if n in accept))\n return counter_pruned", "def prune_ratios(ratios, bad_words):\n for word in bad_words:\n ratios.pop(word, None)", "def trim_recommendation_pool(self, n):\n # {k:v for k, v in list(d.items())[:2]}\n self.recommendation_pool = {\n k: v for k, v in list(self.recommendation_pool.items())[:n]}", "def towers(n: int, from_pile=1, to_pile=2):\n if n == 1:\n print(\"Remove 1 disk from {} pile to {} pile\"\n .format(from_pile, to_pile))\n else:\n tmp = 6 - from_pile - to_pile\n towers(n-1, from_pile, tmp)\n print(\"Remove {} disk from {} pile to {} pile\"\n .format(n, from_pile, to_pile))\n towers(n-1, tmp, to_pile)", "def prune(i):\n\n return {'return':1, 'error':'pruning is not yet supported in this scenario'}", "def _remove_additional_elements(self):\n # Produces a list of keys in sample sorted by seed\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n\n # Removes the keys with largest seed values (beyond the\n # first k keys)\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]", "def refilter_previous_rts():\n rtlist = get_list_of_rts()\n if rtlist == None or len(rtlist) == 0:\n return\n for tweet in rtlist:\n if not filter_tweet(tweet['retweeted_status']):\n print(\"Previous tweet does not pass the filter anymore !\")\n print(\"@%s (%s) : %s\" % (\n tweet['retweeted_status']['user']['screen_name'],\n tweet['retweeted_status']['source'],\n tweet['retweeted_status']['text']))\n answer = raw_input(\"Delete ? [y/N] \")\n if answer == 'y':\n delete_tweet(tweet['id_str'])", "def test_partition_may_skip_train():\n ratings = lktu.ml_test.ratings\n # make a data set where some users only have 1 rating\n ratings = ratings.sample(frac=0.1)\n users = ratings.groupby('user')['rating'].count()\n assert users.min() == 1.0 # we should have some small users!\n users.name = 'ur_count'\n\n splits = xf.partition_users(ratings, 5, xf.SampleN(1))\n splits = list(splits)\n assert len(splits) == 5\n\n # now we go make sure we're missing some users! 
And don't have any NaN ratings\n for train, test in splits:\n # no null ratings\n assert all(train['rating'].notna())\n # see if test users with 1 rating are missing from train\n test = test.join(users, on='user')\n assert all(~(test.loc[test['ur_count'] == 1, 'user'].isin(train['user'].unique())))\n # and users with more than one rating are in train\n assert all(test.loc[test['ur_count'] > 1, 'user'].isin(train['user'].unique()))", "def getTelemarketers():\n callers = getAllKnownCallers()\n callees = getAllKnownCallees()\n texters = getAllKnownTexters()\n textees = getAllKnownTextees()\n telemarketers = getKnownTelemarketers()\n\n # Remove known telemarketers from list\n t = set(callers).difference(telemarketers)\n # Remove known numbers that have received calls\n t.difference_update(callees)\n # Remove those who have sent a text\n t.difference_update(texters)\n # Remove those who have received a text\n t.difference_update(textees)\n # Join with known telemarketers\n t = set(telemarketers).union(t)\n\n return sorted(t)", "def prune(self, n_leaves):\n self.tree_ = prune(self.tree_, n_leaves)\n return self", "def _remove_additional_elements(self):\n # Produces a list of keys in sample sorted by seed\n sorted_elements = sorted(self.elements.items(), key=lambda x: x[1][0])\n\n # Removes the keys with largest seed values (beyond the first k keys)\n for i in range(self.k, len(sorted_elements)):\n del self.elements[sorted_elements[i][0]]", "def top_controversial(self, n):\n return top_movies", "def pruning_routine(self):\n pass", "def k_most_talkative(self):\n word_counts = self.get_usercounts() # {u1: 3, u2: 4, }\n word_counts_heap = [(-count, username) for username, count in word_counts.items()] # [(-4, username), (-3, username)]\n heapify(word_counts_heap) # [(-4, u2), (-3, u1)]\n counter = 0\n while word_counts_heap or counter < k:\n _, username = heappop(word_counts_heap)\n counter += 1 # 1, 2\n yield username # u2, u1", "def prune_losers(self):\n self.log.debug(\"PRUNE LOSERS\")\n # check to see if people i followed follow me back\n cutoff_time = (datetime.now()\n - timedelta(hours=self.reciprocation_window))\n ingrates = Target.objects.filter(\n hunter=self.user, status=Target.PURGATORY,\n modified__lt=cutoff_time) # They didn't follow back in time\n\n for ingrate in ingrates:\n ingrate.status = Target.INGRATE\n ingrate.save()\n self.log.debug(\" => Unfollowed %s\" % ingrate.hunted.screen_name)\n try:\n self.api.destroy_friendship(ingrate.hunted)\n except Exception, e:\n print e\n return\n finally:\n pass\n #self.contact(ingrate)", "def select_people(torrent, select_seeders=True, num=50):\n selector = torrent.seeders if select_seeders else torrent.leechers\n resp = random.sample(selector, min(num, len(selector)))\n return resp", "def _compactness_pruning(self):\n feature_phrases = [phrase for phrase in self.frequent_features if self._is_compact(phrase)]\n self.features_phrases = feature_phrases", "def remove_premature_departures(trips):\n\t# sort ascending by arrival \n\t# then iteratively remove trips not also sorted by departure\n\tstarting_length = len(trips) # for logging\n\t#\n\ttrips.sort(key = lambda x: x.arrive_ts) # arrival, first to last\n\ti = 1\n\twhile i < len(trips):\n\t\t# if departure is before that of earlier-arriving trip\n\t\tif trips[i].depart_ts <= trips[i-1].depart_ts: \n\t\t\ttrips.pop(i)\n\t\t\tcontinue\n\t\ti+=1\n\t# there should be no simultaneous departures\n\tassert len(set([t.depart_ts for t in trips])) == len(trips)", "def 
test_remove_learner_specific_for_coach_pt1(self):\n self.assertTrue(self.coach2.has_perm(self.AUTH_REMOVE_LEARNER, self.learner_groups[1]))", "def first_phase(players):\n\n free_players = players[:]\n while free_players:\n player = free_players.pop()\n favourite = player.get_favourite()\n\n current = favourite.matching\n if current is not None:\n favourite._unmatch()\n free_players.append(current)\n\n favourite._match(player)\n\n for successor in favourite.get_successors():\n _delete_pair(successor, favourite)\n if not successor.prefs and successor in free_players:\n free_players.remove(successor)\n\n return players", "def get_to_review(self) -> VerbsList:\n to_review = [verb for verb, stats in self.practice_list.items() if pick(stats)]\n\n if len(to_review) < self.min_to_review:\n for _ in range(self.min_to_review - len(to_review)):\n to_review.append(self.to_learn_list.pop())\n\n return to_review", "def selection(self):\n\n # Sort root Teams from best to worst\n ranked_agents = sorted(self.agent_pop, key=lambda rt : rt.team.fitness, reverse=True)\n\n # Save trainer and top agent so far\n self.save()\n if self.AGENT_NAME == \"\":\n ranked_agents[0].save(Trainer.ENV_NAME)\n else:\n ranked_agents[0].save(Trainer.AGENT_NAME)\n\n # Sanity check: There should always be R_SIZE root Teams at this point\n if len(self.agent_pop) != Trainer.R_SIZE:\n print(\"WARNING - Trainer::selection - len(self.agent_pop) != Trainer.R_SIZE\")\n print(\" len(self.agent_pop) = \", len(self.agent_pop))\n\n # Calculate the number of root Teams to retain\n num_keep = int(Trainer.PERCENT_KEEP * Trainer.R_SIZE)\n\n # Grab slice of sorted root Team references to delete\n agents_to_delete = ranked_agents[num_keep:]\n\n # Clean all root Teams in the Teams-to-delete list\n # Note: Still need to clean the population of Learners which\n # may now contain orphans and update the root Team population.\n for agent in agents_to_delete:\n\n team = agent.team\n\n # Safety and sanity check: These should ALL be root Teams\n if team.getNumReferencingLearners() != 0:\n print(\"WARNING - Trainer::selection - A non-root Team is being deleted!\")\n\n team.removeLearners()\n self.team_pop.remove(team)\n\n # Clean up orphanced Learners after Team removal\n self.cleanOrphanedLearners()", "def _remove_points(self, points_to_remove, teams_population):\n for team in teams_population:\n for point in points_to_remove:\n if point.point_id_ in team.results_per_points_:\n team.results_per_points_.pop(point.point_id_)", "def remove_pruned_supersets(supersets, max_non_deps):\n for n in supersets[:]:\n if max_non_deps.contains_subset(n.attrs):\n supersets.remove(n)", "def prune_heads(self, heads_to_prune):\n base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed\n base_model._prune_heads(heads_to_prune)", "def prune_conformers(self, param={'M':'cml1', 'rp':1.0,'thresh':0.25,'wz':F,'sort':T}):\n if param['M'] in ['rmsd']:\n ds = self.get_rmsd()\n elif param['M'] in ['cm','cml1']:\n ds = self.get_dcm(param)\n else:\n raise '#ERROR: unknow rep'\n #print ' ++ ds = ', ds\n #print ' |__ es = ', np.array(self.es)\n seq = np.argsort(self.es) # sort by increasing energy\n ccids = []\n for i in seq:\n # always keep lowest-energy conformer\n if len(ccids) == 0:\n ccids.append(i)\n continue\n\n # discard conformers within the RMSD threshold\n if np.all(ds[i][ccids] >= thresh):\n ccids.append(i)\n self.nconf = len(ccids)\n # creat a new mol object with unique conformers\n new = Chem.Mol(self.mol)\n new.RemoveAllConformers()\n for i 
in ccids:\n ci = self.mol.GetConformer(i)\n new.AddConformer(ci, assignId=True)\n self.mol = new", "def trim(self, ratio=10000):\n trimmed, total = 0, 0\n for sources in self.sources():\n for s in (self.tp_by_source_and_text[sources],\n self.fp_by_source_and_text[sources],\n self.fn_by_source_and_text[sources],\n self.overlap_by_source_and_text[sources]):\n try:\n max_count = s.most_common(1)[0][1]\n except IndexError:\n continue\n for k, v in list(s.items()):\n if v * ratio < max_count:\n trimmed += 1\n del s[k]\n total += 1\n print(f'trimmed {trimmed}/{total} ({trimmed/total:.1%})',\n file=sys.stderr, flush=True)", "def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"teachers\"] = Teacher.objects.filter(grade__isnull=True)\n return context", "def getMembersForReproduction(self, numMembers, pickProb):\n\t\tselectedMembers = []\n\t\tmembersWithErrorsModifiable = list(self.membersWithErrors)\n\t\twhile len(selectedMembers) < numMembers:\n\t\t\tindexSelected = 0\n\t\t\twhile rnd.randint(0, 100) > int(pickProb * 100) and indexSelected != len(membersWithErrorsModifiable) - 1:\n\t\t\t\tindexSelected += 1\n\t\t\tmemberWithErrorSelected = membersWithErrorsModifiable[indexSelected]\n\t\t\tif memberWithErrorSelected[0] not in selectedMembers:\n\t\t\t\tselectedMembers.append(memberWithErrorSelected[0])\n\t\t\t\tmembersWithErrorsModifiable.remove(memberWithErrorSelected)\n\t\treturn selectedMembers", "def reduce_class_size(ratio, class_size, N_classes, G, student_schedule, \n\t\t\t\t\t copy=False):\n\tif copy:\n\t\tG = G.copy()\n\t\tstudent_schedule = student_schedule.copy()\n\t\n\tN_remove = round(ratio * class_size)\n\n\t# link types that are affected by students not being present at school\n\taffected_links = ['student_student_intra_class', \n\t\t\t\t\t 'student_student_table_neighbour',\n\t\t\t\t\t 'student_student_daycare',\n\t\t\t\t\t 'teaching_teacher_student',\n\t\t\t\t\t 'daycare_supervision_teacher_student']\n\n\tfor wd in range(1, 6):\n\t\tfor c in range(1, N_classes + 1):\n\t\t\tstudent_nodes = student_schedule[student_schedule['hour_1'] == c]\\\n\t\t\t\t\t.loc[wd].index\n\t\t\t# pick a number of students from every class and remove them\n\t\t\tstudents_to_remove = np.random.choice(student_nodes, N_remove, \\\n\t\t\t\treplace=False)\n\n\t\t\t## remove edges from the graph\n\t\t\t# find all edges on the given weekday in which at least one student\n\t\t\t# from the list of students to remove is involved. Only edges with a\n\t\t\t# link type that is affected by the absence from school are selected \n\t\t\t# (i.e. 
no family or friendship contacts)\n\t\t\tedges_to_remove = [(u, v, k) for u, v, k, data in \\\n\t\t\tG.edges(keys=True, data=True) if data['link_type'] in \\\n\t\t\taffected_links and data['weekday'] == wd and \\\n\t\t\t(u in students_to_remove or v in students_to_remove)]\n\t\t\t# remove affected edges from the graph\n\t\t\tfor e in edges_to_remove:\n\t\t\t\tG.remove_edge(e[0], e[1], key=e[2])\n\t\n\t\t\t## remove entries in the student schedule at the corresponding days\n\t\n\t\t\t# set all entries for students on the given weekday to nan in the \n\t\t\t# student schedule\n\t\t\tfor s in students_to_remove:\n\t\t\t\tfor hour in range(1, 10):\n\t\t\t\t\tstudent_schedule.loc[wd, s]['hour_{}'.format(hour)] = pd.NA\n\t\t\t\t\t\t\t\t\t\n\tif copy:\n\t\treturn G, student_schedule", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 469 or num_heads > 531", "def reject_fairness(experiment):\n num_heads = len([flip for flip in experiment if flip])\n return num_heads < 468 or num_heads > 531", "def delete_small_trajectories(trajectories, best_parameters):\n print('Filtering small trajectories...', end = ' ')\n size = best_parameters['min_size']\n pop_ind =[]\n for k, trajectory in enumerate(trajectories):\n traj = vis.get_points(trajectory)\n if len(np.unique(traj, axis = 0))<=size:\n pop_ind.append(k)\n for index in sorted(pop_ind, reverse = True):\n del trajectories[index]\n print('Done.')", "def reset(self):\n self.epochs = 0\n self.num_classes = 2 # Minimum of 2 classes\n self._random_state = check_random_state(self.random_state)\n if self.base_estimators:\n self.experts = [\n self.WeightedExpert(\n cp.deepcopy(be), 1, self.labels)\n for be in self.base_estimators\n ]\n else:\n self.experts = [\n self._construct_new_expert()\n ]", "def trim_vocab(n: int, vocab: Dict[str, int]) -> None:\r\n pair_counts = sorted(vocab.items(), key=lambda p: -p[1])\r\n pairs_to_trim = [pair for pair, count in pair_counts[n:]]\r\n for pair in pairs_to_trim:\r\n del vocab[pair]", "def get_teacher(self, **fields):\n existing_fields = [i.name for i in self._db.get_columns('teachers')]\n teacher_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n teacher_fields[key] = value\n teachers = [i for i in Teachers.select().filter(**teacher_fields)]\n # Expect single value if search by unique fields, list if by non-unique\n return teachers if len(teachers) > 1 else teachers[0] if len(teachers) == 1 else None", "def prune(tree, testSet, res, technique):\n assert technique in [\"reduced_error\"]\n if technique == \"reduced_error\":\n tbSet = testSet[testSet[tree.col] >= tree.value] #find which test observations belong to this tree's true branch\n fbSet = testSet[testSet[tree.col] < tree.value] #find which test observations belong to this tree's false branch\n \n if tree.tb.results is None: #Check if the true branch of this sub-tree is a leaf\n ptb = prune(tree.tb, tbSet, res, technique) #If not, recursively travel down the true branch and prune it.\n else:\n ptb = tree.tb #If the true branch is a leaf, then the true branch has--in essence--already been pruned.\n if tree.fb.results is None: #Check if the false branch of this sub-tree is a leaf\n pfb = prune(tree.fb, fbSet, 
res, technique) #If not, recursively travel down the false branch and prune it.\n else:\n pfb = tree.fb #If the false branch is a leaf, then the false branch has--in essence--already been pruned.\n \n #Sum the number of misclassifications of the test data at each of the leaves of this node\n wrong_in_leaves = __deep_count_errors(ptb, tbSet, res) + __deep_count_errors(pfb, fbSet, res)\n \n #Count the number of misclassificationsof the test data that would occur if this node were treated as a leaf\n wrong_at_node = __count_errors(tree, testSet, res)\n \n #Assess whether or not treating the node as a leaf improves the accuracy on the test set\n if wrong_at_node <= wrong_in_leaves: \n #NOTE:The following line of code seems slightly redundant since count_errors(tree, testSet, res) had to call \n #__get_results(tree). I should set up some way to save the output of that function call instead of calling it twice.\n return decisionNode(results = __get_results(tree)) #If so, return a decisionNode where the node is a leaf\n else:\n #If not, return a decisionNode where the node splits on the same column and value as before, but the \n #true and false branches are the pruned-versions of the original true and false branches. See above for\n #definition of ptb and pfb\n return decisionNode(col = tree.col, value = tree.value, tb = ptb, fb = pfb)", "def test_remove_learner_specific_for_coach_pt2(self):\n self.assertFalse(self.coach1.has_perm(self.AUTH_REMOVE_LEARNER, self.learner_groups[1]))", "def cleanUp(self):\n\n tapeList = sorted(glob.glob('TAPE?'))\n tapeList = ['TAPE%d' % num for num in [1, 2, 5, 6, 7, 10]]\n for tape in tapeList:\n if os.path.isfile(tape): os.remove(tape)\n # end TAPE loop", "def agents_cleanup(agents, n) -> set:\n return set(agent for agent in agents if agent[0] < n and agent[1] < n)", "def _normalise(self):\n if len(self.user_skip) == 0:\n return\n m = min(self.user_skip.values())\n for x in [k for k in self.user_skip]:\n self.user_skip[x] -= m\n if self.user_skip[x] == 0 and not self.user_queue[x]:\n self._purge_user(x)\n if not self.user_order:\n self.nonempty.clear()", "def pop_krakens(cls):\n pass", "def get_N_teachers(school_type, N_classes):\n\tteachers = {\n\t\t'primary':N_classes + int(N_classes / 2),\n\t\t'primary_dc':N_classes * 2,\n\t\t'lower_secondary':int(N_classes * 2.5),\n\t\t'lower_secondary_dc':N_classes * 3,\n\t\t'upper_secondary':int(N_classes * 2.85),\n\t\t'secondary':int(N_classes * 2.5),\n\t\t'secondary_dc':int(N_classes * 2.5)\n\t}\n\treturn teachers[school_type]", "def straight_prune_subsample(neuron, number_of_nodes):\n if(neuron.n_node > 200):\n neuron, distance = straight_subsample_with_fixed_number(neuron, 200)\n sp_neuron, state = prune(neuron=neuron,\n threshold=2*distance,\n lowest_number=number_of_nodes)\n while(~state):\n distance += 1\n sp_neuron = straigh_subsample(neuron, distance)\n sp_neuron, state = prune(neuron=sp_neuron,\n threshold=2*distance,\n lowest_number=number_of_nodes)\n return sp_neuron", "def prune(self, rel=None):\n deleted = self._registry._get_not_reachable(self.root, rel=rel)\n for d in deleted:\n self._delete_cuds_triples(d)" ]
[ "0.5696694", "0.545549", "0.54449904", "0.5432693", "0.52488464", "0.5203805", "0.5048298", "0.50102764", "0.493123", "0.49195728", "0.48889333", "0.48790887", "0.4860135", "0.48572624", "0.48336893", "0.48332903", "0.4776946", "0.47586012", "0.47427273", "0.47353852", "0.47345516", "0.47345436", "0.4726536", "0.46756825", "0.46719763", "0.46627593", "0.46603566", "0.4659124", "0.46532997", "0.4649696", "0.46484143", "0.46233067", "0.46178305", "0.46160367", "0.45931882", "0.45848283", "0.45848283", "0.45848283", "0.45828706", "0.45763105", "0.45720527", "0.456591", "0.45650417", "0.45636752", "0.45499948", "0.45364475", "0.4534927", "0.4527715", "0.4520064", "0.451617", "0.45153177", "0.45110956", "0.45040894", "0.4496145", "0.44913995", "0.44843227", "0.44806832", "0.44722754", "0.44650915", "0.4465083", "0.44646874", "0.44614813", "0.44579953", "0.44541898", "0.44501442", "0.44464856", "0.44399077", "0.4436096", "0.44324774", "0.44224966", "0.44174382", "0.44155967", "0.44127753", "0.44062304", "0.439987", "0.43987185", "0.43986547", "0.4391068", "0.43898523", "0.4383399", "0.43755078", "0.4372566", "0.43508196", "0.43417758", "0.43417758", "0.43417758", "0.43346128", "0.4333685", "0.4328661", "0.43270615", "0.4326436", "0.43243933", "0.43135744", "0.4312008", "0.43106455", "0.42990547", "0.42919475", "0.42898074", "0.42873552", "0.4286367" ]
0.7730154
0
Mirror a point over the diagonal of the map
def __mirror(self, x, y):\n    return (self.width - x - 1, self.height - y - 1)
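For orientation, here is a minimal runnable sketch of what this mapping does on a small grid. The GridMap wrapper, its constructor, and the sample sizes are assumptions added purely for illustration; only the mirror arithmetic itself comes from the record above.

class GridMap:
    # Hypothetical host class: the record only defines __mirror, so the
    # width/height fields and the constructor here are assumed for illustration.
    def __init__(self, width, height):
        self.width = width
        self.height = height

    def mirror(self, x, y):
        # Same arithmetic as __mirror above: a point reflection through the
        # center of a width x height grid (cell (0, 0) maps to the far corner).
        return (self.width - x - 1, self.height - y - 1)

m = GridMap(5, 4)
print(m.mirror(0, 0))            # (4, 3): one corner maps to the opposite corner
print(m.mirror(2, 1))            # (2, 2)
print(m.mirror(*m.mirror(2, 1)))  # (2, 1): the map is its own inverse

Note that this transform is a point reflection (a 180-degree rotation of the grid) rather than a flip across a single diagonal axis.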
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mirror_point_point(point, mirror):\n return add_vectors(mirror, subtract_vectors(mirror, point))", "def mirror(self, p1=(0, 1), p2=(0, 0)):\n self.position = _reflect_points(self.position, p1, p2)\n return self", "def op_mirror():\n mir = np.array([[1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, -1]])\n return mir", "def horizontalMirror(self, mirror):\n mirror_x = self.x\n distToMir = self.y - mirror\n mirror_y = self.y - (2 * distToMir)\n return Point(mirror_x, mirror_y)", "def horizontalMirror(self, mirror):\n mirror_x = self.x\n distToMir = self.y - mirror\n mirror_y = self.y - (2 * distToMir)\n return Point(mirror_x, mirror_y)", "def mirror(self, about_x=False, about_y=False, about_axis=False):\n if about_axis:\n c = (0, 0)\n elif about_axis is False:\n c = self.center()\n\n if about_y:\n x_new = []\n for p in self.x:\n x_new.append(c[0] + (c[0] - p))\n else:\n x_new = self.x\n\n if about_x:\n y_new = []\n for p in self.y:\n y_new.append(c[1] + (c[1] - p))\n else:\n y_new = self.y\n\n return Route(x_new, y_new, z=self.z)", "def mirrored( self ):\n return self._modifier(\n self,\n lambda x: mirror_bits( x, self.nr_of_pins )\n )", "def mirror(self, p1=(0, 1), p2=(0, 0)):\n if hasattr(p1, \"center\"):\n p1 = p1.center\n if hasattr(p2, \"center\"):\n p2 = p2.center\n p1 = np.array(p1)\n p2 = np.array(p2)\n # Translate so reflection axis passes through origin\n self.origin = self.origin - p1\n\n # Rotate so reflection axis aligns with x-axis\n angle = np.arctan2((p2[1] - p1[1]), (p2[0] - p1[0])) * 180 / pi\n self.origin = _rotate_points(self.origin, angle=-angle, center=[0, 0])\n self.rotation -= angle\n\n # Reflect across x-axis\n self.x_reflection = not self.x_reflection\n self.origin[1] = -self.origin[1]\n self.rotation = -self.rotation\n\n # Un-rotate and un-translate\n self.origin = _rotate_points(self.origin, angle=angle, center=[0, 0])\n self.rotation += angle\n self.origin = self.origin + p1\n\n if self.owner is not None:\n self.owner._bb_valid = False\n return self", "def mirror_feature(self, x=None, y=None):\n\n #mirrored = Feature()\n mirrored = copy.deepcopy(self)\n if not x == None:\n for i in range(mirrored.feature_len()):\n mirrored.coord[i] = np.array([[2*x-c[0],c[1]] for c in mirrored.coord[i]])\n if not y == None:\n for i in range(mirrored.feature_len()):\n mirrored.coord[i] = np.array([[c[0],2*y-c[1]] for c in mirrored.coord[i]]) \n return mirrored", "def new_mirror(self,alongx,alongy):\n Knew = K.clone()\n if alongx:\n Knew[0,2] = size[0]-Knew[0,2]\n if alongy:\n Knew[1,2] = size[1]-Knew[1,2]\n return CameraInfo(self.size,Knew,self.dist)", "def verticalMirror(self, mirror):\n mirror_y = self.y\n distToMir = self.x - mirror\n mirror_x = self.x - (2 * distToMir)\n return Point(mirror_x, mirror_y)", "def verticalMirror(self, mirror):\n mirror_y = self.y\n distToMir = self.x - mirror\n mirror_x = self.x - (2 * distToMir)\n return Point(mirror_x, mirror_y)", "def mirror(img):\n return img[:, ::-1]", "def mirror_points_point(points, mirror):\n return [mirror_point_point(point, mirror) for point in points]", "def mirror(self, p1=(0, 1), p2=(0, 0)):\n for n, points in enumerate(self.polygons):\n self.polygons[n] = _reflect_points(points, p1, p2)\n if self.parent is not None:\n self.parent._bb_valid = False\n return self", "def reverse(self, lon, lat):", "def mirror_point_to_plane(point, plane):\n assert isinstance(plane, cg3d_plane.CGPlane)\n pn, norm = plane.get_point_and_normal()\n norm.normalize()\n return point - 2.0 * ((point - pn) * norm) * norm", "def 
mirror(f, j=0):\n return f.per(dmp_mirror_in(f.rep, j, f.lev, f.dom))", "def draw_mirrored_cell(self, pos, color):\n self.hexes.set_cells(helpfunc.mirror_hexes(pos), color)", "def mirror(self):\n newRot = [x for x in self.rot]\n newRot[1] = -newRot[1]\n newLRot = [x for x in self.lrot]\n newLRot[1] = -newLRot[1]\n newLRot[2] = -newLRot[2]\n return Wall(self.beat, self.dur, -self.r, -self.l, self.d, self.u, newRot, newLRot)", "def panTo(self, p=None):\n if p == None:\n p = self.focus\n MV = self.MV\n vr = self.getViewRight()\n vu = self.getViewUp()\n p = -p\n x = np.dot(p, vr) # dot product\n y = np.dot(p, vu)\n MV[3, :2] = x, y # set first two entries of 4th row to x, y\n self.MV = MV", "def panTo(self, p=None):\n if p == None:\n p = self.focus\n MV = self.MV\n vr = self.getViewRight()\n vu = self.getViewUp()\n p = -p\n x = np.dot(p, vr) # dot product\n y = np.dot(p, vu)\n MV[3, :2] = x, y # set first two entries of 4th row to x, y\n self.MV = MV", "def transform_matrix_offset_center(matrix, x, y):\n o_x = float(x) / 2 + 0.5\n o_y = float(y) / 2 + 0.5\n offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])\n reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])\n transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)\n return transform_matrix", "def transform_matrix_offset_center(matrix, y, x):\n o_x = (x - 1) / 2.0\n o_y = (y - 1) / 2.0\n offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])\n reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])\n transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)\n return transform_matrix", "def distort_point(point):\n fx, fy = _camera_tuned_matrix[0][0], _camera_tuned_matrix[1][1]\n cx, cy = _camera_tuned_matrix[0][2], _camera_tuned_matrix[1][2]\n x, y = (point.x - cx) / fx, (point.y - cy) / fy\n\n k1, k2, p1, p2, k3 = _camera_distortion[0]\n r2 = x ** 2 + y ** 2\n r4 = r2 * r2\n r6 = r2 * r4\n x = x * (1 + k1 * r2 + k2 * r4 + k3 * r6) + 2 * p1 * x * y + p2 * (r2 + 2 * x * x)\n y = y * (1 + k1 * r2 + k2 * r4 + k3 * r6) + p1 * (r2 + 2 * y * y) + 2 * p2 * x * y\n\n fx2, fy2 = _camera_matrix[0][0], _camera_matrix[1][1]\n cx2, cy2 = _camera_matrix[0][2], _camera_matrix[1][2]\n x2 = x * fx2 + cx2\n y2 = y * fy2 + cy2\n return ge.Point(x2, y2)", "def mirror(self):\n self.__mirror = not self.__mirror", "def un_distort_point(point):\n points = np.array([[(point.x, point.y)]], np.float32)\n temp = cv2.undistortPoints(points, _camera_matrix, _camera_distortion)\n fx, fy = _camera_tuned_matrix[0][0], _camera_tuned_matrix[1][1]\n cx, cy = _camera_tuned_matrix[0][2], _camera_tuned_matrix[1][2]\n x = temp[0][0][0] * fx + cx\n y = temp[0][0][1] * fy + cy\n return ge.Point(x, y)", "def mirrorTour(tour):\n M0 = reflection_matrix((0,0,0), (1,0,0))\n mvps = []\n for vp in tour:\n p = vp.camera.position\n q = vp.camera.orientation\n print(p,q)\n mp = np.array((p.x,p.y,p.z,1.0))*np.matrix(M0)\n mp = np.squeeze(np.asarray(mp))\n # mq = quaternion_matrix((q.x,q.y,q.z,q.w))*np.matrix(M0)\n # mq = quaternion_from_matrix(mq)\n mq = (q.x,-q.y,-q.z,q.w)\n # print(mp,mq)\n mvp = ViewPoint(vp.id+10000,mp[0],mp[1],mp[2],mq[0],mq[1],mq[2],mq[3])\n mvps.append(mvp)\n return mvps", "def shift_point_wline(mutated_genome,index):\n Xval = random.randint(-int(imagewidth*0.1),int(imagewidth*0.1))\n Yval = random.randint(-int(imageheight*0.1),int(imageheight*0.1))\n point_index = random.randint(1,max(1,len(mutated_genome[index][2])-1))\n point = mutated_genome[index][2][point_index]\n newpoint = 
(point[0]+Xval,point[1]+Yval)\n mutated_genome[index][2][point_index] = newpoint", "def setAsMirror(self, mirrorNp):\r\n mirrorNp.setTexture( self.mirrorBuffer.getTexture(), 1 )", "def recenter(self, point=(0, 0)):\n self.center = Point(*point)", "def reflect_ghost(self, p0):\n # Instead of self.p1, one could take any point on the line p1--p2.\n dist = self.p1 - p0\n alpha = numpy.einsum(\"ij, ij->i\", dist, self.mirror_edge)\n # q is sits at the perpendicular intersection of the reflection\n q = dist - (alpha / self.beta)[:, None] * self.mirror_edge\n return p0 + 2 * q", "def mirror(self):\n \n screen_width = self.game.screen.get_width()\n screen_height = self.game.screen.get_height()\n\n if self.rect.centerx < 0 and self.vx < 0:\n self.rect.centerx = screen_width\n if self.rect.centerx > screen_width:\n self.rect.centerx = 0\n if self.rect.centery < 0 and self.vy < 0:\n self.rect.centery = 0\n if self.rect.centery > screen_height and self.vy > 0:\n self.rect.centery = screen_height", "def rotate(self):\n\n self.pins = self.pins[1:] + list(self.pins[0])\n self.mapping = self.mapping[1:] + list(self.mapping[0])", "def RearangePoinAnnotation(point,PaddZAxis=0,PaddYAxis=0,PaddXAxis=0):\n point[:,0] = point[:,0] ^ point[:,2]\n point[:,2] = point[:,2] ^ point[:,0]\n point[:,0] = point[:,0] ^ point[:,2]\n if PaddZAxis!=0:\n point[:,0]+=PaddZAxis\n if PaddYAxis!=0:\n point[:,1]+=PaddYAxis\n if PaddXAxis!=0:\n point[:,2]+=PaddXAxis\n return(point)", "def swap_xy(tensor: tf.Tensor) -> tf.Tensor:\n return moveaxis(tensor, -2, -1)", "def zoom(self, dr):\n d = self.getDistance()\n vn = self.getViewNormal()\n vn *= dr*d\n GL.glTranslate(vn[0], vn[1], vn[2])", "def diagonalAtPoint(self, point=..., idx:int=...):\n point, idx = self.resolvePoint(point=point, idx=idx)\n print(idx, type(idx))\n return point.lineToPoint(self.vertices[idx-2])", "def inv_projmap(self, img, nside=None):\n pass", "def rotate(self, matrix):\n n = len(matrix)\n #转置\n for i in range(n):\n for j in range(i+1,n):\n matrix[i][j],matrix[j][i] = matrix[j][i],matrix[i][j]\n #镜像\n mid = n//2\n for i in range(n):\n for j in range(mid):\n matrix[i][j],matrix[i][n-j-1] = matrix[i][n-j-1],matrix[i][j]", "def mirror_matrix(theta):\n c = np.cos(theta)\n s = np.sin(theta)\n return np.array([[c**2-s**2, 2*c*s],\n [2*c*s, s**2-c**2]])", "def reproject(point):\n wgs84 = pyproj.Proj('+init=epsg:4326')\n native = pyproj.Proj(DEM_PROJECTION)\n x, y = pyproj.transform(wgs84, native, point.x, point.y)\n return geom.Point(x, y)", "def zoom(self, dr):\n d = self.getDistance()\n vn = self.getViewNormal()\n vn *= dr*d\n GL.glTranslatef(vn[0], vn[1], vn[2])", "def mirror_gun (glider_gun):\n return glider_gun[2] (-1, 0, swap_xy)", "def diagonal_across(self, current_state):\n # Find the index of the '0' tile\n index = current_state.index('0')\n tile_that_was_swapped = '0'\n\n # If the current state can't perform the 'diagonal' action, then exit the function\n if not self.can_move_diagonally(index):\n return 0, tile_that_was_swapped\n\n # Else, we can move the '0' tile to the other side of the grid\n else:\n # We need to find the diagonal-across position of the '0' tile\n index_to_swap = index\n\n # If the '0' tile is in the top-left corner, then we need to switch it with the bottom-right corner\n if index == 0:\n index_to_swap = self.puzzle_length - 1\n\n # If the '0' tile is in the top-right corner, then we need to switch it with the bottom-left corner\n elif index == self.puzzle_width - 1:\n index_to_swap = self.puzzle_length - self.puzzle_width\n\n # If 
the '0' tile is in the bottom-left corner, then we need to switch it with the top-right corner\n elif index == self.puzzle_length - self.puzzle_width:\n index_to_swap = self.puzzle_width - 1\n\n # If the '0' tile is in the bottom-right corner, then we need to switch it with the top-left corner\n elif index == self.puzzle_length - 1:\n index_to_swap = 0\n\n # Swap the '0' tile with the other index\n tile_that_was_swapped = current_state[index_to_swap]\n current_state[index] = current_state[index_to_swap]\n current_state[index_to_swap] = '0'\n # end: if-else\n\n return self.cost_of_diagonal_across, tile_that_was_swapped", "def mirror(self):\n for l in range(self.numRows):\n mirrorGrid = []\n for l in range(self.numCols):\n row = self.grid[l][::-1]\n mirrorGrid.append(row)\n return mirrorGrid", "def move_to_origin(x):\n # Correct x so it is centered at (0,0)\n tx = np.mean(x[:no_points, :])\n ty = np.mean(x[no_points:, :])\n x[:no_points, :] = (x[:no_points, :] - tx)\n x[no_points:, :] = (x[no_points:, :] - ty)\n return x, tx, ty", "def rotate(self, m):\n n = len(m)\n for i in range(n//2):\n for j in range(i,n-i-1):\n m[j][~i],m[~i][~j],m[~j][i],m[i][j] = \\\n m[i][j],m[j][~i],m[~i][~j],m[~j][i]", "def mirror_horiz(pixels):\n copy = blank_image(len(pixels), len(pixels[0])) \n for r in range(len(pixels)):\n for c in range(len(pixels[0])):\n copy[r][c] = pixels[r][c]\n a = len(pixels[0])//2\n for r in range(len(pixels)):\n for c in range(a, len(pixels[0])):\n copy[r][c] = copy[r][-c-1]\n return copy", "def mirror_augmentation():\n return lambda image: ImageOps.mirror(image)", "def lift(point):\n return gs.copy(point)", "def inverse_coord_shift(kalman_coord):\n x_c, y_c, aspect_ratio, height = kalman_coord[:4]\n width = height / aspect_ratio\n x_min = x_c - width / 2\n x_max = x_c + width / 2\n y_min = y_c - height / 2\n y_max = y_c + height / 2\n std_coord = np.array([x_min, y_min, x_max, y_max])\n return np.int_(np.round(std_coord))", "def move_diagonal(self):\r\n if self.movement == \"diagonal\" and self.flag_move:\r\n self.move_ball()\r\n self.canvas.after(50, self.move_diagonal)", "def rotate(self, matrix: List[List[int]]) -> None:\r\n # Vertical Mirror\r\n for i in range(len(matrix)):\r\n for j in range(floor(len(matrix)/2)):\r\n t = matrix[i][j]\r\n matrix[i][j] = matrix[i][len(matrix)-1-j]\r\n matrix[i][len(matrix)-1-j] = t\r\n \r\n # Top right to bottom left diagonal mirror\r\n for i in range(len(matrix)):\r\n for j in range(len(matrix)):\r\n if i < len(matrix)-1-j:\r\n t = matrix[i][j]\r\n matrix[i][j] = matrix[len(matrix)-j-1][len(matrix)-1-i]\r\n matrix[len(matrix)-j-1][len(matrix)-1-i] = t", "def mirrorImage(self):\n\n im = Image.open(self.ActivePhoto)\n out = im.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n out.save(self.ActivePhoto)\n self.photo.setPixmap(QtGui.QPixmap(self.ActivePhoto))\n print (\"Flipped image\")", "def mirror(to='IN'):\n to = to.strip().upper()\n if (to==\"OUT\"):\n opticalcoupler.MoveMirror(0)\n logger.info('Mirror: '+to)\n camera.status.mirror = 'OUT'\n elif (to==\"IN\"):\n opticalcoupler.MoveMirror(opticalcoupler.MIRROR_IN)\n logger.info('Mirror: '+to)\n camera.status.mirror = 'IN'\n else:\n logger.error(\"Usage: 'mirror IN' or 'mirror OUT'\")", "def putDiagonal(self, vector):\n if type(vector) in [type(1), type(1.)]:\n ids = numerix.arange(self._shape[0])\n tmp = numerix.zeros((self._shape[0],), 'd')\n tmp[:] = vector\n self.put(tmp, ids, ids)\n else:\n ids = numerix.arange(len(vector))\n self.put(vector, ids, ids)", "def mirror_y(board):\n new_board = board[:]\n 
new_board.reverse()\n return new_board", "def diag_line((lat0, lon0, alt0), (lat, lon, alt), k=5):\n\tlats = np.linspace(lat0, lat, k)\n\tlons = np.linspace(lon0, lon, k)\n\talts = np.linspace(alt0, alt, k)\n\tp = zip(lats, lons, alts)\n\treturn p", "def pinhole_projection_image_to_world(uv, z, K):\n\n u_v_1 = np.array([uv[0], uv[1], 1])\n pos = z * np.matmul(inv(K),u_v_1)\n return pos", "def reflect(self):\n self.vertices[-1, :] = self.reflected", "def steerright(self):\n self.direction = self.direction-self.steering\n if self.direction < 0:\n self.direction = 360-90\n self.image, self.rect = rot_center(self.image_orig,self.rect,self.direction)", "def diagonal(t, x, y):\n from math import atan2, sqrt, pi\n angle = atan2(y, x) * 180 / pi\n dist = sqrt(x**2 + y**2)\n lt(t, angle)\n fdbk(t, dist)\n rt(t, angle)", "def pad(self, nxp, nyp):\n assert (nxp > self.nx)\n assert (nyp > self.ny)\n assert (np.mod(nxp - self.nx, 2) == 0)\n assert (np.mod(nyp - self.ny, 2) == 0)\n\n ret = rmap(nx=nxp, dx=self.dx, ny=nyp, dy=self.dy)\n ret.map[(nyp - self.ny) / 2:(nyp + self.ny) / 2, (nxp - self.nx) / 2:(\n nxp + self.nx) / 2] = self.map\n return ret", "def diagonal_adjacent(self, current_state):\n # Find the index of the '0' tile\n index = current_state.index('0')\n tile_that_was_swapped = '0'\n\n # If the current state can't perform the 'diagonal' action, then exit the function\n if not self.can_move_diagonally(index):\n return 0, tile_that_was_swapped\n\n # Else, we can move the '0' tile to the other side of the grid\n else:\n # We need to find the adjacent-diagonal position of the '0' tile\n index_to_swap = index\n\n # If the '0' tile is in the top-left corner, then we need to switch it with the tile +1 row and +1 column\n if index == 0:\n index_to_swap = index + self.puzzle_width + 1\n\n # If the '0' tile is in the top-right corner, then we need to switch it with the tile +1 row and -1 column\n elif index == self.puzzle_width - 1:\n index_to_swap = index + self.puzzle_width - 1\n\n # If the tile is in the bottom-left corner, then we need to switch with the tile -1 row and +1 column\n elif index == self.puzzle_length - self.puzzle_width:\n index_to_swap = index - self.puzzle_width + 1\n\n # If the tile is in the bottom-right corner, then we need to switch with the tile -1 row and -1 column\n elif index == self.puzzle_length - 1:\n index_to_swap = index - self.puzzle_width - 1\n\n # Swap the '0' tile with the other index\n tile_that_was_swapped = current_state[index_to_swap]\n current_state[index] = current_state[index_to_swap]\n current_state[index_to_swap] = '0'\n # end: if-else\n\n return self.cost_of_diagonal_adjacent, tile_that_was_swapped", "def change_map_down(self):\n self.change_map(self.current_map_idx + 1)", "def rotate(self, matrix):\n newCoord = np.zeros(self.coord.shape)\n newCoord[:,0]=matrix[0,0]+matrix[0,1]*self.coord[:,0]+matrix[0,2]*self.coord[:,1]+matrix[0,3]*self.coord[:,2]\n newCoord[:,1]=matrix[1,0]+matrix[1,1]*self.coord[:,0]+matrix[1,2]*self.coord[:,1]+matrix[1,3]*self.coord[:,2]\n newCoord[:,2]=matrix[2,0]+matrix[2,1]*self.coord[:,0]+matrix[2,2]*self.coord[:,1]+matrix[2,3]*self.coord[:,2]\n self.coord = deepcopy(newCoord)", "def mirror_x(board):\n new_board = board[:]\n for (ix, item) in enumerate(board):\n new_board[ix] = (len(board) - 1) - item\n\n return new_board", "def clearCross(self, coord, lmap):\n for n in range(-3, 3):\n color = self.colorMap(lmap.getCell((coord[0] + n, coord[1] + n)))\n self._drawPoint(color, (coord[0] + n, coord[1] + n))\n color = 
self.colorMap(lmap.getCell((coord[0] + n, coord[1] - n)))\n self._drawPoint(color, (coord[0] + n, coord[1] - n))\n self.zoomMap(self.scale)", "def shift_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth*0.1),int(imagewidth*0.1))\n Yval = random.randint(-int(imageheight*0.1),int(imageheight*0.1))\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n point = mutated_genome[index][2][point_index]\n newpoint = (point[0]+Xval,point[1]+Yval)\n mutated_genome[index][2][point_index] = newpoint", "def add_on_land_position(self) -> Point2:\n return self.position.offset(Point2((-2.5, 0.5)))", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix) # 行\n\n # 以x=y为轴翻转\n # [[1,2,3],\n # [4,5,6],\n # [7,8,9]]\n # 变为\n # [1 4 7]\n # [2 5 8]\n # [3 6 9]\n for i in range(n):\n for j in range(i, n):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n # 以中点为轴翻转\n for i in range(n):\n for j in range(n // 2):\n matrix[i][j], matrix[i][n - j - 1] = matrix[i][n - j - 1], \\\n matrix[i][j]\n\n # 非原地修改写法,先上下翻转,再以x=y为轴复制对应数字\n # n = len(matrix)\n # r = list(zip(*matrix[::-1]))\n # for i in range(n):\n # for j in range(n):\n # matrix[i][j] = r[i][j]", "def _maping(x,y,l,a):\n newx = (x**2 *(l* ((x**2 + y**2)**(a/2) - 1) + 2) - l * y**2 *((x**2 + y**2)**(a/2) - 1))/(x**2 + y**2) \n newy = (2 * x* y *(l* ((x**2 + y**2)**(a/2) - 1) + 1))/(x**2 + y**2)\n return newx, newy", "def mirror_positions(positions, domain, mirror):\n for d in range(3):\n if mirror[d]:\n mirrored_xs_grid = positions.copy()\n mirrored_xs_grid[:, :, :, d] = (domain[2 * d + 1] - domain[2 * d]) + positions[:, :, :, d]\n positions = np.concatenate((positions, mirrored_xs_grid), axis=d)\n return positions", "def mirror_densities(densities, mirror):\n nx, ny, nz = densities.shape\n if mirror[0]:\n densities = np.concatenate(\n (densities, densities[range(nx - 1, -1, -1), :, :]), axis=0)\n if mirror[1]:\n densities = np.concatenate(\n (densities, densities[:, range(ny - 1, -1, -1), :]), axis=1)\n if mirror[2]:\n densities = np.concatenate(\n (densities, densities[:, :, range(nz - 1, -1, -1)]), axis=2)\n return densities", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for l in range(n // 2):\n r = n - 1 - l\n for p in range(l, r):\n q = n - 1 - p\n cache = matrix[l][p]\n matrix[l][p] = matrix[q][l]\n matrix[q][l] = matrix[r][q]\n matrix[r][q] = matrix[p][r]\n matrix[p][r] = cache", "def in_place_offset(self, offset):\n self.p += offset * self.cross_z.normalized()", "def __invert__(self) -> PointType:\n return Point(~self.x, ~self.y)", "def cw_rotate(self):\n self.grid = [list(x) for x in zip(*self.grid[::-1])]\n self.find_edges()", "def implicit_matrix_1d(xsize, centervalue, neighborvalue):\n matrix1d = np.zeros([xsize, xsize])\n np.fill_diagonal(matrix1d, centervalue)\n matrix1d = np.roll(matrix1d, 1, 0)\n np.fill_diagonal(matrix1d, neighborvalue)\n matrix1d = np.roll(matrix1d, -2, 0)\n np.fill_diagonal(matrix1d, neighborvalue)\n matrix1d = np.roll(matrix1d, 1, 0)\n return matrix1d", "def rotater(self, direction):\n if self.center:\n mapDel(self, theFallen)\n rotate(self, direction)\n mapAdd(self, theFallen)", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = 
result_matrix\n pass", "def reverse(self):\n x = self._x * -1\n y = self._y * -1\n return Point(x,y)", "def change_map_up(self):\n if self.current_map_idx > 0:\n self.change_map(self.current_map_idx + 1)", "def mirror_path(path):\n points, env = path\n \n points = [mirror(p) for p in points]\n if env == 'c':\n env = 'l'\n elif env == 'l':\n env = 'c'\n else:\n raise Exception('unknown env in table')\n\n return (points, env)", "def warp_pos(pos, warp_matrix):\n import torch\n p1 = torch.Tensor([pos[0], pos[1], 1]).view(3, 1)\n p2 = torch.Tensor([pos[2], pos[3], 1]).view(3, 1)\n p1_n = torch.mm(warp_matrix, p1).view(1, 2)\n p2_n = torch.mm(warp_matrix, p2).view(1, 2)\n return torch.cat((p1_n, p2_n), 1).view(1, -1)", "def flip_around(self, row: int, col: int) -> None:\n # we treat the 8 adjacent cases :\n neighbours = np.array([[-1, -1], [-1, 0], [-1, 1],\n [0, -1], [0, 1],\n [1, -1], [1, 0], [1, 1]], dtype=int)\n for dr, dc in neighbours:\n row_neighbour = row + dr\n col_neighbour = col + dc\n if not self.is_in_the_grid(row_neighbour, col_neighbour):\n continue\n self.flip(row_neighbour, col_neighbour)", "def rec_lat(matrix):\n rec_lat = np.zeros([3,3])\n V = np.linalg.det(matrix)\n rec_lat[0] = np.cross(matrix[1], matrix[2])/V\n rec_lat[1] = np.cross(matrix[2], matrix[0])/V\n rec_lat[2] = np.cross(matrix[0], matrix[1])/V\n return rec_lat #* 2 * pi", "def translate(self, displacement):\n\n self.center = (self.center[0] + displacement[0],\n self.center[1] + displacement[1])", "def rotate_to_local(self,vxyz):\n return sp.mxv(self.mtxtofov,vxyz)", "def translate_point(point, y_offset=0, x_offset=0):\n\n out_point = point.copy()\n\n out_point[:, 0] += y_offset\n out_point[:, 1] += x_offset\n\n return out_point", "def rotate(self, matrix):\n # matrix[:] = zip(*matrix[::-1])\n n = len(matrix)\n # 水平翻转\n for i in range(n // 2):\n for j in range(n):\n matrix[i][j], matrix[n - i - 1][j] = matrix[n - i - 1][j], matrix[i][j]\n # 主对角线翻转\n for i in range(n):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for dig in range(n):\n row = dig\n for col in range(dig+1, n):\n matrix[row][col] , matrix[col][row] = matrix[col][row], matrix[row][col]\n print(matrix)\n left = 0\n right = n-1\n while left < right:\n for row in range(n):\n matrix[row][left], matrix[row][right] = matrix[row][right], matrix[row][left]\n left+=1\n right-=1", "def move_point_wline(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(1,max(1,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point", "def test_pan():\n _c = _a.copy()\n _c = _c.piv.pan(1.0, -1.0) # note the use of .piv.\n assert np.allclose(_c.coords[\"x\"][0], 1.312480)\n assert np.allclose(_c.coords[\"y\"][0], -1.31248)", "def mirror_world(some_list):\n l = some_list[::-1]\n print(f\"Our original list: {some_list}\")\n print(f\"Our original list location: {id(some_list)}\")\n print(f\"Our new list: {l}\")\n print(f\"Our new list location: {id(l)}\")\n return \"done!\"", "def set_diag(M,d,idx=0):\n n, m = shape_mat(M)\n if idx >= 0:\n for i, di in enumerate( d ):\n M[i][i+idx] = di\n else:\n for i, di in enumerate( d ):\n M[i-idx][i] = di", "def apply(self, point):\n m = numpy.dot(self.matrix, numpy.array([point[0], point[1], 1.0]).transpose())\n return pygame.Vector2(m[0], m[1])", "def 
rotateMatrixAttempt(matrix):\n for idxring in range(len(matrix) / 2):\n swap = []\n ringgen = ringCoords(len(matrix), idxring)\n for x, y in ringgen:\n swap.append(matrix[y][x])\n ringgen = ringCoords(len(matrix), idxring)\n start_idx = len(matrix) - 1 - idxring\n for swapidx in range(-1 * start_idx, len(swap) - start_idx):\n x, y = ringgen.next()\n matrix[y][x] = swap[swapidx]", "def d4out():\n\td4x.moveTo(d4x_out)\n\td4y.moveTo(d4y_out)" ]
[ "0.71411884", "0.640484", "0.6257936", "0.59739065", "0.59739065", "0.58550274", "0.5843202", "0.5798668", "0.5773249", "0.57277185", "0.57091093", "0.57091093", "0.5707049", "0.56990564", "0.5662024", "0.5583394", "0.55673605", "0.55462587", "0.5533044", "0.54987687", "0.54721487", "0.54721487", "0.54370487", "0.54353464", "0.5419352", "0.5380543", "0.53656626", "0.53381765", "0.53187495", "0.53162867", "0.53144664", "0.53015316", "0.5299219", "0.5290637", "0.52742565", "0.52707374", "0.52675045", "0.5259585", "0.5250074", "0.52480274", "0.52479047", "0.52451587", "0.5244753", "0.5240814", "0.52229154", "0.5221927", "0.5221772", "0.5213725", "0.52020156", "0.5177779", "0.5166243", "0.5157699", "0.51389366", "0.5135721", "0.513556", "0.5122954", "0.50986314", "0.5085434", "0.50755465", "0.50648725", "0.50639504", "0.50637275", "0.5062372", "0.50613266", "0.5039482", "0.5038953", "0.5029106", "0.5026238", "0.5017619", "0.5015508", "0.5011479", "0.50075585", "0.50068176", "0.5001002", "0.49915054", "0.49866605", "0.49739286", "0.4960575", "0.49586886", "0.49566677", "0.49491137", "0.49425083", "0.49419555", "0.4941934", "0.49393725", "0.4938464", "0.49346706", "0.4930358", "0.4926582", "0.49228695", "0.49223375", "0.49221238", "0.4915631", "0.49095634", "0.49008864", "0.48969388", "0.48951173", "0.48943552", "0.48915234", "0.48898923" ]
0.6138612
3
Compute several gaussians and build the payout map
def __generate_payouts(self, num_hills, hill_size):\n    hills = []\n    for i in range(num_hills):\n        cx = random.randint(0, self.width - 1)\n        cy = random.randint(0, self.height - 1)\n        sx = random.random() * hill_size + 1\n        sy = random.random() * hill_size + 1\n        theta = random.random() * math.pi\n        hills.append(Gaussian2D((cx, cy), (sx, sy), theta))\n        # Add a mirror image one too to make the map fair\n        hills.append(Gaussian2D(self.__mirror(cx, cy), (sx, sy), theta + math.pi))\n    # Sum all the hills\n    money_payout_rates = [[0.0] * self.height for x in range(self.width)]\n    for y in range(self.height):\n        for x in range(self.width):\n            money_payout_rates[x][y] = sum([h.value((x,y)) for h in hills])\n    # Normalize the rates from 0->1\n    max_payout = max([max(row) for row in money_payout_rates])\n    min_payout = min([min(row) for row in money_payout_rates])\n    for y in range(self.height):\n        for x in range(self.width):\n            offset = money_payout_rates[x][y] - min_payout\n            money_payout_rates[x][y] = offset / (max_payout - min_payout)\n            money_payout_rates[x][y] = int(1000 * money_payout_rates[x][y]) / 1000.0\n    return money_payout_rates
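The snippet above relies on a Gaussian2D helper exposing value((x, y)); that class is not part of the record, so the stand-in below is only an assumption to make the code self-contained (the project's real implementation may normalize or parameterize differently).

import math

class Gaussian2D:
    # Assumed interface: built from a center (cx, cy), per-axis spreads
    # (sx, sy), and a rotation angle theta; evaluated pointwise via value().
    def __init__(self, center, sigmas, theta):
        self.cx, self.cy = center
        self.sx, self.sy = sigmas
        self.theta = theta

    def value(self, point):
        # Rotate the offset into the Gaussian's own frame, then evaluate an
        # unnormalized 2-D Gaussian there (peak value 1.0 at the center).
        dx, dy = point[0] - self.cx, point[1] - self.cy
        c, s = math.cos(self.theta), math.sin(self.theta)
        u = c * dx + s * dy
        v = -s * dx + c * dy
        return math.exp(-0.5 * ((u / self.sx) ** 2 + (v / self.sy) ** 2))

With such a helper in place, __generate_payouts sums one hill plus its mirror image per iteration, then rescales the summed surface to the range 0-1 and truncates each rate to three decimals.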
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Generate_BG_Template(outputSize=300, angularSize = 10, fileOut = 'BGRateMap.pickle' ):\r\n template = np.zeros((outputSize,outputSize))\r\n ppd=float(outputSize)/float(angularSize) # pixels per deg\r\n \r\n events110 = ParseFermi.Import_File('photons.txt', energyRange = (120000,140000),lonRange=(-5,5),latRange = (-5,5))\r\n events130 = ParseFermi.Import_File('photons.txt', energyRange = (100000,120000),lonRange=(-5,5),latRange = (-5,5))\r\n events150 = ParseFermi.Import_File('photons.txt', energyRange = (140000,200000),lonRange=(-5,5),latRange = (-5,5))\r\n \r\n for i in range(10000,200001,20000):\r\n if i == 130000:\r\n continue\r\n events = ParseFermi.Import_File('photons.txt', energyRange = (i-10000,i+10000),lonRange=(-5,5),latRange = (-5,5))\r\n BG = np.zeros((outputSize,outputSize)) \r\n for j in events:\r\n xIDX = int(j[1]*ppd+float(outputSize/2))\r\n yIDX = int(j[2]*ppd+float(outputSize/2))\r\n BG[yIDX][xIDX] += 1.0\r\n \r\n psfDeg = .2+float(200)/float(i)\r\n psfOut = psfDeg*ppd\r\n #print i/1e3, psfDeg, psfOut\r\n \r\n template += scipy.ndimage.filters.gaussian_filter(BG, psfOut)\r\n \r\n template = template/np.max(template)\r\n \r\n # Write to file \r\n outFile = open(fileOut, \"wb\" )\r\n pickle.dump(template, outFile)\r\n print 'Rate Map saved to ', fileOut\r\n \r\n plt.imshow(scipy.fliplr(template), 'jet',extent=[5,-5,-5,5])\r\n\r\n plt.xlabel(r'$l [^\\circ]$')\r\n plt.ylabel(r'$b [^\\circ]$')\r\n plt.xlim(5,-5)\r\n plt.ylim(-5,5)\r\n plt.colorbar()\r\n\r\n x,y = Find_Centroid(template)\r\n x,y = (x/ppd -angularSize/2.0,) ,(y/ppd -angularSize/2.0,)\r\n print x,y\r\n plt.scatter(x,y, s=10, c='r', marker = '+')\r\n \r\n X,Y = FormatEvents(events110)\r\n plt.scatter(X, Y, label = '100-120 GeV', marker = 'o' , c = 'k')\r\n \r\n X,Y = FormatEvents(events130)\r\n plt.scatter(X, Y, label = '120-140 GeV', marker = 'o' , c = 'r')\r\n \r\n X,Y = FormatEvents(events150)\r\n plt.scatter(X, Y, label = '140-200 GeV', marker = 'o' , c = 'g' )\r\n \r\n from matplotlib.font_manager import FontProperties\r\n fontP = FontProperties()\r\n fontP.set_size('small')\r\n plt.legend(loc=1, ncol=1, fancybox=True, shadow=False,prop=fontP,borderaxespad=0.,labelspacing = .2)\r\n \r\n from matplotlib.backends.backend_pdf import PdfPages\r\n if fileOut != '':\r\n pp = PdfPages(fileOut + '_sideband.pdf')\r\n plt.savefig(pp, format='pdf')\r\n print \"Figures saved to \", str(fileOut)+ '_sideband.pdf\\n',\r\n pp.close()\r\n \r\n plt.show()\r\n return template", "def sw_gaussians(mu1, mu2, Sigma1, Sigma2, n_proj=100):\n d = mu1.shape[0]\n # Project data\n thetas = np.random.randn(n_proj, d)\n thetas = thetas / (np.sqrt((thetas ** 2).sum(axis=1)))[:, None] # Normalize\n proj_mu1 = thetas @ mu1\n proj_mu2 = thetas @ mu2\n sw2 = 0\n for l in range(n_proj):\n th = thetas[l]\n proj_sigma1 = (th @ Sigma1) @ th\n proj_sigma2 = (th @ Sigma2) @ th\n sw2 += wass_gaussians(np.array([proj_mu1[l]]), np.array([proj_mu2[l]]),\n np.array([proj_sigma1]), np.array([proj_sigma2]))**2\n sw2 /= n_proj\n return np.sqrt(sw2)", "def plot(self):\n \n \n x_ibs=[] \n x_gss=[]\n y_ibs=[] \n y_gss=[]\n x_pso=[]\n x_bgd=[]\n y_bgd=[]\n y_pso=[]\n x_gd=[]\n y_gd=[]\n \n i=0.0000001\n \n # for k in range(1,51):\n # i= random.uniform(0.00000001, 1)\n # t_avg_ibs=[]\n # t_avg_gss=[]\n # for j in range(1,51):\n #L=random.randint(-100, 0)\n #U=random.randint(0, 100)\n max_iter=self.Max_iter \n L=self.Lower_bound\n U=self.Upper_bound\n \n minima=self.gss(L,U,i,1000)\n #print(\"minima at X = \",minima[1])\n 
x_ibs.append(self.I_bisection(L,U,minima[1],max_iter)[0])\n x_gss.append(self.gss(L,U,i,max_iter)[0])\n x_pso.append(self.particle_Swarm(self.func, L, U, 2, max_iter)[0])\n x_gd.append(self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)[0])\n x_bgd.append(self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)[0])\n #print(x_pso)\n for i in x_ibs[0]:\n #print(self.Func(i)) \n y_ibs.append(self.Func(i))\n for i in x_gss[0]:\n y_gss.append(self.Func(i)) \n for i in x_pso[0]:\n y_pso.append(self.Func(i)) \n for i in x_gd[0]:\n y_gd.append(self.Func(i)) \n for i in x_bgd[0]:\n y_bgd.append(self.Func(i)) \n #print(y_gss)\n\n plt.plot(x_ibs[0], y_ibs, 'r.')\n plt.plot(x_gss[0], y_gss, '.')\n plt.plot(x_pso[0], y_pso, 'y.')\n #plt.plot(x_gd[0], y_gd, 'y.')\n #plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y')\n \n plt.suptitle('Interval Bisection Search (Red) vs Golden Section Search (Blue) vs Particle swarm optimization (Green)')\n #plt.axis([0, 100, 0.00000001, 1]) \n plt.show()\n plt.plot(x_gd[0], y_gd, 'r.')\n plt.plot(x_bgd[0], y_bgd, 'k.')\n plt.xlabel('x')\n plt.ylabel('y') \n plt.suptitle('Gradient Descent (Red) vs Batch Gradient Descent (Black) ')\n \n plt.show()\n \n start_time = timeit.default_timer()\n ibs=self.I_bisection(L,U,minima[1],max_iter)\n print(\" Execution time for Interval bisection Method is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gss=self.gss(L,U,i,max_iter)\n print(\" Execution time for Golden Section Search is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n pso=self.particle_Swarm(self.func, L, U, 2, max_iter)\n print(\" Execution time for Particle swarm optimization is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n gd=self.gradient_descent(X=U ,eta=0.01, tol=minima[1],iter= max_iter)\n print(\" Execution time for Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n start_time = timeit.default_timer()\n bgd=self.b_gradient_descent(LB=L,UB=U ,eta=0.01, tol=minima[1],iter=max_iter)\n print(\" Execution time for Batch Gradient Descent is\", timeit.default_timer() - start_time,\"s\")\n plt.plot(ibs[1], ibs[2], 'r.')\n plt.text(ibs[1], ibs[2],\"IB\")\n plt.plot(gss[1], gss[2], '.')\n plt.text(gss[1], gss[2],\" GSS\")\n plt.plot(pso[1], pso[2], 'y.')\n plt.text(pso[1], pso[2],\" PSO\")\n plt.plot(gd[1], gd[2], 'g.')\n plt.text(gd[1], gd[2],\" GD \")\n plt.plot(bgd[1],bgd[2], 'k.')\n plt.text(bgd[1], bgd[2],\" Batch_GD\")\n \n plt.xlabel('Value of X')\n plt.ylabel('NUmber of iteration') \n plt.suptitle('Number of iterations vs minimum value of x')\n \n plt.show()", "def construct_mog(self, outputs):\n # check for correct number of input dimensions.\n assert outputs.shape[-1] == (self.n_components)*3 + 1\n out_idx = 0 # keep track of which output we are working with.\n self.shift = outputs[0, :, out_idx:out_idx+1]; out_idx += 1\n with tf.name_scope(\"Mixture_of_Gaussians\"):\n with tf.variable_scope('network'):\n # get mixing proportions\n theta_raw = outputs[:, :,out_idx:out_idx+self.n_components];\n out_idx += self.n_components\n self.theta = tf.nn.softmax(theta_raw)\n\n log_sigmas = outputs[:, :, out_idx:out_idx+self.n_components];\n out_idx += self.n_components\n self.sigmas = tf.exp(log_sigmas,name=\"sigmas\")\n\n self.mus = outputs[:, :, out_idx:out_idx+self.n_components];\n out_idx += self.n_components\n for k in range(self.n_components):\n 
tf.summary.histogram(\"Gaussian_%d_sigma\"%k,self.sigmas[:,k])\n tf.summary.histogram(\"Gaussian_%d_proportion\"%k,self.theta[:,k])\n tf.summary.histogram(\"Gaussian_%d_mus\"%k,self.mus[:,k])\n\n ## Check that every output has been used\n assert out_idx == outputs.shape[-1]", "def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean 
'+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()", "def main(ngrains=100,sigma=15.,c2a=1.6235,mu=0.,\n prc='cst',isc=False,tilt_1=0.,\n tilts_about_ax1=0.,tilts_about_ax2=0.):\n if isc:\n h = mmm()\n else:\n h=np.array([np.identity(3)])\n gr = []\n for i in range(ngrains):\n dth = random.uniform(-180., 180.)\n if prc=='cst': g = gen_gr_fiber(th=dth,sigma=sigma,mu=mu,tilt=tilt_1,iopt=0) # Basal//ND\n elif prc=='ext': g = gen_gr_fiber(th=dth,sigma=sigma,mu=mu,tilt=tilt_1,iopt=1) # Basal//ED\n else:\n raise IOError('Unexpected option')\n for j in range(len(h)):\n temp = np.dot(g,h[j].T)\n\n ## tilts_about_ax1\n if abs(tilts_about_ax1)>0:\n g_tilt = rd_rot(tilts_about_ax1)\n temp = np.dot(temp,g_tilt.T)\n ## tilts_about_ax2?\n elif abs(tilts_about_ax2)>0:\n g_tilt = td_rot(tilts_about_ax2)\n temp = np.dot(temp,g_tilt.T)\n elif abs(tilts_about_ax2)>0 and abs(tilts_about_ax2)>0:\n raise IOError('One tilt at a time is allowed.')\n\n phi1,phi,phi2 = euler(a=temp, echo=False)\n gr.append([phi1,phi,phi2,1./ngrains])\n\n mypf=upf.polefigure(grains=gr,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,2],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n return np.array(gr)", "def map_all_sig_p(limitregion=False, region=\"allsky\"):\n \n # Get ids of all pixels that contain RHT data\n rht_cursor, tablename = get_rht_cursor(region = region)\n all_ids = get_all_rht_ids(rht_cursor, tablename)\n \n planck_tqu_db = sqlite3.connect(\"planck_TQU_gal_2048_db.sqlite\")\n planck_tqu_cursor = planck_tqu_db.cursor()\n planck_cov_db = sqlite3.connect(\"planck_cov_gal_2048_db.sqlite\")\n planck_cov_cursor = planck_cov_db.cursor()\n \n if limitregion is True:\n print(\"Loading all allsky data points that are in the SC_241 region\")\n # Get all ids that are in both allsky data and SC_241\n all_ids_SC = pickle.load(open(\"SC_241_healpix_ids.p\", \"rb\"))\n all_ids = list(set(all_ids).intersection(all_ids_SC))\n \n all_sigpGsq = np.zeros(len(all_ids))\n\n update_progress(0.0)\n for i, hp_index in enumerate(all_ids):\n #likelihood = Likelihood(_id[0], planck_tqu_cursor, planck_cov_cursor, p0_all, psi0_all, adaptivep0 = adaptivep0)\n (hp_index, T, Q, U) = planck_tqu_cursor.execute(\"SELECT * FROM Planck_Nside_2048_TQU_Galactic WHERE id = ?\", hp_index).fetchone()\n (hp_index, TT, TQ, TU, TQa, QQ, QU, TUa, QUa, UU) = planck_cov_cursor.execute(\"SELECT * FROM Planck_Nside_2048_cov_Galactic WHERE id = ?\", (hp_index,)).fetchone()\n \n # sigma_p as defined in arxiv:1407.0178v1 Eqn 3.\n sigma_p = np.zeros((2, 2), np.float_) # [sig_Q^2, sig_QU // sig_QU, UU]\n sigma_p[0, 0] = (1.0/T**2)*QQ #QQ\n sigma_p[0, 1] = (1.0/T**2)*QU #QU\n sigma_p[1, 0] = (1.0/T**2)*QU #QU\n sigma_p[1, 1] = (1.0/T**2)*UU #UU\n \n # det(sigma_p) = sigma_p,G^4\n det_sigma_p = np.linalg.det(sigma_p)\n sigpGsq = np.sqrt(det_sigma_p)\n \n all_sigpGsq[i] = sigpGsq\n \n update_progress((i+1.0)/len(all_ids), message='Calculating: ', final_message='Finished Calculating: ')\n \n # Place into healpix map\n hp_sigpGsq = make_hp_map(all_sigpGsq, all_ids, Nside = 2048, nest = True)\n \n out_root = 
\"/disks/jansky/a/users/goldston/susan/Wide_maps/\"\n if limitregion:\n hp.fitsfunc.write_map(out_root + \"planck_sigpGsq_SC_241.fits\", hp_sigpGsq, coord = \"G\", nest = True) \n else:\n hp.fitsfunc.write_map(out_root + \"planck_sigpGsq_DR2sky.fits\", hp_sigpGsq, coord = \"G\", nest = True)", "def synthetic_gen(self):\r\n logging.debug('generating synthetic map...')\r\n data = self.realData\r\n unit = Params.unitGrid\r\n x_min = np.floor(Params.LOW[0] / unit) * unit\r\n x_max = np.ceil(Params.HIGH[0] / unit) * unit\r\n y_min = np.floor(Params.LOW[1] / unit) * unit\r\n y_max = np.ceil(Params.HIGH[1] / unit) * unit\r\n\r\n x_CELL = int(np.rint((x_max - x_min) / unit))\r\n y_CELL = int(np.rint((y_max - y_min) / unit))\r\n\r\n self.root.n_box = np.array([[x_min, y_min], [x_max, y_max]])\r\n\r\n self.mapp = np.zeros((x_CELL, y_CELL)) - 1 # ## initialize every cell with -1\r\n for i in range(Params.NDATA): # ## populate the map\r\n point = data[:, i]\r\n cell_x = int(np.floor((point[0] - x_min) / unit))\r\n cell_y = int(np.floor((point[1] - y_min) / unit))\r\n if self.mapp[cell_x, cell_y] != -1:\r\n self.mapp[cell_x, cell_y] += 1\r\n else:\r\n self.mapp[cell_x, cell_y] = 1\r\n\r\n for i in range(x_CELL): # ## perturb the counts\r\n for j in range(y_CELL):\r\n if self.mapp[i, j] != -1:\r\n self.mapp[i, j] += np.rint(self.differ.getNoise(1, 0.5 * self.param.Eps))\r\n else:\r\n self.mapp[i, j] = np.rint(self.differ.getNoise(1, 0.5 * self.param.Eps))\r\n # if noisy count is negative, ignore the noise and generate no points\r\n if self.mapp[i, j] < 0:\r\n self.mapp[i, j] = 0", "def _initialize_gaussians(self, n_gaussians):\n if self.gaussian_init == 'manual':\n gaussians = torch.Tensor([\n list(product([0.25, 0.5, 0.75], repeat=2)) +\n [(0.5, 0.25), (0.5, 0.5), (0.5, 0.75)] +\n [(0.25, 0.5), (0.5, 0.5), (0.75, 0.5)] +\n [(0.5, 0.5)],\n [(-1.5, -1.5)] * 9 + [(0, -1.5)] * 3 + [(-1.5, 0)] * 3 +\n [(0, 0)],\n ]).permute(1, 2, 0)\n\n elif self.gaussian_init == 'random':\n with torch.no_grad():\n gaussians = torch.stack([\n torch.randn(\n n_gaussians, 2, dtype=torch.float) * .1 + 0.5,\n torch.randn(\n n_gaussians, 2, dtype=torch.float) * .2 - 1,],\n dim=2)\n\n else:\n raise NotImplementedError\n\n gaussians = nn.Parameter(gaussians, requires_grad=True)\n return gaussians", "def plot_gheat_g(seed=1):\n fig, ax = plt.subplots(figsize=[2.5*plotdl.latex_width_inch, 3*plotdl.latex_height_inch])\n \n r = Factory_psi1_psiN( \"aapta_of_s_N{number_of_points[0]}.npz\", N=400)\n ckg = r.create_if_missing(dict(model_name= [\"Anderson\",], \n number_of_points=[400,], bandwidth=[1,],\n dis_param=np.linspace(0,1,100),c=[1,], k=[1.57,], seed=np.arange(1,6))) \n color_seq = itertools.cycle(['b', 'g', 'r', 'c', 'm', 'y', 'k'])\n for (seed,c) in zip(np.arange(1,6),color_seq):\n ck = ckg[ckg['seed']==seed]\n g, psi_1, psi_N = ck['g'], ck['psi_N'], ck['psi_1']\n\n psi_heat = 2*(abs(psi_1)**2)*(abs(psi_N)**2) / ((abs(psi_1)**2) + (abs(psi_N)**2))\n \n phs = np.nansum(psi_heat,axis=1)\n \n psi1psiN = np.nansum(abs(psi_1*psi_N), axis=1)\n #print(ckg['dis_param'], phs)\n ax.plot(ck['dis_param'], phs,'.', color=c)\n ax.plot(ck['dis_param'], abs(g),'+', color=c)\n ax.plot(ck['dis_param'], psi1psiN,'d', color=c)\n ax.set_xlabel('dis_param')\n mkdir_and_savefig(fig, 'pta_comparison_of_s_N400.png')\n plt.close(fig)\n ## use last ck\n fig1, axes1 = plt.subplots(3,2,figsize=[2*plotdl.latex_width_inch, 3*plotdl.latex_height_inch],\n sharex=True, sharey=True)\n axes1.flat[0].xaxis.set_major_locator(MaxNLocator(4))\n 
axes1.flat[0].yaxis.set_major_locator(MaxNLocator(4))\n for n, ax1 in zip(range(1,20,3), axes1.flat):\n ax1.plot(abs(ck['psi_1'][n]), abs(ck['psi_N'][n]), '.') \n ax1.set_title(\"W = {:0.2}\".format(ck['dis_param'][n]))\n fig1.savefig('pta_psi_1_psi_2_N400.png')\n \n ax.cla()\n ax.plot(ck['dis_param'], np.real(g), label='real')\n ax.plot(ck['dis_param'], np.imag(g), label='imag')\n ax.plot(ck['dis_param'], np.abs(g), label='abs')\n ax.legend(loc = 'upper right')\n ax.set_xlabel('dis_param')\n ax.set_ylabel('g')\n mkdir_and_savefig(fig, 'pta_real_imag_g_s_N400')", "def FormG():\n for i in range(2):\n for j in range(2):\n G[i, j] = 0.0\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])", "def calculate_gains(self,data, map_fits, avg_map_fits):\n nFeeds,nBands,nChans,nParams = map_fits['Values'].shape\n frequencies = data[f'{self.level2}/averaged_frequency'][...]\n kb = 1.38064852e-23\n c = 2.99792458e8\n scale = 2 * kb * (1e9/ c)**2 * 1e26\n\n source = self.getSource(data)\n self.flux = np.zeros((len(self.feeds),nBands, nChans))\n self.gain = np.zeros((len(self.feeds),nBands, nChans))\n\n for i,(ifeed,feed) in enumerate(zip(self.feedlist,self.feeds)):\n for iband in range(nBands):\n nu = frequencies[iband]\n sigx = avg_map_fits[iband]['Values'][2] \n sigy = avg_map_fits[iband]['Values'][2]*avg_map_fits[iband]['Values'][4]\n amps = map_fits['Values'][i,iband,:,0]\n self.flux[i,iband,:] = 2*np.pi*amps*sigx*sigy*(np.pi/180.)**2 * scale*nu**2\n mdl_flux = self.models[source](nu,map_fits['MJD'],return_jansky=True,allpos=True)\n self.gain[i,iband,:] = self.flux[i,iband,:]/mdl_flux\n\n return self.flux, self.gain", "def g_func(init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoef, N):\r\n\tL, g = ilike(init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoef, N) \t\r\n\tp = np.zeros(N)\r\n\tfor i in range(N):\r\n\t if np.isfinite(vrad_obs[i]):\r\n\t p[i] = chi2.sf(g[i],3)\r\n\t else:\r\n\t p[i] = chi2.sf(g[i],2)\r\n\t \r\n\treturn p", "def sgd(self):\n for i, j, r in self.samples:\n # Computer prediction and error\n if (self.type=='bias'):\n prediction = self.get_rating_bias(i, j)\n elif(self.type=='nonbias') :\n prediction = self.get_rating(i, j)\n # print(i, j, r,prediction)\n e = (r - prediction)\n\n # Update biases\n self.b_u[i] =self.b_u[i]+ self.alpha * (e - self.beta * self.b_u[i])\n self.b_i[j] = self.b_i[j] + self.alpha * (e - self.beta * self.b_i[j])\n\n # Create copy of row of P since we need to update it but use older values for update on Q\n P_i = self.P[i, :][:]\n\n # Update user and item latent feature matrices\n # print(self.alpha * (e * self.Q[j, :] - self.beta * self.P[i, :]))\n # print(self.P[i, :])\n self.P[i, :] =self.P[i, :] + self.alpha * (e * self.Q[j, :] - self.beta * self.P[i, :])\n # print(self.P[i, :],\"&&&&&&\")\n self.Q[j, :] = self.Q[j, :] + self.alpha * (e * P_i - self.beta * self.Q[j, :])\n # print(self.Q[j, :])", "def dataModel():\n srcmap001 = fits.open('dataFiles/6gev_srcmap_001.fits')\n srcmap03 = fits.open('dataFiles/6gev_srcmap_03.fits')\n\n image_data = fits.getdata('6gev_image.fits')\n filename = get_pkg_data_filename('6gev_image.fits')\n hdu = fits.open(filename)[0]\n wcs = WCS(hdu.header)\n\n #Given the results of the fit, calculate the model\n modelData001 = np.zeros(srcmap001[0].shape)\n modelData03 = np.zeros(srcmap03[0].shape)\n\n file = open('plotsData/fitResults001.pk1','rb')\n fit001 = 
pickle.load(file)\n file.close()\n\n file = open('plotsData/fitResults03.pk1','rb')\n fit03 = pickle.load(file)\n file.close()\n\n\n for source in fit001:\n the_index = srcmap001.index_of(source)\n\n modelData001 += fit001[source][:, None, None]*srcmap001[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap001[the_index].data, axis=2), axis=1)[:-1, None, None]\n for source in fit03:\n the_index = srcmap03.index_of(source)\n modelData03 += fit03[source][:, None, None]*srcmap03[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap03[the_index].data, axis=2), axis=1)[:-1, None, None]\n\n fig = plt.figure(figsize=[12, 4.5])\n\n vmin = 0\n vmax = 70.0\n cbStep = 10.0\n ax = fig.add_subplot(121, projection=wcs)\n ax=plt.gca()\n ax.tick_params(direction='in')\n c = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax.get_transform('galactic'))\n ax.add_patch(c)\n mappable=plt.imshow((image_data),cmap='inferno',origin='lower',norm=colors.PowerNorm(gamma=0.6),vmin=vmin, vmax=vmax, interpolation='gaussian')#\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('Data ($>6$ GeV)')\n cb = plt.colorbar(mappable, label='Counts per pixel', pad=0.01,ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb.ax.tick_params(width=0)\n\n\n ax2=fig.add_subplot(122, projection=wcs)\n ax2 = plt.gca()\n\n sources = []\n sources.append({\n 'Name':'3FGL J1745.3-2903c',\n 'RA':266.3434922,\n 'DEC':-29.06274323,\n 'color':'xkcd:bright light blue'})\n\n sources.append({\n 'Name':'1FIG J1748.2-2816',\n 'RA':267.1000722,\n 'DEC':-28.27707114,\n 'color':'xkcd:fire engine red'\n })\n\n sources.append({\n 'Name':'1FIG J1746.4-2843',\n 'RA':266.5942898,\n 'DEC':-28.86244442,\n 'color':'xkcd:fluorescent green'\n })\n\n sources.append({\n 'Name':'Galactic Center',\n 'RA':266.417,\n 'DEC':-29.0079,\n 'color':'black'\n })\n\n #Add source names:\n for source in sources:\n l, b = ra_dec_to_l_b(source['RA'], source['DEC'])\n ax2.scatter(l, b, color=source['color'],marker='x',s=45.0, transform=ax2.get_transform('galactic'), label=source['Name'])\n\n c2 = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax2.get_transform('galactic'))\n ax2.add_patch(c2)\n mappable2 = plt.imshow((np.sum(modelData03,axis=0)), cmap='inferno',norm=colors.PowerNorm(gamma=0.6),origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('Model ($>6$ GeV)')\n cb2 = plt.colorbar(mappable2, label='Counts per pixel', pad=0.01, ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb2.ax.tick_params(width=0)\n leg = plt.legend(loc=1,frameon=True)\n leg.get_frame().set_alpha(0.5)\n leg.get_frame().set_edgecolor('white')\n text1 = leg.get_texts()\n for text in text1:\n text.set_color('black')\n\n fig.tight_layout()\n plt.subplots_adjust(wspace = 0.13, left=0.04, bottom=0.13, top=0.92)\n plt.show()\n #plt.savefig('plots/dataModelComparison.pdf',bbox_inches='tight')", "def make_100gaussians_image(noise=True):\n n_sources = 100\n flux_range = [500, 1000]\n xmean_range = [0, 500]\n ymean_range = [0, 300]\n xstddev_range = [1, 5]\n ystddev_range = [1, 5]\n params = {'flux': flux_range,\n 'x_mean': xmean_range,\n 'y_mean': ymean_range,\n 'x_stddev': xstddev_range,\n 'y_stddev': ystddev_range,\n 'theta': [0, 2 * np.pi]}\n\n rng = np.random.RandomState(12345)\n sources = QTable()\n for param_name, (lower, upper) in params.items():\n # Generate a column for every 
item in params, even if it\n # is not in the model (e.g., flux). However, such columns will\n # be ignored when rendering the image.\n sources[param_name] = rng.uniform(lower, upper, n_sources)\n xstd = sources['x_stddev']\n ystd = sources['y_stddev']\n sources['amplitude'] = sources['flux'] / (2.0 * np.pi * xstd * ystd)\n\n shape = (300, 500)\n data = make_gaussian_sources_image(shape, sources) + 5.0\n\n if noise:\n rng = np.random.RandomState(12345)\n data += rng.normal(loc=0.0, scale=2.0, size=shape)\n\n return data", "def graphe_gains(res_alea, res_glou, res_glou_e, res_ucb):\n T = np.arange(res_alea.size)\n fig, ax = plt.subplots()\n ax.grid(True)\n plt.xlabel(\"T\")\n plt.ylabel(\"Gain\")\n \n ax.plot(T, res_alea, label = 'random') \n ax.plot(T, res_glou, label = 'greedy')\n ax.plot(T, res_glou_e, label = 'e-greedy')\n ax.plot(T, res_ucb, label = 'UCB')\n ax.legend(loc = \"upper left\")\n plt.title(\"Gains of the 4 algorithms as a function of T\")", "def test_GA():\n\tgenerationSize = 150\n\tmutationProb = 0.01\n\tgenerations = 500\n\tX = []\n\tT = []\n\tY = [] \n\tfitnesses = [0]*generationSize\n\tfor i in range(DATA_POINTS_NUM):\n\t\tX.append((i - DATA_POINTS_NUM/2)*0.1)\n\t\tT.append(polynomi_3N(REFERENCE, X[-1]))\n\t\tY.append(0)\n\t\n\tga = GA.GA(generationSize, 4, mutationProb)\n\tgenomes = ga.seedGenomes()\n\t#plot initial genomes\n\tplt.figure(1)\n\tplt.title('Initial genomes')\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tprint Genome\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome, X[j]))\n\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t\n\t#live and learn\n\tfor k in range(generations):\n\t\tprint \".\",\n\t\tfor i in range(len(genomes)):\n\t\t\tGenome = prescale(genomes[i])\n\t\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\t\tfitnesses[i] = calculate_fitness(T, Y)\n\t\tga.fitnessUpdate(fitnesses)\n\t\tgenomes = ga.createNextGeneration()\n\t\t\n\t#plot final genomes\n\tplt.figure(2)\n\tplt.title('Final genomes')\n\tprint \"\\nfinal Genomes\"\n\tfor i in range(len(genomes)):\n\t\tGenome = prescale(genomes[i])\n\t\tfor j in range(DATA_POINTS_NUM):\n\t\t\tY[j] = (polynomi_3N(Genome,X[j]))\n\t\tprint \"fit:%5.1f [%7.4f, %7.4f, %7.4f, %7.4f]\"%\\\n\t\t (calculate_fitness(T, Y), Genome[0],\n\t\t Genome[1], Genome[2], Genome[3])\n\t\tplt.plot(X,Y, 'b-')\n\tplt.plot(X,T, 'r-')\n\t\t\n\t#plot progress\n\tP = []\n\thistory = ga.generations[:]\n\tfor f in history:\n\t\t#f[1].sort()\n\t\tP.append(max(f[1]))\n\tplt.figure(3)\n\tplt.title('progress')\n\tplt.plot(P)\n\tplt.show()\n\t\n\t#print the result:\t\n\tbestGene = fitnesses.index(max(fitnesses))\n\tG = prescale(genomes[bestGene])\n\tprint \"\"\n\tprint \"And the result is:\"\n\tprint \"%.4f => %.4f (%.4f)\"%(A, G[0], abs(A - G[0]))\n\tprint \"%.4f => %.4f (%.4f)\"%(B, G[1], abs(B - G[1]))\n\tprint \"%.4f => %.4f (%.4f)\"%(C, G[2], abs(C - G[2]))\n\tprint \"%.4f => %.4f (%.4f)\"%(D, G[3], abs(D - G[3]))", "def prediction_aggregation(self, xt_s,mu_s,var_s, method='PoE', weighting='uniform', power=26):\n\n nt = xt_s.shape[0]\n mu = np.zeros([nt, self.C],dtype='float64')\n var = np.zeros([nt, self.C],dtype='float64')\n\n prior_var = self.experts[0].kernel(xt_s[0], xt_s[0])\n\n \n #Process each latent gp individually \n for j in range(self.C):\n \n mu_s_c = mu_s[:, :, j]\n var_s_c = var_s[:, :, j]\n \n weight_matrix = compute_weights(mu_s_c, var_s_c, power, weighting, prior_var)\n \n prec_s= 
1/var_s_c\n\n if method == 'PoE':\n \n prec = tf.reduce_sum(prec_s, axis=0)\n \n\n if method == 'gPoE':\n \n weight_matrix = normalize_weights(weight_matrix)\n\n prec = tf.reduce_sum(weight_matrix * prec_s , axis=0)\n \n\n if method == 'BCM':\n \n prec = tf.reduce_sum(prec_s, axis=0) + (1 - self.M) / prior_var \n\n if method == 'rBCM':\n \n \n prec = tf.reduce_sum(weight_matrix * prec_s, axis=0) \\\n + (1 - tf.reduce_sum(weight_matrix, axis=0)) / prior_var\n \n \n \n if method != 'bar':\n \n var[:, j] = 1 / prec\n\n mu[:, j] = var[:, j] * tf.reduce_sum(weight_matrix * prec_s * mu_s_c, axis=0)\n \n else:\n \n weight_matrix = normalize_weights(weight_matrix)\n\n mu[:, j] = tf.reduce_sum(weight_matrix * mu_s_c, axis=0)\n var[:, j] = tf.reduce_sum(weight_matrix * var_s_c, axis=0)\n \n \n return self.lik_aggregation(mu, var)", "def learnGauss(metricArray): \n fit = gaussReg() \n n= 100 #You should probably change this...\n overlap = 0.3 \n imageSize = [100,400]\n densMap(fit, metricArray, n, overlap, imageSize )\n overlayMap('SmallTile.jpg', 'ContourPlot.jpg') \n # Final map is saved as OverlayMap.jpg", "def output_grib2(forecast,proj_dict,grid_dict,start_hour,end_hour,\n stride,size,run_date,target_dataset,smoothing,config):\n for d,date in enumerate(run_date):\n date_outpath = config.forecast_out_path+'20{0}/grib/'.format(\n date.strftime(config.run_date_format))\n \n if not os.path.exists(date_outpath):\n os.makedirs(date_outpath)\n\n lscale = 1e6\n grib_id_start = [7, 0, 14, 14, 2]\n \n filtered_forecast = gaussian_filter(forecast[d],smoothing,mode='constant')\n \n gdsinfo = np.array([0, np.product(filtered_forecast.shape[-2:]), 0, 0, 30], dtype=np.int32)\n \n lon_0 = proj_dict[\"lon_0\"]\n sw_lon = grid_dict[\"sw_lon\"]\n \n if lon_0 < 0:\n lon_0 += 360\n if sw_lon < 0:\n sw_lon += 360\n\n gdtmp1 = [1, 0, proj_dict['a'], 0, float(proj_dict['a']), 0, float(proj_dict['b']),\n filtered_forecast.shape[-1], filtered_forecast.shape[-2], grid_dict[\"sw_lat\"] * lscale,\n sw_lon * lscale, 0, proj_dict[\"lat_0\"] * lscale,\n lon_0 * lscale,\n grid_dict[\"dx\"] * 1e3 * stride, grid_dict[\"dy\"] * 1e3 * stride, 0b00000000, 0b01000000,\n proj_dict[\"lat_1\"] * lscale,\n proj_dict[\"lat_2\"] * lscale, -90 * lscale, 0]\n pdtmp1 = np.array([1, 31, 4, 0, 31, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1], dtype=np.int32)\n drtmp1 = np.array([0, 0, 4, 8, 0], dtype=np.int32)\n time_list = list(date.utctimetuple()[0:6])\n grib_objects = Grib2Encode(0, np.array(grib_id_start + time_list + [2, 1], dtype=np.int32))\n grib_objects.addgrid(gdsinfo, gdtmp1)\n pdtmp1[8] = end_hour\n pdtmp1[-2] = 0\n grib_objects.addfield(1, pdtmp1, 0, drtmp1, filtered_forecast)\n grib_objects.end()\n filename = date_outpath + \"{0}_{6}_Hail_{1}_Cali_NMEP_{2}mm_{3}_Hours_{4}-{5}.grib2\".format(\n config.ensemble_name,\n target_dataset,\n size,\n date.strftime(config.run_date_format),\n start_hour,end_hour,config.forecast_model_names)\n print(\"Writing to \" + filename )\n \n grib_file = open(filename, \"wb\")\n grib_file.write(grib_objects.msg)\n grib_file.close()\n\n return", "def createDefaultFilterbank(window):\n # Gaussians:: G1 = N(0, 1), G2 = N(0, 2), G3 = N(0, 4)\n # Laplacian of Gaussians:: LoG1 = Lap(N(0, 1)), LoG2=Lap(N(0, 2)), LoG3=Lap(N(0, 4)), LoG4=Lap(N(0, 8))\n # Derivative of Gaussian (x):: Div1xG1 = d/dx N(0,2), Div1xG2=d/dx N(0,4)\n # Derivative of Gaussian (y): Div1yG1 = d/dy N(0,2), Div1yG2=d/dy N(0,4)\n \n G1 = gaussian_kernel(window, window, 1)\n G2 = gaussian_kernel(window, window, 2)\n G3 = gaussian_kernel(window, 
window, 4)\n \n # see http://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm\n LoG1 = laplacianOfGaussian_kernel(window, window, 1)\n LoG2 = laplacianOfGaussian_kernel(window, window, 2)\n LoG3 = laplacianOfGaussian_kernel(window, window, 4)\n LoG4 = laplacianOfGaussian_kernel(window, window, 8)\n \n dx_G1 = gaussian_1xDerivative_kernel(window, window, 2)\n dx_G2 = gaussian_1xDerivative_kernel(window, window, 4)\n \n dy_G1 = gaussian_1yDerivative_kernel(window, window, 2)\n dy_G2 = gaussian_1yDerivative_kernel(window, window, 4)\n \n return np.array([G1, G2, G3, LoG1, LoG2, LoG3, LoG4, dx_G1, dx_G2, dy_G1, dy_G2])", "def pure_gabor():\n \n dots = pickle.load(open(\"/Users/bptripp/code/nengo-FPGA/v1/dot-images-coh1-2000ms-s02.p\", \"rb\" ), encoding='latin1') \n x = np.arange(-40, 41, 1)\n gaborx, gabory = make_gabors(x)\n centres = np.array([[200,200]])\n \n nf = dots.shape[2]\n nrf = centres.shape[0] # number of receptive fields\n ng = gaborx.shape[1] # number of gabors per receptive field\n \n # offsets (from RF centres) of subimages to multiply with kernels\n vw = int(np.floor(gabory.size/2))\n v_offsets = np.arange(-vw, vw+1)\n hw = int(np.floor(gaborx.shape[0]/2))\n h_offsets = np.arange(-hw, hw+1)\n \n result = np.zeros((nrf, ng, nf))\n for i in range(dots.shape[2]): \n for j in range(nrf): \n v_indices = v_offsets + centres[j,0]\n h_indices = h_offsets + centres[j,1]\n region = dots[v_indices[:,np.newaxis],h_indices,i]\n for k in range(ng): \n gabor = np.outer(gabory, gaborx[:,k])\n result[j,k,i] = np.sum(gabor * region)\n return result", "def run_grav(self):\n\n # Direct solution\n self.prod_w = []\n self.prod_o = []\n t0 = time.time()\n # self.set_volumes_in_primal()\n self.set_sat_in()\n self.set_lamb_2()\n self.set_global_problem_vf_3_gr1_bif()\n self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))\n self.organize_Pf()\n del self.Pf\n self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))\n del self.Pf_all\n self.test_conservation_fine()\n # self.store_flux_pf_gr_bif = self.create_flux_vector_pf_gr_bif_1()\n\n \"\"\"\n ################################################################\n # Multiscale solution\n self.calculate_restriction_op_2()\n self.calculate_prolongation_op_het()\n self.organize_op()\n self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)\n self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)\n self.Pc = self.solve_linear_problem(self.Tc, self.Qc, self.nc)\n self.set_Pc()\n self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)\n\n del self.trilOP\n del self.trilOR\n del self.Tc\n del self.Qc\n del self.Pc\n\n self.organize_Pms()\n del self.Pms\n self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))\n del self.Pms_all\n self.erro()\n\n self.test_conservation_coarse_gr()\n # self.Neuman_problem_6_gr()\n # self.store_flux_pms_gr = self.create_flux_vector_pms_gr()\n ####################################################################\n \"\"\"\n\n\n\n\n\n\n\n print('done')\n self.mb.write_file('new_out_bif_gr.vtk')\n\n\n shutil.copytree(self.caminho1, self.pasta)", "def _compute_gp_all_passbands_2D(obj, dataset, number_gp, t_min, t_max,\n output_root=None, **kwargs):\n obj_data = dataset.data[obj] # object's lightcurve\n gp_times = np.linspace(t_min, t_max, number_gp)\n filter_set = np.asarray(dataset.filter_set)\n\n kernel, gp_params, 
gp_predict = fit_2d_gp(obj_data, return_kernel=True,\n return_gp_params=True)\n gp_wavelengths = np.vectorize(pb_wavelengths.get)(filter_set)\n obj_gps = predict_2d_gp(gp_predict, gp_times, gp_wavelengths)\n\n if output_root is not None:\n obj_gps.write(os.path.join(output_root, f'gp_{obj}'), format='fits',\n overwrite=True)\n path_save_gps = os.path.join(output_root, f'used_gp_{obj}.pckl')\n path_save_kernels = os.path.join(output_root, f'used_kernels_{obj}'\n '.pckl')\n path_save_params = os.path.join(output_root, f'used_params_{obj}.pckl')\n # Save the GP already conditioned on a specific set of observations\n with open(path_save_gps, 'wb') as f:\n pickle.dump(gp_predict, f, pickle.HIGHEST_PROTOCOL)\n with open(path_save_kernels, 'wb') as f:\n pickle.dump(kernel, f, pickle.HIGHEST_PROTOCOL)\n with open(path_save_params, 'wb') as f:\n pickle.dump(gp_params, f, pickle.HIGHEST_PROTOCOL)\n\n return obj_gps", "def extract_features(img, sigmas, n_features): \n dims = img.shape # dimensions of the image\n \n features = np.zeros((dims[0], dims[1], n_features)) # each feature map has the same size as the input image\n \n # the first feature we use is the pixel intensity in the green channel itself\n img_g = img[:,:,1] #I just assume it follows the RGB convention and not GBR or BGR...\n features[:,:,0] = img_g\n features[:,:,1] = np.sum(img,axis=2) \n \n gabors = get_gabors() \n \n # >>> YOUR CODE STARTS HERE <<<\n i = 2\n# for s in sigmas:\n# gfilters = gauss_filter(s)\n# for gf in gfilters:\n# features[:,:,i] = scipy.signal.fftconvolve(img_g, gf, mode='same') ;i+=1\n for s in sigmas:\n gauss = gauss_filter(s)\n for g in gauss:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, g, mode='same') ;i+=1\n \n for gabor in gabors:\n features[:,:,i] = scipy.signal.fftconvolve(img_g, gabor, mode='same') ;i+=1\n \n \n features[:,:,i] = sobel(img_g, axis=0) ;i+=1\n features[:,:,i] = sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = sobel(img_g, axis=0)+sobel(img_g, axis=1) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0.0) ;i+=1\n features[:,:,i] = feature.canny(img_g, sigma=0, low_threshold=13, high_threshold=50);i+=1\n features[:,:,i] = feature.canny(img_g, sigma=1)\n # >>> YOUR CODE ENDS HERE <<< \n \n return features", "def IR():\n s = np.array(\n [2.40774137,2.287696084,2.203613927,2.048710132,1.899829585,1.591776247,\n 2.021218754,2.572949552,3.298381484,3.635993426,3.788266224,3.8307278,3.834208811]\n )\n\n TI = np.array([50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 1500, 2000, 3000])\n\n comp1 = s * np.array([-159.1,-134.2,-109.1,-64.7,25.0,40.1,88.6,126.8,187.6,219.4,245.4,253.6,256.1])\n comp2 = s * np.array([-368.3,-356.9,-343.8,-318.1,-292.0,-242.5,-199.3,-158.4,-68.8,14.2,131.9,219.5,333.5])\n comp3 = s * np.array([-77.5,-51.9,-29.8,9.9,40.2,85.7,115.4,135.1,160.1,167.6,172.3,171.7,171.8])\n comp4 = s * np.array([-265.0,-240.6,-216.7,-170.5,-128.2,-53.5,9.6,62.3,159.7,223.8,296.5,328.3,346.7])\n comp5 = s * np.array([-346.5,-328.9,-312.1,-278.5,-244.4,-182.3,-128.0,-80.0,30.8,109.3,225.1,299.5,372.2])\n\n comp = [comp1, comp2, comp3, comp4, comp5]\n MSE = []\n colors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#9467bd\"]\n x_new = np.linspace(0, 3000, 10000)\n for i, j, k in zip(comp, colors, np.arange(1, 6)):\n plt.scatter(TI, i, c=j)\n # popt, _ = curve_fit(MZ, TI, i, p0=np.array([200, 220, 300]))\n popt, _ = curve_fit(MZ, TI, i, p0=np.array([300, 220]))\n # M_z0, T1, M0 = popt\n M0, T1 = popt\n y_new = MZ(x_new, *popt)\n plt.plot(x_new, y_new, \"--\", c=j, 
label=f\"Fit Comp. {k:d} : $T_1$={T1:3.2f}\")\n MSE.append(mean_squared_error(i, MZ(TI, *popt))) # evaluate the fit at the measured TI values\n print(MSE)\n print(np.mean(MSE))\n plt.grid()\n plt.legend(loc=\"best\")\n plt.xlabel(\"TI\")\n plt.ylabel(r\"Signal Intensity $M_z$\")\n plt.show()", "def SC_generation(hourly_radiation, prop_observers, number_groups, weather_data, g, Sz, Az, ha, Tin_C, height,\n panel_properties, latitude):\n\n\n n0 = panel_properties['n0']\n c1 = panel_properties['c1']\n c2 = panel_properties['c2']\n mB0_r = panel_properties['mB0_r']\n mB_max_r = panel_properties['mB_max_r']\n mB_min_r = panel_properties['mB_min_r']\n C_eff = panel_properties['C_eff']\n t_max = panel_properties['t_max']\n IAM_d = panel_properties['IAM_d']\n Aratio = panel_properties['aperture_area_ratio']\n Apanel = panel_properties['module_area']\n dP1 = panel_properties['dP1']\n dP2 = panel_properties['dP2']\n dP3 = panel_properties['dP3']\n dP4 = panel_properties['dP4']\n Cp_fluid_JperkgK = panel_properties['Cp_fluid'] # J/kgK\n\n # create lists to store results\n list_results = [None] * number_groups\n list_areas_groups = [None] * number_groups\n Sum_mcp_kWperC = np.zeros(8760)\n Sum_qout_kWh = np.zeros(8760)\n Sum_Eaux_kWh = np.zeros(8760)\n Sum_qloss = np.zeros(8760)\n Sum_radiation_kWh = np.zeros(8760)\n\n Tin_array_C = np.zeros(8760) + Tin_C\n aperature_area_per_module = Aratio * Apanel\n total_area_module = prop_observers['total_area_module'].sum() # total area for panel installation\n\n # calculate equivalent length of pipes\n lv = panel_properties['module_length'] # module length\n number_modules = round(total_area_module/Apanel) # this is an estimation\n l_ext_mperm2 = (2 * lv * number_modules/ (total_area_module * Aratio)) # pipe length within the collectors\n l_int_mperm2 = 2 * height / (total_area_module * Aratio) # pipe length from building substation to roof top collectors\n Leq_mperm2 = l_int_mperm2 + l_ext_mperm2 # in m/m2 aperture\n\n if panel_properties['type'] == 'ET': # for evacuated tubes\n Nseg = 100 # default number of subdivisions for the calculation\n else:\n Nseg = 10 # default number of subdivisions for the calculation\n\n for group in range(number_groups):\n # load panel angles from group\n teta_z = prop_observers.loc[group, 'surface_azimuth'] # azimuth of panels of group\n area_per_group = prop_observers.loc[group, 'total_area_module']\n tilt_angle_deg = prop_observers.loc[group, 'tilt'] # tilt angle of panels\n\n # create dataframe with irradiation from group\n\n radiation_Wh = pd.DataFrame({'I_sol': hourly_radiation[group]})\n radiation_Wh['I_diffuse'] = weather_data.ratio_diffhout * radiation_Wh.I_sol # calculate diffuse radiation\n radiation_Wh['I_direct'] = radiation_Wh['I_sol'] - radiation_Wh['I_diffuse'] # calculate direct radiation\n radiation_Wh.fillna(0, inplace=True) # set nan to zero\n\n # calculate incidence angle modifier for beam radiation\n IAM_b = calc_IAM_beam_SC(Az, g, ha, teta_z, tilt_angle_deg, panel_properties['type'], Sz, latitude)\n\n # calculate heat production from a solar collector of each group\n list_results[group] = calc_SC_module(tilt_angle_deg, IAM_b, IAM_d, radiation_Wh.I_direct,\n radiation_Wh.I_diffuse, weather_data.drybulb_C, n0,\n c1, c2, mB0_r, mB_max_r, mB_min_r, C_eff, t_max,\n aperature_area_per_module, dP1, dP2, dP3, dP4,\n Cp_fluid_JperkgK, Tin_C, Leq_mperm2, l_ext_mperm2,\n l_int_mperm2, Nseg)\n\n\n # multiplying the results with the number of panels in each group and write to list\n number_modules_per_group = area_per_group / Apanel\n list_areas_groups[group] = 
area_per_group\n radiation_array = hourly_radiation[group] * list_areas_groups[group] / 1000 # kWh\n Sum_qout_kWh = Sum_qout_kWh + list_results[group][1] * number_modules_per_group\n Sum_Eaux_kWh = Sum_Eaux_kWh + list_results[group][2] * number_modules_per_group\n Sum_qloss = Sum_qloss + list_results[group][0] * number_modules_per_group\n Sum_mcp_kWperC = Sum_mcp_kWperC + list_results[group][5] * number_modules_per_group\n Sum_radiation_kWh = Sum_radiation_kWh + radiation_Wh['I_sol']*area_per_group/1000\n\n Tout_group_C = (Sum_qout_kWh / Sum_mcp_kWperC) + Tin_C # in C assuming all collectors are connected in parallel\n\n Final = pd.DataFrame(\n {'Q_SC_gen_kWh': Sum_qout_kWh, 'T_SC_sup_C': Tin_array_C, 'T_SC_re_C': Tout_group_C, 'mcp_SC_kWperC': Sum_mcp_kWperC, 'Eaux_SC_kWh': Sum_Eaux_kWh,\n 'Q_SC_l_kWh': Sum_qloss, 'Area_SC_m2': sum(list_areas_groups), 'radiation_kWh': Sum_radiation_kWh}, index=range(8760))\n\n return list_results, Final", "def global_analysis(tomo, b_th, c=18):\n\n ## Thresholding and Volume analysis\n if c == 6:\n con_mat = [ [[0, 0, 0], [0, 1, 0], [0, 0, 0]],\n [[0, 1, 0], [1, 1, 1], [0, 1, 0]],\n [[0, 0, 0], [0, 1, 0], [0, 0, 0]] ]\n elif c == 18:\n con_mat = [[[0, 1, 0], [1, 1, 1], [0, 1, 0]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[0, 1, 0], [1, 1, 1], [0, 1, 0]]]\n elif c == 26:\n con_mat = [[[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]\n else:\n raise ValueError('c must be 6, 18 or 26')\n tomo_lbl, num_lbls = sp.ndimage.label(tomo >= b_th, structure=con_mat) # use the connectivity selected above\n tomo_out = np.zeros(shape=tomo.shape, dtype=int)\n lut = np.zeros(shape=num_lbls+1, dtype=int)\n\n ## COUNTING REGIONS METHODS\n # import time\n # hold_t = time.time()\n # for lbl in range(1, num_lbls + 1):\n # ids = tomo == lbl\n # feat_sz = len(ids)\n # tomo_out[ids] = feat_sz\n # # print('[1]:', lbl, 'of', num_lbls)\n # print time.time() - hold_t\n\n ## COUNTING PIXELS METHOD\n ## Count loop\n # cont, total = 0, np.prod(tomo.shape)\n # import time\n # hold_t = time.time()\n for x in range(tomo.shape[0]):\n for y in range(tomo.shape[1]):\n for z in range(tomo.shape[2]):\n id = tomo_lbl[x, y, z]\n lut[id] += 1\n # cont += 1\n # print('[1]:', cont, 'of', total)\n #\n ## Write loop\n # cont, total = 0, np.prod(tomo.shape)\n\n for x in range(tomo.shape[0]):\n for y in range(tomo.shape[1]):\n for z in range(tomo.shape[2]):\n id = tomo_lbl[x, y, z]\n if id > 0:\n tomo_out[x, y, z] = lut[id]\n # cont += 1\n # print('[1]:', cont, 'of', total)\n # print time.time() - hold_t\n\n return tomo_out", "def _make_gaussian_maps(x, gaussians, size=None, scaling=6.):\n if size is None:\n size = x.shape[-2:]\n bs = x.shape[0]\n else:\n size = [size] * 2\n bs = 1\n dtype = x.dtype\n device = x.device\n\n gaussian_maps = []\n map_template = torch.ones(*size, dtype=dtype, device=device)\n meshgrids = torch.meshgrid(\n [torch.linspace(0, 1, size[0], dtype=dtype, device=device),\n torch.linspace(0, 1, size[1], dtype=dtype, device=device),])\n\n for gaussian_idx, yx_mu_logstd in enumerate(torch.unbind(gaussians)):\n map = map_template.clone()\n for mu_logstd, mgrid in zip(yx_mu_logstd, meshgrids):\n mu = mu_logstd[0]\n std = torch.exp(mu_logstd[1])\n map *= torch.exp(-((mgrid - mu) / std) ** 2 / 2)\n\n map *= scaling\n gaussian_maps.append(map)\n\n gaussian_maps = torch.stack(gaussian_maps)\n gaussian_maps = gaussian_maps.unsqueeze(0).expand(bs, -1, -1, -1)\n return gaussian_maps", "def residmapComparison():\n srcmap001 = fits.open('dataFiles/6gev_srcmap_001.fits')\n srcmap03 = 
fits.open('dataFiles/6gev_srcmap_03.fits')\n\n image_data = fits.getdata('dataFiles/6gev_image.fits')\n filename = get_pkg_data_filename('dataFiles/6gev_image.fits')\n hdu = fits.open(filename)[0]\n wcs = WCS(hdu.header)\n\n #Given the results of the fit, calculate the model\n modelData001 = np.zeros(srcmap001[0].shape)\n modelData03 = np.zeros(srcmap03[0].shape)\n\n file = open('plotsData/fitResults001.pk1','rb')\n fit001 = pickle.load(file)\n file.close()\n\n file = open('plotsData/fitResults03.pk1','rb')\n fit03 = pickle.load(file)\n file.close()\n\n\n for source in fit001:\n the_index = srcmap001.index_of(source)\n\n modelData001 += fit001[source][:, None, None]*srcmap001[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap001[the_index].data, axis=2), axis=1)[:-1, None, None]\n for source in fit03:\n the_index = srcmap03.index_of(source)\n modelData03 += fit03[source][:, None, None]*srcmap03[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap03[the_index].data, axis=2), axis=1)[:-1, None, None]\n\n fig = plt.figure(figsize=[12, 4.5])\n\n vmin = -25.0\n vmax = 25.0\n cbStep = 5.0\n ax = fig.add_subplot(121, projection=wcs)\n ax=plt.gca()\n ax.tick_params(direction='in')\n c = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax.get_transform('galactic'))\n ax.add_patch(c)\n mappable=plt.imshow((image_data-np.sum(modelData001,axis=0)),cmap='seismic',origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')#\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('GC Point Source ($>6$ GeV)')\n cb = plt.colorbar(mappable, label='Residual counts per pixel', pad=0.01,ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb.ax.tick_params(width=0)\n\n\n ax2=fig.add_subplot(122, projection=wcs)\n ax2 = plt.gca()\n c2 = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax2.get_transform('galactic'))\n ax2.add_patch(c2)\n mappable2 = plt.imshow((image_data-np.sum(modelData03,axis=0)), cmap='seismic',origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('GC Extended Source ($>6$ GeV)')\n cb2 = plt.colorbar(mappable2, label='Residual counts per pixel', pad=0.01, ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb2.ax.tick_params(width=0)\n fig.tight_layout()\n plt.subplots_adjust(wspace = 0.13, left=0.04, bottom=0.13, top=0.92)\n #plt.savefig('plots/residComparison.pdf',bbox_inches='tight')\n plt.show()", "def womgau(hop):\n import numpy as np\n import logging\n import matplotlib.pyplot as plt\n from scipy.optimize import curve_fit\n from tmath.wombat.womwaverange import womwaverange\n from tmath.wombat.womget_element import womget_element\n from tmath.wombat.inputter import inputter\n from tmath.wombat.inputter_single import inputter_single\n from tmath.wombat.gauss import gauss\n from tmath.wombat.gauss_cont import gauss_cont\n from tmath.wombat.yesno import yesno\n print(' ')\n logging.info('Object is {}'.format(hop[0].obname))\n print(' ')\n print('Spectrum runs from {} to {}'.format(hop[0].wave[0],hop[0].wave[-1]))\n print(' ')\n print('This routine expects the spectrum to be in flambda units.')\n print('It also expects a linear wavelength scale.')\n print(' ')\n print('Choose general region of spectrum\\n')\n nwave,nflux,mode=womwaverange(hop[0].wave,hop[0].flux,'none')\n print('\\nNow pick the exact range for the fit')\n 
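# the second interactive pass narrows the chosen region to the exact window used for the fit\n 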
waveint,fluxint,mode=womwaverange(nwave,nflux,mode)\n indexblue=womget_element(nwave, waveint[0])\n indexred=womget_element(nwave,waveint[-1])\n if (mode == 'w'):\n done = False\n while (not done):\n print(' ')\n wavecenter=inputter('Enter approximate center of Gaussian : ','float',False)\n indexcenter=womget_element(waveint,wavecenter)\n if (indexcenter <= 0) or (wavecenter > waveint[-1]):\n print('Bad central wavelength, try again')\n else:\n done = True\n else:\n done=False\n while (not done):\n print('Mark the approximate center of the Gaussian')\n pickcent=plt.ginput(1,timeout=-1)\n indexcenter=womget_element(waveint,pickcent[0][0])\n print('\\nApproximate center at {}'.format(waveint[indexcenter]))\n print('\\nIs this OK?')\n answer=yesno('y')\n if (answer == 'y'):\n done=True\n weights=np.sqrt(hop[0].var[indexblue:indexred+1])\n print(' ')\n continuum=inputter_single('Do you want to fit gaussian with (c)ontinuum, or (n)o continuum? ','cn')\n if (continuum == 'c'):\n p=[fluxint[indexcenter], waveint[indexcenter],3.0,1.0,waveint[0]]\n result=curve_fit(gauss_cont,waveint,fluxint,sigma=weights,p0=p,absolute_sigma=True,full_output=True)\n else:\n p=[fluxint[indexcenter], waveint[indexcenter],3.0]\n result=curve_fit(gauss,waveint,fluxint,sigma=weights,p0=p,absolute_sigma=True,full_output=True)\n coefferr=np.sqrt(np.diag(result[1]))\n coeff=result[0]\n # make 'finer-grained' version of fit, 0.2A/pix for calculations\n wavecalc=np.arange(2*5*50*abs(coeff[2]))*0.2+coeff[1]-0.2*5*50*abs(coeff[2])\n calccenter=womget_element(wavecalc,coeff[1])\n if (continuum == 'c'):\n fluxcalc=gauss_cont(wavecalc,*coeff)\n fluxcont=wavecalc*coeff[3]+coeff[4]\n fluxgaussian=fluxcalc-fluxcont\n linecont=fluxcont[calccenter]\n else:\n fluxcalc=gauss(wavecalc,*coeff)\n \n \n deltafit=wavecalc[1]-wavecalc[0]\n calcindexblue=womget_element(wavecalc,waveint[0])\n calcindexred=womget_element(wavecalc,waveint[-1])\n sumfluxcalc=np.sum(fluxcalc[calcindexblue:calcindexred+1]*deltafit)\n sumallfluxcalc=np.sum(fluxcalc*deltafit)\n chi=(result[2]['fvec']**2).sum()\n redchi=chi/(len(waveint)-len(coeff))\n if (continuum == 'c'):\n sumfluxgaussian=np.sum(fluxgaussian[calcindexblue:calcindexred+1]*deltafit)\n sumallfluxgaussian=np.sum(fluxgaussian*deltafit)\n sumfluxcont=np.sum(fluxcont[calcindexblue:calcindexred+1]*deltafit)\n sumallfluxcont=np.sum(fluxcont*deltafit)\n sumallfluxcont_test=np.sum(fluxcont)\n # propagate uncertainty (from old version) not sure this is correct\n height_pct=coefferr[0]/coeff[0]\n sigma_pct=coefferr[2]/coeff[2]\n flux_pct=np.sqrt(height_pct**2+sigma_pct**2)\n sumfluxgaussiansig=sumfluxgaussian*flux_pct\n sumallfluxgaussiansig=sumallfluxgaussian*flux_pct\n plt.cla()\n plt.plot(nwave,nflux,drawstyle='steps-mid',color='k')\n plt.ylabel('Flux')\n plt.xlabel('Wavelength')\n xmin,xmax=plt.xlim()\n ymin,ymax=plt.ylim()\n plt.plot(wavecalc,fluxcalc,drawstyle='steps-mid',color='b')\n if (continuum == 'c'):\n plt.plot(wavecalc,fluxgaussian,drawstyle='steps-mid',color='r')\n plt.plot(wavecalc,fluxcont,drawstyle='steps-mid',color='g')\n plt.plot([waveint[0],waveint[0]],[ymin,ymax],color='k',linestyle='--')\n plt.plot([waveint[-1],waveint[-1]],[ymin,ymax],color='k',linestyle='--')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n logging.info('For object {} Gaussian fit'.format(hop[0].obname))\n if (continuum == 'c'):\n print('\\nData = Black, Fit = Blue, Continuum = Green, Fit-Continuum = Red\\n')\n else:\n print('\\nData = Black, Fit = Blue\\n')\n logging.info('Height 
{:16.8f}+/-{:16.8f}'.format(coeff[0],coefferr[0]))\n logging.info('Center {:16.8f}+/-{:16.8f}'.format(coeff[1],coefferr[1]))\n logging.info('Sigma {:16.8f}+/-{:16.8f}'.format(coeff[2],coefferr[2]))\n if (continuum == 'c'):\n FWHM = 2.35482*np.abs(coeff[2])\n rest_wave = input('Rest wavelength [N/A]: ') or None\n redshift = input('Redshift [N/A]: ') or None\n if rest_wave:\n rest_wave = float(rest_wave)\n w1 = (rest_wave - FWHM/2.)/(1.+float(redshift)) \n w2 = (rest_wave + FWHM/2.)/(1.+float(redshift)) \n c = 299792.458\n v1 = -1.*c*((rest_wave/w1)**2. - 1)/(1+((rest_wave/w1)**2.))\n v2 = -1.*c*((rest_wave/w2)**2. - 1)/(1+((rest_wave/w2)**2.))\n logging.info('Slope {:16.8f}+/-{:16.8f}'.format(coeff[3],coefferr[3]))\n logging.info('Y-intercept {:16.8f}+/-{:16.8f}'.format(coeff[4],coefferr[4]))\n logging.info('FWHM {:16.8f}+/-{:16.8f}'.format(2.35482*np.abs(coeff[2]),2.35482*coefferr[2]))\n logging.info('FWHM (velocity) {:16.8f} km/s'.format(v2-v1))\n logging.info('Flux between dotted lines (Gaussian): {:16.8f}+/-{:16.8f}'.format(sumfluxgaussian, sumfluxgaussiansig))\n logging.info('EW between dotted lines (Gaussian): {:16.8f}'.format(sumfluxgaussian/linecont))\n logging.info('Flux for full (Gaussian): {:16.8f}+/-{:16.8f}'.format(sumallfluxgaussian, sumallfluxgaussiansig))\n logging.info('EW for full (Gaussian): {:16.8f}'.format(sumallfluxgaussian/linecont))\n logging.info('Continuum flux at line center: {:16.8f}'.format(linecont))\n\n \n logging.info('Chi^2: {}'.format(chi))\n logging.info('Reduced chi^2: {}'.format(redchi))\n logging.info('All fluxes might need to be scaled by 1e-15')\n print(' ')\n return hop", "def check_gabor_approx(): \n x = np.arange(-40, 41, 1)\n DOG2 = make_DOG(3, x)\n DOGs = get_DOGs(3, x, (x.size, x.size))\n gaborx, gabory = make_gabors(x)\n n_gabors = gaborx.shape[1]\n\n w_kernel = pickle.load(open(\"/Users/bptripp/code/nengo-FPGA/v1/dog-gabor-weights-new.p\", \"rb\" ))\n\n plt.figure()\n for i in range(n_gabors):\n gabor = np.outer(gabory, gaborx[:,i])\n approx = np.dot(DOGs, w_kernel[:,:,i].flatten())\n plt.subplot(3,n_gabors,i+1)\n plt.imshow(gabor)\n plt.subplot(3,n_gabors,i+n_gabors+1)\n plt.imshow(np.reshape(approx, (x.size, x.size)))\n plt.subplot(3,n_gabors,i+2*n_gabors+1) \n plt.imshow(nd.filters.convolve(w_kernel[:,:,i].T, DOG2))\n plt.show()", "def two_gaussians(x):\n log_like1 = (\n -0.5 * n * pt.log(2 * np.pi)\n - 0.5 * pt.log(dsigma)\n - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)\n )\n log_like2 = (\n -0.5 * n * pt.log(2 * np.pi)\n - 0.5 * pt.log(dsigma)\n - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)\n )\n return pt.log(w1 * pt.exp(log_like1) + w2 * pt.exp(log_like2))", "def _plot_pr2(gc_val_lst: list, at_val_lst: list, g3_val_lst: list, a3_val_lst: list, organism_name: str | None = None,\n save_image: bool = False, folder_path: str = 'Report', gene_analysis: bool = True):\n N = len(gc_val_lst)\n gc_arr = np.array(gc_val_lst)\n g3_arr = np.array(g3_val_lst)\n at_arr = np.array(at_val_lst)\n a3_arr = np.array(a3_val_lst)\n x = g3_arr / gc_arr\n y = a3_arr / at_arr\n # [[x_val], [y_val]]\n ag_lst = [[], []]\n ag_count = 0\n tg_lst = [[], []]\n tg_count = 0\n tc_lst = [[], []]\n tc_count = 0\n ac_lst = [[], []]\n ac_count = 0\n no_bias = [[], []]\n no_bias_count = 0\n for x_val, y_val in zip(x, y):\n if x_val > 0.5 and y_val > 0.5:\n ag_lst[0].append(x_val)\n ag_lst[1].append(y_val)\n ag_count += 1\n elif x_val > 0.5 > y_val:\n tg_lst[0].append(x_val)\n tg_lst[1].append(y_val)\n tg_count += 1\n elif x_val < 0.5 and y_val < 0.5:\n 
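# both third-position ratios below 0.5, i.e. the TC quadrant of the PR2 plane\n 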
tc_lst[0].append(x_val)\n tc_lst[1].append(y_val)\n tc_count += 1\n elif x_val < 0.5 < y_val:\n ac_lst[0].append(x_val)\n ac_lst[1].append(y_val)\n ac_count += 1\n else:\n no_bias[0].append(x_val)\n no_bias[1].append(y_val)\n no_bias_count += 1\n fig, ax = plt.subplots(figsize=(9, 5.25))\n ax.scatter(ag_lst[0], ag_lst[1], s=5, label='AG', alpha=0.5, zorder=2)\n ax.scatter(tg_lst[0], tg_lst[1], s=5, label='TG', alpha=0.5, zorder=2)\n ax.scatter(tc_lst[0], tc_lst[1], s=5, label='TC', alpha=0.5, zorder=2)\n ax.scatter(ac_lst[0], ac_lst[1], s=5, label='AC', alpha=0.5, zorder=2)\n ax.scatter(no_bias[0], no_bias[1], s=5, label=f'No Bias', alpha=0.5, zorder=3)\n ax.grid(True, linestyle=':')\n ax.legend()\n ax.text(0.6, 0.95, 'AG')\n ax.text(0.4, 0.95, 'AC')\n ax.text(0.6, 0.05, 'TG')\n ax.text(0.4, 0.05, 'TC')\n ax.text(0, 0, f'AG: {ag_count}\\nTG: {tg_count}\\nTC: {tc_count}\\nAC: {ac_count}\\nNo Bias: {no_bias_count}',\n transform=ax.transAxes)\n plt.xlim([0, 1])\n plt.ylim([0, 1])\n plt.axvline(0.5, color='grey', zorder=1)\n plt.axhline(0.5, color='grey', zorder=1)\n plt.xlabel(r\"$G_3/GC_3$ Values\")\n plt.ylabel(r\"$A_3/AT_3$ Values\")\n suptitle = r'Parity Rule 2 plot' if organism_name is None else f\"Parity Rule 2 plot for {organism_name}\"\n plt.suptitle(suptitle, fontsize=16)\n title = f'Total genes: {N}' if gene_analysis else f'Total genome: {N}'\n plt.title(title, fontsize=12)\n if save_image:\n make_dir(folder_path)\n name = 'PR2_plot.png' if organism_name is None else f\"PR2_plot_{organism_name}.png\"\n file_name = join(folder_path, name)\n if is_file_writeable(file_name):\n plt.savefig(file_name, dpi=500)\n print(f'Saved file can be found as {abspath(file_name)}')\n plt.show()\n plt.close()", "def generate_stat(sample_size, sparsity = 0, amplitude = 0, sigma = 1):\n var = generate_variable(sample_size, sparsity, amplitude, sigma)\n y_obs = var[0]\n \n \"\"\" \n f is equal to -X(t,theta) and we will minimize f (max. 
X)\n \"\"\"\n def f(x):\n \"\"\" \n f(x)=-X(t,theta) where x[0]=t and x[1]=theta\n \"\"\"\n res = np.real(np.exp(-1j*x[1])*\\\n sum(y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1))) \n res = -res/np.sqrt(2*sample_size+1) \n return res\n \n def grad_f(x):\n \"\"\" \n gradient of f\n \"\"\"\n res1 = np.real(np.exp(-1j*x[1])*\\\n sum(1j*k*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res1 = -res1/np.sqrt(2*sample_size+1)\n \n res2 = np.real(np.exp(-1j*x[1])*\\\n sum(-1j*y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1)))\n res2 = -res2/np.sqrt(2*sample_size+1)\n return np.array([res1, res2])\n \n #% Minimizing f\n \n \"\"\" \n we minimize on [0, 2pi]^2\n \"\"\"\n bnds = ((0, 2*np.pi), (0, 2*np.pi))\n \n \"\"\" \n We begin with a greedy search over a grid of size 126^2;\n the initialization point is init\n \"\"\"\n x = y = np.arange(0, 2*np.pi, 0.05)\n steps = 126\n X, Y = np.meshgrid(x, y)\n val = np.array([f([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init = np.argmin(val)\n x1 = init%steps\n x2 = (init-x1)/steps\n init = [x1*0.05, x2*0.05]\n \n \"\"\" \n we minimize f...\n \"\"\"\n result = sco.minimize(f, init, method=\"L-BFGS-B\",\\\n jac=grad_f, bounds=bnds, tol=1e-15)\n \n \"\"\" \n (t1,theta1) is the argmax of X(t, theta) and l1=$\lambda_1$\n \"\"\"\n t1 = result.x[0]\n theta1 = result.x[1]\n l1 = -f([t1,theta1])\n \n \n \"\"\" \n Function g(x) is equal to (X(t1,theta1)-X(x))/(1-rho((t1,theta1)-x))\n \"\"\"\n def g(x):\n a0 = x[0]-t1\n a1 = x[1]-theta1\n N = 2*sample_size+1\n \n vec = np.array([a0,a1])\n r = np.linalg.norm(vec)\n \"\"\" \n the value for r=0 is set to l1 (note that r=0 corresponds to x=(t1,theta1))\n \"\"\" \n res = l1 \n \n if (0<r) & (r<0.00001):\n \"\"\" \n we look at values near (t1,theta1) for which an indeterminate form occurs\n \"\"\" \n alpha= np.arccos(np.clip(a0/np.sqrt(a0**2+a1**2), -1.0, 1.0))\n u0 = np.cos(alpha)\n u1 = np.sin(alpha)\n \"\"\" \n u0,u1 defines the direction (unit vector)\n \"\"\"\n denom = sum((k*np.cos(alpha)-np.sin(alpha))**2*\\\n (np.sinc((r*(k*np.cos(alpha)-np.sin(alpha)))/(2*np.pi)))**2\\\n for k in range(-sample_size,sample_size+1))/N\n \"\"\" \n denom computes the denominator\n \"\"\"\n \n# \"\"\" \n# We use simpson rule for the numerator\n# \"\"\"\n# h = np.linspace(0,1,500)\n# \n# b0 = t1 + h*a0\n# b1 = theta1 + h*a1\n# \n# value = (1-h)*(u0**2*\\\n# np.real(np.exp(-1j*b1)*sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +2*u0*u1*\\\n# np.real(np.exp(-1j*b1)*sum(k*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))\\\n# +u1**2*\\\n# np.real(np.exp(-1j*b1)*sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*b0) \\\n# for k in range(-sample_size,sample_size+1)))) \n# value = value/np.sqrt(N)\n# \n# num = sci.simps(value, h)\n \n \"\"\" \n we use a quadrature for the numerator\n \"\"\" \n fun_int = lambda w: (1-w)*(u0**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(-k**2*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +2*u0*u1*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum(k*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))\\\n +u1**2*\\\n np.real(np.exp(-1j*(theta1+w*a1))*\\\n sum((-1)*y_obs[k+sample_size]*np.exp(1j*k*(t1+w*a0)) \\\n for k in range(-sample_size,sample_size+1)))) \n \n num = 
np.mean(sci.quad(fun_int, 0, 1, epsabs=1e-15, epsrel=1e-15, limit=1000))\n \n res = -num/denom\n \n if (r>=0.00001):\n \"\"\" \n we look at values far from (t1,theta1) for which there is no indeterminate form\n \"\"\" \n res = (l1+f(x))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n \n return res \n \"\"\" \n we minimize g on [0, 2pi]^2 and we look for the initialization point\n \"\"\"\n val2 = np.array([g([x,y]) for x,y in zip(np.ravel(X), np.ravel(Y))])\n init2 = np.argmin(val2)\n x1 = init2%steps\n x2 = (init2-x1)/steps\n init2 = [x1*0.05, x2*0.05] \n result2 = sco.minimize(g, init2, method=\"L-BFGS-B\", bounds=bnds, tol=1e-15) \n \"\"\" \n argmin of g\n \"\"\"\n t2 = result2.x[0]\n theta2 = result2.x[1] \n \"\"\" \n value of lambda_2\n \"\"\"\n l21 = l1-result2.fun \n a0 = t2-t1\n a1 = theta2-theta1\n N = 2*sample_size+1\n l22 = l1-(l1+f([t2,theta2]))/(1-(np.cos(a1)*dirichlet(a0,N)/N))\n l2 = max(l21,l22)\n \"\"\" \n we compute the statistic\n \"\"\"\n alpha1 = (1/3)*sample_size*(sample_size+1)\n alpha2 = (1/np.sqrt(N))*\\\n sum((k**2-alpha1)*\\\n np.real(y_obs[k+sample_size]*np.exp(1j*(k*t1-theta1))) \\\n for k in range(-sample_size,sample_size+1))\n alpha3 = (1/np.sqrt(N))*sum(k*np.real(y_obs[k+sample_size]*\\\n np.exp(1j*(k*t1-theta1))) for k in range(-sample_size,sample_size+1)) \n stat = (sigma*(alpha1*l1+alpha2)*scs.norm.pdf(l1/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l1/sigma)))/\\\n (sigma*(alpha1*l2+alpha2)*scs.norm.pdf(l2/sigma)+\\\n (alpha1*sigma**2-alpha3**2)*(1-scs.norm.cdf(l2/sigma))) \n \n return stat", "def compute_GS(GMtcs):\n\n GS = np.mean(GMtcs,axis=0) #average over voxels\n\n return GS", "def forward(self, y):\n # landmarks are the weighted average y\n # activations across each channel\n y_projected = torch.zeros_like(y)\n y = self.softmax(y)\n\n # self.save_image(y[0, :, :, :])\n\n # print(y.size())\n # y_0 = y\n y = y.view(y.size(0), y.size(1), -1)\n # print(y[0, :].min(), y[0, :].max())\n mu_y = y * self.yv.reshape(1, 1, -1)\n mu_y = mu_y.sum(dim=-1) #/ y.sum(dim=-1)\n mu_x = y * self.xv.reshape(1, 1, -1)\n mu_x = mu_x.sum(dim=-1) #/ y.sum(dim=-1)\n means = torch.cat((mu_y.unsqueeze(-1), mu_x.unsqueeze(-1)), dim=-1)\n\n # project landmarks to a Gaussian function with fixed standard deviation\n # yv, xv = torch.meshgrid([torch.arange(0, y.size(-2)), torch.arange(0, y.size(-1))])\n # h_act = torch.zeros(y_projected.size(0), y_projected.size(1), device=y.device)\n for batch_id in range(y_projected.size(0)):\n for heatmap_id in range(y_projected.size(1)):\n gdist = MultivariateNormal(means[batch_id, heatmap_id, :], covariance_matrix=self.cov)\n logprobs = gdist.log_prob(torch.cat((self.yv.unsqueeze(0), self.xv.unsqueeze(0)), dim=0).t())\n y_projected[batch_id, heatmap_id, :, :] = torch.exp(logprobs).reshape(y_projected.size(-2), y_projected.size(-1))\n # y_projected[batch_id, heatmap_id, :, :] -= y_projected[batch_id, heatmap_id, :, :].min()\n # y_projected[batch_id, heatmap_id, :, :] /= y_projected[batch_id, heatmap_id, :, :].max()\n # coords = means[batch_id, heatmap_id, :].round().long()\n # h_act[batch_id, heatmap_id] = y_0[batch_id, heatmap_id, coords[0], coords[1]]\n # h_act[batch_id, heatmap_id]\n # max_coords = y_0[batch_id, heatmap_id, :, :].clone()\n # max_coords[max_coords < max_coords.max()] = 0\n # # print(max_coords.size())\n # if batch_id == 0 and heatmap_id == 0:\n # print(means[batch_id, heatmap_id, :], max_coords.nonzero().float().mean(0))\n # a = 'fail' if y.pow(2).mean() > h_act.mean() else 'pass'\n # print(h_act.mean(), y.mean())\n # 
print(y_projected.max(), y_projected.min())\n # y_out = y_projected.sum(dim=1)\n # self.save_image(y_out.unsqueeze(1), normalize=True)\n # self.save_image(y_out.unsqueeze(1), normalize=True, output='landmarks')\n return y_projected, self.prior_loss(means, y)", "def map_sim_property(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n counter = 0\n fignum = 1\n if p.gal_index == 'all':\n\n for gal_index in GR.N_gal - np.arange(GR.N_gal) - 1:\n\n if counter == 0:\n fig, axes = plt.subplots(3, 3, figsize=(20,15))\n axs = [axes[0,0],axes[0,1],axes[0,2],axes[1,0],axes[1,1],axes[1,2],axes[2,0],axes[2,1],axes[2,2]]\n counter = 9\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type='simgas')\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n\n # Plot\n Rmax = max_scale/2\n ax1 = axs[9 - counter]\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n if not p.log:\n map2D[map2D < p.vmin] = p.vmin/2\n map2D[map2D > p.vmax] = p.vmax\n im = ax1.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,cmap=p.cmap)\n fig.colorbar(im,shrink=0.8,ax=ax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n # Limit axes limits a bit to avoid area with no particles...\n ax1.set_xlim([-0.99*Rmax,0.99*Rmax])\n ax1.set_ylim([-0.99*Rmax,0.99*Rmax])\n if (p.prop == 'm') & (p.text == True):\n ax1.text(0.05,0.85,'M$_{gas}$=%.2eM$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.75,'SFR=%.2eM$_{\\odot}$/yr' % GR.SFR[gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n\n counter -= 1\n\n #if counter == 0:\n # ax1 = plt.subplots(1, 1)\n #cbar = fig.colorbar(im, ax=axes.ravel().tolist(), shrink=0.95, label=lab)\n # fig.colorbar(im,shrink=0.8,label=lab)\n\n if counter == 0 or gal_index == GR.N_gal-1:\n print('Saving in ' + p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format))\n # plt.tight_layout()\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/')\n plt.savefig(p.d_plot + 'sim_data/map_%s_%s_gals_%i.%s' % (p.prop,p.z1,fignum,p.format), format=p.format, dpi=250, facecolor='w')\n fignum += 1\n\n else:\n if p.add:\n fig, ax1 = plt.gcf(), p.ax\n if not p.add:\n fig = plt.figure(figsize=(8,6))\n ax1 = fig.add_axes([0.1, 0.01, 0.8, 0.8]) \n ax1.axis('equal')\n\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n simgas = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n if p.R_max:\n # Cut out square\n simgas = simgas[(np.abs(simgas.x) < p.R_max) & (np.abs(simgas.y) < p.R_max)]\n # Add bottom left corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = -p.R_max,-p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n # Add top right corner\n extra_row = simgas.iloc[0] # to ensure that map gets the right size\n extra_row['x'],extra_row['y'] = p.R_max,p.R_max\n extra_row[p.prop] = 0\n simgas = simgas.append(extra_row).reset_index(drop=True) \n else:\n pass\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop)\n if p.prop == 'm': map2D = map2D * simgas.m.sum()/np.sum(map2D) \n print('Min and max of map: ',map2D.min(),map2D.max())\n #map2D[map2D < 1e4] = 1e6\n # Plot map\n if not p.R_max:\n 
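# no R_max supplied: default to half of the projected map's extent\n 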
p.R_max = max_scale/2\n if p.log: \n if not p.vmax: p.vmax = np.log10(map2D).max()\n if not p.vmin: p.vmin = np.log10(map2D).max() - 4\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n else:\n if not p.vmax: p.vmax = np.max(map2D)\n if not p.vmin: p.vmin = np.min(map2D) / 1e3\n map2D[map2D < p.vmin] = p.vmin #np.min(map2D[map2D > 0])\n map2D = np.flipud(map2D)\n\n im = ax1.imshow(map2D,\\\n extent=[-max_scale/2,max_scale/2,-max_scale/2,max_scale/2],vmin=p.vmin,vmax=p.vmax,cmap=p.cmap)\n # Limit axes limits a bit to avoid area with no particles...\n zoom = 1#/1.5\n ax1.set_xlim([-1/zoom * p.R_max,1/zoom * p.R_max])\n ax1.set_ylim([-1/zoom * p.R_max,1/zoom * p.R_max])\n if p.colorbar: \n divider = make_axes_locatable(ax1)\n cax1 = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n fig.colorbar(im,cax=cax1,label=lab)\n if not p.add: ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')\n if (p.prop == 'm') & (p.text == True):\n simstar = aux.load_temp_file(gal_ob=gal_ob,data_type='simstar')\n ax1.text(0.05,0.92,'M$_{star}$=%.1e M$_{\\odot}$' % np.sum(simstar.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.86,'M$_{gas}$=%.1e M$_{\\odot}$' % np.sum(simgas.m),\\\n fontsize=14,transform=ax1.transAxes,color='white')\n ax1.text(0.05,0.80,'SFR=%.2f M$_{\\odot}$/yr' % GR.SFR[p.gal_index],\\\n fontsize=14,transform=ax1.transAxes,color='white')\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'sim_data/'): os.mkdir(p.d_plot + 'sim_data/') \n plt.savefig(p.d_plot + 'sim_data/map_%s_G%i.png' % (p.prop,p.gal_index), format=p.format, dpi=250, facecolor='w')\n\n if not p.colorbar: return(im)", "def make_4gaussians_image(noise=True):\n table = QTable()\n table['amplitude'] = [50, 70, 150, 210]\n table['x_mean'] = [160, 25, 150, 90]\n table['y_mean'] = [70, 40, 25, 60]\n table['x_stddev'] = [15.2, 5.1, 3.0, 8.1]\n table['y_stddev'] = [2.6, 2.5, 3.0, 4.7]\n table['theta'] = np.radians(np.array([145.0, 20.0, 0.0, 60.0]))\n\n shape = (100, 200)\n data = make_gaussian_sources_image(shape, table) + 5.0\n\n if noise:\n rng = np.random.RandomState(12345)\n data += rng.normal(loc=0.0, scale=5.0, size=shape)\n\n return data", "def sample_values(self, positions, gibbs):\n \"\"\"Sample important values\"\"\"\n\n if gibbs:\n self.local_energy = self.h.local_energy_gibbs(positions)\n self.accumulate_energy += self.h.local_energy_gibbs(positions)\n self.accumulate_energy_sq += self.local_energy*self.local_energy\n gradient_wf_a = 0.5*self.w.gradient_wavefunction_a(positions)\n gradient_wf_b = 0.5*self.w.gradient_wavefunction_b(positions)\n gradient_wf_W = 0.5*self.w.gradient_wavefunction_W(positions)\n else:\n self.local_energy = self.h.local_energy(positions)\n self.accumulate_energy += self.h.local_energy(positions)\n self.accumulate_energy_sq += self.local_energy*self.local_energy\n gradient_wf_a = self.w.gradient_wavefunction_a(positions)\n gradient_wf_b = self.w.gradient_wavefunction_b(positions)\n gradient_wf_W = self.w.gradient_wavefunction_W(positions)\n # self.local_energy = self.h.local_energy_numerical(positions)\n # self.accumulate_energy += self.h.local_energy_numerical(positions)\n # gradient_wf_a = np.zeros(self.w.M)\n # gradient_wf_b = np.zeros(self.w.N)\n # gradient_wf_W = np.zeros((self.w.M, self.w.N))\n\n self.accumulate_psi_term_a += gradient_wf_a\n self.accumulate_psi_term_b += gradient_wf_b\n self.accumulate_psi_term_W += gradient_wf_W\n self.accumulate_both_a += gradient_wf_a*self.local_energy\n 
self.accumulate_both_b += gradient_wf_b*self.local_energy\n self.accumulate_both_W += gradient_wf_W*self.local_energy", "def gaussp(y1, y2, n):\n\n # First check for trivial or stupid requests\n if n <= 0:\n raise ValueError(\"Zero (or less) grid points is stupid. Stop it.\")\n if n == 1:\n r = np.array([0.5*(y2 + y1)])\n wt = np.array([y2 - y1])\n return r, wt\n N_pi = 3.14159265358979323844 # Fortran uses stupid pi because of course it does\n EPS = 1e-14 # Desired accuracy\n n_sav = -1\n\n if n != n_sav:\n n_sav = n\n m = n\n m, r, wt = GridGenerator.gausspp(m)\n m = 0\n\n if m != n:\n m = int((n+1)/2) # Care, integer division\n x = np.zeros((2*m)) # Working r, not returned\n w = np.zeros((2*m)) # Working wt, not returned\n r = np.zeros((2*m))\n wt = np.zeros((2*m))\n for i in range(m):\n r[i] = N_pi*(i+0.75)/(n+0.5)\n r = np.cos(r)\n\n for i in range(m):\n z = r[i]\n z1 = 1e20 # Arbitrary large number to ensure at least 1 loop\n while abs(z-z1) > EPS:\n p1 = 1.0\n p2 = 0.0\n for j in range(n):\n p3 = p2\n p2 = p1\n p1 = ((2*(j + 1) - 1)*z*p2 - j*p3)/(j + 1)\n pp = n*(z*p1 - p2)/(z*z - 1.0)\n z1 = z\n z = z1 - p1/pp\n x[i] = -z\n x[n - (i + 1)] = z\n w[i] = 2.0/((1.0 - z*z)*pp*pp)\n w[n - (i + 1)] = w[i]\n\n for i in range(n):\n fact = 0.5*(y2-y1)\n r[i] = y1 + fact*(x[i] + 1.0)\n wt[i] = fact*w[i]\n\n return n, r, wt", "def zonal_avg2(data,Log=False):\n print 'setting up the destination grid'\n # get lat and lon for new regular grid\n# fpin = Nio.open_file('/home/ivan/Python/data/lat_t.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/lat_t.nc','r')\n lat_t = fpin.variables['lat_t'][:]\n lat_t_edges = fpin.variables['lat_t_edges'][:]\n fpin.close()\n# fpin = Nio.open_file('/home/ivan/Python/data/gx3v5.nc','r')\n fpin = Nio.open_file('/home/emunoz/Python/mapping/model_grid/gx3v5.nc','r')\n lon_t = N.sort(fpin.variables['TLONG'][0,:])\n ulon = N.sort(fpin.variables['ULONG'][0,:])\n lon_t_edges = N.concatenate((ulon,ulon[0,N.newaxis]+360.),0)\n # get gx3v5 lat and lon\n tlon = fpin.variables['TLONG'][:]\n tlat = fpin.variables['TLAT'][:]\n fpin.close()\n\n # compute area of cells in new regular grid\n area = grid_area(lon_t_edges,lat_t_edges)\n\n nlat = lat_t.shape[0]\n nlon = lon_t.shape[0]\n\n print 'computing weights for grid cell'\n ilist = []\n jlist = []\n wghts2D = []\n wghts3D = []\n for i in range(nlat):\n for j in range(nlon):\n i_inds, j_inds = find_stn_idx(lon_t[j], lat_t[i], tlon, tlat)\n ilist.append(i_inds)\n jlist.append(j_inds)\n dist = gc_dist(lon_t[i], lat_t[i], tlon, tlat)\n # make weights=0 on land\n work2D = 1./MA.array(dist,mask=data[0,...].mask)\n wghts2D.append(MA.filled(N.take(N.take(work2D,i_inds,0),j_inds,1)\n ,0))\n\n work3D = 1./MA.array(N.resize(dist,data.shape),mask=data.mask)\n wghts3D.append(MA.filled(N.take(N.take(work3D,i_inds,-2),j_inds,-1)\n ,0))\n\n #print 'computing zonal average'\n return lon_t, lat_t, ilist, jlist, wghts2D, wghts3D", "def _compute_gp_all_passbands_1D(obj, dataset, number_gp, t_min, t_max,\n output_root=None, **kwargs):\n try:\n kernel_param = kwargs[\"kernel_param\"]\n except KeyError:\n kernel_param = [500., 20.]\n\n obj_data = dataset.data[obj] # object's light curve\n obj_data = cs.rename_passband_column(obj_data.to_pandas())\n unique_pbs = np.unique(obj_data.passband)\n gp_times = np.linspace(t_min, t_max, number_gp)\n\n # Store the output in another astropy table\n obj_gps = []\n used_gp_dict = {}\n used_kernels_dict = {}\n filter_set = np.asarray(dataset.filter_set)\n for pb in filter_set:\n 
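# one GP fit per passband; passbands with no observations get the zero-filled table below\n 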
used_kernels_dict[pb] = None # initialize a None kernel for each passband\n if pb in unique_pbs:\n is_pb = obj_data.passband == pb # observations in this passband\n obj_data_pb = obj_data.loc[is_pb]\n\n gp_obs, gp, chosen_kernel = fit_best_gp(kernel_param,\n obj_data_pb, gp_times)\n\n mu, std = gp_obs.flux.values, gp_obs.flux_error.values\n # stack the GP results in an array momentarily\n obj_gp_pb_array = np.column_stack((gp_times, mu, std))\n used_kernels_dict[pb] = chosen_kernel\n # Save the GP already conditioned on a specific set of observations\n gp_predict = partial(gp.predict, obj_data_pb.flux)\n used_gp_dict[pb] = gp_predict\n else:\n obj_gp_pb_array = np.zeros([number_gp, 3])\n obj_gp_pb = Table([obj_gp_pb_array[:, 0], obj_gp_pb_array[:, 1],\n obj_gp_pb_array[:, 2], [pb] * number_gp],\n names=['mjd', 'flux', 'flux_error', 'filter'])\n if len(obj_gps) == 0: # initialize the table for 1st passband\n obj_gps = obj_gp_pb\n else:\n obj_gps = vstack((obj_gps, obj_gp_pb))\n\n if output_root is not None:\n obj_gps.write(os.path.join(output_root, 'gp_' + obj), format='fits',\n overwrite=True)\n path_save_gps = os.path.join(output_root, 'used_gp_dict_{}.pckl'\n ''.format(obj))\n path_save_kernels = os.path.join(output_root, 'used_kernels_dict_{}.'\n 'pckl'.format(obj))\n with open(path_save_gps, 'wb') as f:\n pickle.dump(used_gp_dict, f, pickle.HIGHEST_PROTOCOL)\n with open(path_save_kernels, 'wb') as f:\n pickle.dump(used_kernels_dict, f, pickle.HIGHEST_PROTOCOL)\n\n return obj_gps", "def get_gaussian_maps(mu, sigmax, sigmay, covs, shape_hw, mode='rot'):\n with tf.name_scope(None, 'gauss_map', [mu]):\n # mu_y, mu_x = mu[:, :, 0:1], mu[:, :, 1:2]\n\n y = tf.to_float(tf.linspace(-1.0, 1.0, shape_hw[0]))\n\n x = tf.to_float(tf.linspace(-1.0, 1.0, shape_hw[1]))\n [x,y] = tf.meshgrid(x,y)\n xy = tf.stack([x, y], axis=-1)\n xy = tf.stack([xy] * nb_landmarks, axis=0)\n xy = xy[None, : ,:, :, :]\n if mode in ['rot', 'flat']:\n mu = mu[:,:,None, None,:]\n\n invsigma = tf.stack([sigmay**2, -covs, -covs, sigmax**2], axis=-1)\n invsigma = tf.reshape(invsigma, [-1, nb_landmarks, 2,2])\n denominator = (sigmax*sigmay)**2 - covs**2\n denominator = tf.expand_dims(tf.expand_dims(denominator, -1), -1)\n invsigma = invsigma/(denominator+1e-7)\n invsigma = tf.cast(invsigma, tf.float32)\n pp = tf.tile(invsigma[:, :, None, :, :], [1, 1, shape_hw[1], 1, 1])\n X = xy-mu\n dist = tf.matmul(X,pp)\n dist = tf.reduce_sum((dist*X), axis=-1)\n\n\n if mode == 'rot':\n g_yx = tf.exp(-dist)\n else:\n g_yx = tf.exp(-tf.pow(dist + 1e-5, 0.25))\n\n else:\n raise ValueError('Unknown mode: ' + str(mode))\n\n g_yx = tf.transpose(g_yx, perm=[0, 2, 3, 1])\n return g_yx", "def task2_extra():\n N = 0\n lam = 0\n Ls = numpy.array([2*L for L in range(1,23)])\n h = 0.01\n tau = 0.000099\n\n iterss = []\n\n for L in Ls:\n a = L // 2\n print(L)\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n # eps = int(0.1 * len(x))\n\n Vm = V1D(lam, x)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center\")\n plt.xlabel(\"$L$\")\n plt.ylabel(\"Time\")\n plt.plot(Ls, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel.pdf\", bbox_inches=\"tight\")", "def cmd_gaus():\n cmds = []\n 
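# RooFit-style factory strings: mean r_m in [-1,1], width r_s in [0,10], then the Gaussian PDF\n 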
cmds.append(\"r_m[0.0,-1,1]\")\n cmds.append(\"r_s[2.5,0,10]\")\n cmds.append('Gaussian::res(x,r_m,r_s)')\n return cmds", "def three_mass_FUV_maps(gal_indices,**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig = plt.figure(figsize=(15,14.5),constrained_layout=False)\n gs1 = fig.add_gridspec(nrows=3, ncols=3, wspace=0.0, hspace=0.0)\n\n rotate = False\n for row_i,gal_index in enumerate(gal_indices):\n\n R_max = p.R_max[row_i]\n\n ax1 = fig.add_subplot(gs1[row_i, 0])\n m = map_sim_property(add=True,ax=ax1,gal_index=gal_index, \\\n prop='m',R_max=R_max,vmin=0,vmax=7,\\\n pix_size_kpc=0.5,sim_type='simgas',cmap='viridis',log=True,colorbar=False,rotate=rotate,text=p.text)\n frame = plt.gca()\n #if row_i != 2: frame.axes.get_xaxis().set_visible(False)\n if row_i != 2: ax1.set_xlabel('')\n if row_i == 0:\n cbaxes = fig.add_axes([0.05, 0.93, 0.25, 0.01]) \n cb = plt.colorbar(m, orientation='horizontal', cax = cbaxes)\n cbaxes.xaxis.set_ticks_position('top')\n cb.ax.set_title(\"log $\\Sigma_{\\mathrm{gas}}$ [M$_{\\odot}$ kpc$^{-2}$]\")\n # Make a size indicator\n #ax1.plot([p.R_max-15,p.R_max-5],[-p.R_max+5,-p.R_max+5],lw=4,color='white')\n #ax1.text(p.R_max-16,-p.R_max+7,'10 kpc',color='white',fontsize=12)\n # Remove axes ticks\n ax1.tick_params(axis='x',which='both',labelbottom=False)\n ax1.tick_params(axis='y',which='both',labelleft=False)\n\n ax1 = fig.add_subplot(gs1[row_i, 1])\n m = star_map(add=True,ax=ax1,R_max=R_max,vmin=6,vmax=9,\\\n gal_index=gal_index,colorbar=False,rotate=rotate)\n frame = plt.gca()\n #if row_i != 2: frame.axes.get_xaxis().set_visible(False)\n if row_i != 2: ax1.set_xlabel('')\n frame.axes.get_yaxis().set_visible(False)\n if row_i == 0:\n cbaxes = fig.add_axes([0.375, 0.93, 0.25, 0.01]) \n cb = plt.colorbar(m, orientation='horizontal', cax = cbaxes) \n cbaxes.xaxis.set_ticks_position('top')\n cb.ax.set_title(\"log stellar age [yr]\")\n # Make a size indicator\n #ax1.plot([p.R_max-15,p.R_max-5],[-p.R_max+5,-p.R_max+5],lw=4,color='k')\n #ax1.text(p.R_max-16,-p.R_max+7,'10 kpc',color='k',fontsize=12)\n # Remove axes ticks\n ax1.tick_params(axis='x',which='both',labelbottom=False)\n ax1.tick_params(axis='y',which='both',labelleft=False)\n\n ax1 = fig.add_subplot(gs1[row_i, 2])\n m = FUV_map(add=True,ax=ax1,gal_index=gal_index,R_max=R_max,vmin=-10,vmax=3,select=p.select,cmap='twilight',colorbar=False,rotate=rotate)\n frame = plt.gca()\n #if row_i != 2: frame.axes.get_xaxis().set_visible(False)\n if row_i != 2: ax1.set_xlabel('')\n frame.axes.get_yaxis().set_visible(False)\n if row_i == 0:\n cbaxes = fig.add_axes([0.69, 0.93, 0.25, 0.01]) \n cb = plt.colorbar(m, orientation='horizontal', cax = cbaxes) \n cbaxes.xaxis.set_ticks_position('top')\n cb.ax.set_title('FUV flux [W/m$^2$/arcsec$^2$]')\n # Make a size indicator\n # if row_i == 2:\n # print('Adding size indicator')\n #ax1.text(p.R_max-16,-p.R_max+7,'10 kpc',color='w',fontsize=12)\n #ax1.plot([p.R_max-15,p.R_max-5],[-p.R_max+5,-p.R_max+5],lw=4,color='w')\n # else:\n ax1.text(R_max-16,-R_max+7,'10 kpc',color='k',fontsize=12)\n ax1.plot([R_max-15,R_max-5],[-R_max+5,-R_max+5],lw=4,color='k')\n # Remove axes ticks\n ax1.tick_params(axis='x',which='both',labelbottom=False)\n ax1.tick_params(axis='y',which='both',labelleft=False)\n\n # s = segs\n gs1.update(top=0.92,bottom=0.02,left=0.02,right=0.98)\n\n if p.savefig:\n if not os.path.isdir(p.d_plot + 'pretty/'): os.mkdir(p.d_plot + 'pretty/')\n plt.savefig('plots/pretty/mass_FUV_maps_%s%s.png' % 
(p.sim_name,p.sim_run),format='png',dpi=200)", "def f4():\n n = 4\n v = np.arange(n)**0.75 * 0.2\n e = (np.arange(n)+1)**0.7 * 1e-1\n\n n = 12\n v = np.arange(n)\n e = np.array([0.1]*n) * 10e-0\n\n print(Sumb(v,e))\n\n f = plt.figure()\n a = f.add_subplot(111)\n\n dx = 0.0001\n x = np.arange(-1,v[-1]+1,dx)\n y = x.copy()\n y[:] = 0.\n for i in range(n):\n yx = lg(x,v[i],e[i])\n a.plot(x,np.exp(yx),label='{:d}'.format(i))\n y += yx\n y = np.exp((y - np.max(y))/n**2)\n y /= np.sum(y) * dx \n a.plot(x,y,label='sum')\n s = np.argsort(y)[::-1]\n ys = np.cumsum(y[s]) * dx\n yi = np.argwhere(ys > 0.682689492137)[0][0]\n print('mean = {:2f}'.format(x[s[0]]))\n print('sigma = {:2f}'.format(yi*dx/2))\n xy = np.ndarray((yi+2,2))\n i0,i1 = min(s[:yi]), max(s[:yi])\n xy[:yi,0] = x[i0:i1+1]\n xy[:yi,1] = y[i0:i1+1]\n xy[yi:,1] = 0\n xy[yi:,0] = x[[i1,i0]]\n a.add_patch(Polygon(xy,fill=True,color='green',ec='none',alpha=0.25))\n \n leg = plt.legend()\n plt.draw()", "def make_interferometry(data,algorithm=None):\n # Get the coherence map\n Im = np.load(os.getcwd()+\"/Results/Output_\"+algorithm+\"/Im_10.npy\") # Load image 10\n f1 = np.zeros((len(Im),len(Im.T)),dtype=complex)\n f2 = np.zeros((len(Im),len(Im.T)),dtype=complex)\n coh = np.zeros((len(Im),len(Im.T)),dtype=complex)\n\n for i in range(n_im-1):\n i += i_o # Initial value\n Im1 = np.load(os.getcwd()+\"/Results/Output_\"+algorithm+\"/Im_\"+str(i)+\".npy\")\n Im2 = np.load(os.getcwd()+\"/Results/Output_\"+algorithm+\"/Im_\"+str(i+1)+\".npy\")\n f1 += Im1*Im2.conjugate()\n f2 += np.sqrt((abs(Im1)**2)*(abs(Im2)**2))\n\n coh = abs(f1/f2)\n\n # Plot the coherence\n if show:\n plt.close('all')\n cmap =\"plasma\"\n if algorithm == \"BP\":\n title_name ='Mapa de coherencia(BP)\\n[dset'+str(i_o)+'-'+str(i_o+n_im-1)+']'\n direction ='Coherencia_BP_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'].png'\n\n elif algorithm == \"RMA\":\n title_name ='Mapa de coherencia(RMA)\\n[dset'+str(i_o)+'-'+str(i_o+n_im-1)+']'\n direction ='Coherencia_RMA_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'].png'\n #vmin = np.amin(20*np.log10(abs(Sf_n)))+55 #dB\n #vmax = np.amax(20*np.log10(abs(Sf_n)))#-20\n\n fig, ax = plt.subplots()\n im=ax.imshow(coh,cmap,origin='lower',aspect='equal', extent=[data['x_min'],data['x_max'],data['y_min'],data['y_max']]) #, vmin=vmin, vmax=vmax)\n 
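# The coherence map is displayed over the scene extent; values near 1 indicate temporally stable scatterers.\n 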
ax.set(xlabel='Azimut(m)',ylabel='Rango(m)', title=title_name)\n ax.grid()\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1) # pad is the spacing from the main plot\n plt.colorbar(im,cax=cax,label='',extend='both')\n fig.savefig(os.getcwd()+\"/Results/Interferograms_BP/Coherence_maps/\"+direction, orientation='landscape')\n\n # Extract a [0.7, 1] mask for the coherence\n mask = coh<=0.7\n coh[mask] = np.nan\n\n # Plot with the mask applied\n if show:\n cmap =\"plasma\"\n if algorithm == \"BP\":\n title_name ='Mapa de coherencia recortada(BP)\\n[dset'+str(i_o)+'-'+str(i_o+n_im-1)+']'\n direction ='CoherenciaCut_BP_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'].png'\n\n elif algorithm == \"RMA\":\n title_name ='Mapa de coherencia recortada(RMA)\\n[dset'+str(i_o)+'-'+str(i_o+n_im-1)+']'\n direction ='CoherenciaCut_RMA_dset'+str(i_o)+'-'+str(i_o+n_im-1)+'].png'\n #vmin = np.amin(20*np.log10(abs(Sf_n)))+55 #dB\n #vmax = np.amax(20*np.log10(abs(Sf_n)))#-20\n fig, ax = plt.subplots()\n im=ax.imshow(coh,cmap,origin='lower',aspect='equal', extent=[data['x_min'],data['x_max'],data['y_min'],data['y_max']]) #, vmin=vmin, vmax=vmax)\n ax.set(xlabel='Azimut(m)',ylabel='Rango(m)', title=title_name)\n ax.grid()\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1) # pad is the spacing from the main plot\n plt.colorbar(im,cax=cax,label='',extend='both')\n fig.savefig(os.getcwd()+\"/Results/Interferograms_BP/Coherence_maps/\"+direction, orientation='landscape')\n\n # Get the displacement map, i.e. the interferogram\n par = sp.get_parameters()\n global c,fc\n c,fc = par['c'],par['fc']\n\n if show:\n for i in range(n_im-1):\n # Compute the i-th interferogram\n i1 = i+10\n Im1 = np.load(os.getcwd()+\"/Results/Output_\"+algorithm+\"/Im_\"+str(i1)+\".npy\")\n Im2 = np.load(os.getcwd()+\"/Results/Output_\"+algorithm+\"/Im_\"+str(i1+1)+\".npy\")\n disp = np.angle(Im1*Im2.conjugate())*1000*c/(4*np.pi*fc) # Distances in mm\n disp[mask] = np.nan\n # Plot and save\n if algorithm == \"BP\":\n title_name ='Interferograma (BP) \\ndset'+'['+str(i1)+']-['+str(i1+1)+']'\n #direction ='Interferogramas_BP/'+'Itf_BP_dset'+'['+str(i+10)+']-['+str(i+11)+'].png'\n direction ='Itf_BP_dset'+str(i)+'.png'\n\n elif algorithm == \"RMA\":\n title_name ='Interferograma (RMA) \\ndset'+'['+str(i1)+']-['+str(i1+1)+']'\n #direction ='Interferogramas_RMA/'+'Itf_RMA_dset'+'['+str(i+10)+']-['+str(i+11)+'].png'\n direction ='Itf_RMA_dset'+str(i)+'.png'\n cmap = plt.cm.plasma #brg\n cmap.set_bad('black',1.)\n #vmin = np.amin(20*np.log10(abs(Sf_n)))+55 #dB\n #vmax = np.amax(20*np.log10(abs(Sf_n)))#-20\n fig, ax = plt.subplots()\n im=ax.imshow(disp,cmap,origin='lower',aspect='equal', extent=[data['x_min'],data['x_max'],data['y_min'],data['y_max']]) #, vmin=vmin, vmax=vmax)\n ax.set(xlabel='Azimut(m)',ylabel='Rango(m)', title=title_name)\n ax.grid()\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1) # pad is the spacing from the main plot\n plt.colorbar(im,cax=cax,label='desplazamiento(mm)',extend='both')\n fig.savefig(os.getcwd()+\"/Results/Interferograms_BP/Interferograms_complete/Images/\"+direction,orientation='landscape')\n plt.close()\n\n # Compute the displacement-vs-time curve\n #------- Defining the zones --------------\n zone0 = np.array([0,650,0,400])\n zone1 = np.array([0,100,100,200])\n zone2 = np.array([0,100,200,300])\n zone3 = np.array([100,200,100,200])\n zone4 = np.array([100,200,200,300])\n zone5 = np.array([200,300,100,200])\n zone6 = np.array([200,300,200,300])\n zone_indexes = {0:zone0,1:zone1,2:zone2,3:zone3,4:zone4,5:zone5,6:zone6}\n\n desp = np.zeros((len(zone_indexes),n_im-1)) # y variable: displacement\n\n for z in range(len(zone_indexes)): # Compute the mean displacement per zone\n idc = zone_indexes[z]\n for i in range(n_im-1):\n i1 = i+10 # 10 is the first image index\n Im1 = np.load(os.getcwd()+\"/Results/Output_\"+algorithm+\"/Im_\"+str(i1)+\".npy\")\n Im2 = np.load(os.getcwd()+\"/Results/Output_\"+algorithm+\"/Im_\"+str(i1+1)+\".npy\")\n d_i = np.angle(Im1*Im2.conjugate())*1000*c/(4*np.pi*fc)\n # ----- Plot ------\n d_i2 = d_i.copy()\n d_i2[mask] = np.nan\n d_i2 = d_i2[idc[0]:idc[1],idc[2]:idc[3]]\n # ----- end --------\n d_i[mask] = 0\n d_i = d_i[idc[0]:idc[1],idc[2]:idc[3]]\n if i==0: desp[z,i] = d_i.mean()\n else: desp[z,i] = d_i.mean()+desp[z,i-1]\n\n # Plot the z-th zone\n if show and i == 0: #\n if algorithm == \"BP\":\n title_name ='Interferograma recortado(BP) \\nZona '+str(z)+' - dset'+'['+str(i1)+']-['+str(i1+1)+']'\n direction 
='Itf_BP_rec_zona_'+str(z)+'_dset'+'['+str(i1)+']-['+str(i1+1)+'].png'\n #direction ='Interferogramas_BP/'+'Itf_BP_dset'+str(i)+'.png'\n\n elif algorithm == \"RMA\":\n title_name ='Interferograma recortado(RMA) \\nZona '+str(z)+' - dset'+'['+str(i1)+']-['+str(i1+1)+']'\n direction ='Itf_RMA_rec_zona_'+str(z)+'_dset'+'['+str(i1)+']-['+str(i1+1)+'].png'\n #direction ='Interferogramas_RMA/'+'Itf_RMA_dset'+str(i)+'.png'\n cmap = plt.cm.plasma #brg\n cmap.set_bad('black',1.)\n #vmin = np.amin(20*np.log10(abs(Sf_n)))+55 #dB\n #vmax = np.amax(20*np.log10(abs(Sf_n)))#-20\n fig, ax = plt.subplots()\n im=ax.imshow(d_i2,cmap,origin='lower',aspect='equal', extent=[idc[2]-200,idc[3]-200,idc[0]+300,idc[1]+300]) #, vmin=vmin, vmax=vmax)\n ax.set(xlabel='Azimut(m)',ylabel='Rango(m)', title=title_name)\n ax.grid()\n divider = make_axes_locatable(ax)\n cax = divider.append_axes(\"right\", size=\"5%\", pad=0.1) # pad is the spacing from the main plot\n plt.colorbar(im,cax=cax,label='desplazamiento(mm)',extend='both')\n fig.savefig(os.getcwd()+\"/Results/Interferograms_BP/Interferograms_complete/Images/\"+direction,orientation='landscape')\n\n #t = np.arange(len(Ims)-1)+10 # x variable: time\n time_dset = np.load(\"Dates_BP.npy\")\n time_dset = time_dset[:-1]\n time_dset2 = np.array([datetime.datetime.strptime(idx,\"%d-%m-%y %H:%M:%S\") for idx in time_dset])\n\n # Plot of displacement vs time for each algorithm\n \"\"\"if show:\n if algorithm == \"BP\":\n title_name ='Desplazamiento promedio(BP)'\n direction ='desplazamientoPromedio_BP_dset10-100.png'\n\n fig, ax= plt.subplots()\n ax.plot(t,desp,'b',marker='o',markerfacecolor='b',markeredgecolor='b')\n ax.set(xlabel='Tiempo(unidades)',ylabel='Desplazamiento(mm)', title=title_name)\n #ax.set_xlim([R.min(),R.max()])\n ax.set_ylim([-c*100/(4*fc),c*100/(4*fc)])\n ax.grid()\n plt.show()\n fig.savefig(os.getcwd()+\"/Results/Desplazamientos/\"+direction,orientation='landscape')\n\n elif algorithm == \"RMA\":\n title_name ='Desplazamiento promedio(RMA)'\n direction ='desplazamientoPromedio_RMA_dset10-100.png'\n\n fig, ax= plt.subplots()\n ax.plot(t,desp,'r',marker='o',markerfacecolor='r',markeredgecolor='r')\n ax.set(xlabel='Tiempo(unidades)',ylabel='Desplazamiento(mm)', title=title_name)\n #ax.set_xlim([R.min(),R.max()])\n ax.set_ylim([-c*50/(4*fc),c*50/(4*fc)])\n ax.grid()\n plt.show()\n fig.savefig(os.getcwd()+\"/Results/Desplazamientos/\"+direction,orientation='landscape')\n \"\"\"\n return {'d_t':desp, 't':time_dset2}", "def compute_g(self, i, j):\n #Compute variance and mean denominator (same denominator for both)\n g_next = 0\n if (i+1 < self.nb_days-1):\n g_next = self.g_heat[i+1,j]\n\n denominator = 2 * self.sigma2\n numerator_mean = self.sigma2 * (self.g_heat[i-1,j] + g_next)\n if (self.u_heat > self.temperatures[i]):\n denominator = denominator + self.sigma_g_star_2[0, j] * ((self.temperatures[i] - self.u_heat)**2)\n numerator_mean = numerator_mean + \\\n self.sigma_g_star_2[0, j] * (self.temperatures[i] - self.u_heat) * (self.consumptions[i] - self.s[i,j] * self.kappa[self.daytypes[i]])\n\n #Mean\n mean = numerator_mean / denominator\n\n #Compute variance numerator\n variance_numerator = (self.sigma2 * self.sigma_g_star_2[0, j])\n #Variance\n variance = variance_numerator / denominator\n\n self.g_heat[i,j] = self.truncated_norm(-inf, 0, mean, variance)", "def _calc(self):\r\n u = self._fadefunc(self.xf)\r\n v = self._fadefunc(self.yf)\r\n w = self._fadefunc(self.zf)\r\n\r\n # populate the hashes dict\r\n self._hash()\r\n \r\n # once the 
hash dict is populated, start calculating the dot product between \r\n # the gradient vector and the distance vectors, which is done in the _grad method.\r\n # finally linearly interpolate the values to get the avg value\r\n # first interpolate in the x-dir, then in y-dir\r\n x1: float = self._lerp(self._grad(self.hashes[\"aaa\"], self.xf, self.yf, self.zf),\r\n self._grad(self.hashes[\"baa\"], self.xf - 1, self.yf, self.zf), u)\r\n\r\n x2: float = self._lerp(self._grad(self.hashes[\"aba\"], self.xf, self.yf - 1, self.zf),\r\n self._grad(self.hashes[\"bba\"], self.xf - 1, self.yf - 1, self.zf), u)\r\n\r\n # the first y-dir lerp\r\n y1: float = self._lerp(x1, x2, v)\r\n\r\n x1: float = self._lerp(self._grad(self.hashes[\"aab\"], self.xf, self.yf, self.zf - 1),\r\n self._grad(self.hashes[\"bab\"], self.xf - 1, self.yf, self.zf - 1), u)\r\n\r\n x2: float = self._lerp(self._grad(self.hashes[\"abb\"], self.xf, self.yf - 1, self.zf - 1),\r\n self._grad(self.hashes[\"bbb\"], self.xf-1, self.yf-1, self.zf-1), u)\r\n\r\n # the second y-dir lerp\r\n y2: float = self._lerp(x1, x2, v)\r\n\r\n # the final noise value, which will be in the range [0, 1]\r\n self.value = (self._lerp(y1, y2, w) + 1)/2\r\n return self.value", "def past_weight_grad_calculator2(xs, es, kp_x, kd_x, kp_e, kd_e, shapes):\n kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]\n n_samples, n_in, n_out = shapes\n rx = kd_x/(kp_x+kd_x)\n re = kd_e/(kp_e+kd_e)\n\n xr = create_shared_variable(np.zeros((n_samples, n_in)))\n er = create_shared_variable(np.zeros((n_samples, n_out)))\n\n\n\n\n # xr_new = xr*rx + xs/(kp_x+kd_x)\n # er_new = er*re + es/(kp_e+kd_e)\n\n arr = rx*re/(1-rx*re)\n\n xr_new = xr*arr + xs/(kp_x+kd_x)\n er_new = er*arr + es/(kp_e+kd_e)\n\n xsum = create_shared_variable(np.zeros((n_samples, n_in)))\n esum = create_shared_variable(np.zeros((n_samples, n_out)))\n\n xsum_new = xsum+xr_new\n esum_new = esum+er_new\n\n x_nospikes = tt.eq(xs, 0)\n e_nospikes = tt.eq(es, 0)\n\n dw = xs.T.dot(esum_new) + xsum_new.T.dot(es)\n\n add_update(xr, xr_new)\n add_update(er, er_new)\n add_update(xsum, xsum_new*x_nospikes)\n add_update(esum, esum_new*e_nospikes)\n\n return xs.T.dot(er) + xr.T.dot(es)\n # return xr.T.dot(er)\n # return dw", "def modelOnBetaGrid(sample,bins,N,l,u):\r\n\r\n betaGrid=np.linspace(l,u,N)\r\n traces=[]\r\n WAIC=dict()\r\n index=0\r\n\r\n for beta in betaGrid:\r\n trace=intensityLogGauss(sample,bins,beta)\r\n traces.append(trace['intensity'])\r\n WAIC[index]=trace\r\n index+=1\r\n\r\n df=pm.compare(WAIC,ic='WAIC')\r\n\r\n return betaGrid,df,traces", "def app(ngr=100,c2a=1.6235):\n import matplotlib.pyplot as plt\n\n ## small donuts\n # plt.gcf().clf()\n grs = main(mu=0,ngrains=ngr,tilt_1=30.,sigma=15)\n plt.gcf().savefig('small_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='sm_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## Big donuts\n grs = main(mu=0,ngrains=ngr,tilt_1=50.,sigma=15)\n plt.gcf().savefig('big_doughnut.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='big_doughnut',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (30).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=30.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-30.,sigma=45)\n plt.gcf().clf()\n grs =[]\n for i in range(len(gr1)):\n grs.append(gr1[i])\n grs.append(gr2[i])\n grs=np.array(grs)\n mypf=upf.polefigure(grains=grs,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n 
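# Save the combined +/-30 degree double-tilt texture as (0001) and (10-10) pole figures.\n 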
plt.gcf().savefig('t30.pdf',bbox_inches='tight')\n f = gen_file(lab='dbl_lets_30',ngr=ngr)\n write_gr(f,grs)\n\n ## twin tilts (50).\n gr1=main(mu=0,ngrains=ngr/2,tilts_about_ax1=50.,sigma=45)\n plt.gcf().clf()\n gr2=main(mu=0,ngrains=ngr/2,tilts_about_ax1=-50.,sigma=45)\n plt.gcf().clf()\n gr =[]\n for i in range(len(gr1)):\n gr.append(gr1[i])\n gr.append(gr2[i])\n gr=np.array(gr)\n mypf=upf.polefigure(grains=gr,csym='hexag',cdim=[1,1,c2a])\n mypf.pf_new(poles=[[0,0,0,1],[1,0,-1,0]],cmap='jet',ix='TD',iy='RD')\n plt.gcf().savefig('t50.pdf',bbox_inches='tight')\n plt.gcf().clf()\n f = gen_file(lab='dbl_lets_50',ngr=ngr)\n write_gr(f,gr)", "def multiple_optima(gene_number=937, resolution=80, model_restarts=10, seed=10000, max_iters=300, optimize=True, plot=True):\r\n\r\n # Contour over a range of length scales and signal/noise ratios.\r\n length_scales = np.linspace(0.1, 60., resolution)\r\n log_SNRs = np.linspace(-3., 4., resolution)\r\n\r\n data = GPy.util.datasets.della_gatta_TRP63_gene_expression(data_set='della_gatta',gene_number=gene_number)\r\n # data['Y'] = data['Y'][0::2, :]\r\n # data['X'] = data['X'][0::2, :]\r\n\r\n data['Y'] = data['Y'] - np.mean(data['Y'])\r\n\r\n lls = GPy.examples.regression._contour_data(data, length_scales, log_SNRs, GPy.kern.rbf)\r\n if plot:\r\n pb.contour(length_scales, log_SNRs, np.exp(lls), 20, cmap=pb.cm.jet)\r\n ax = pb.gca()\r\n pb.xlabel('length scale')\r\n pb.ylabel('log_10 SNR')\r\n\r\n xlim = ax.get_xlim()\r\n ylim = ax.get_ylim()\r\n\r\n # Now run a few optimizations\r\n models = []\r\n optim_point_x = np.empty(2)\r\n optim_point_y = np.empty(2)\r\n np.random.seed(seed=seed)\r\n for i in range(0, model_restarts):\r\n # kern = GPy.kern.rbf(1, variance=np.random.exponential(1.), lengthscale=np.random.exponential(50.))\r\n kern = GPy.kern.rbf(1, variance=np.random.uniform(1e-3, 1), lengthscale=np.random.uniform(5, 50))\r\n\r\n m = GPy.models.GPRegression(data['X'], data['Y'], kernel=kern)\r\n m['noise_variance'] = np.random.uniform(1e-3, 1)\r\n optim_point_x[0] = m['rbf_lengthscale']\r\n optim_point_y[0] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);\r\n\r\n # optimize\r\n if optimize:\r\n m.optimize('scg', xtol=1e-6, ftol=1e-6, max_iters=max_iters)\r\n\r\n optim_point_x[1] = m['rbf_lengthscale']\r\n optim_point_y[1] = np.log10(m['rbf_variance']) - np.log10(m['noise_variance']);\r\n\r\n if plot:\r\n pb.arrow(optim_point_x[0], optim_point_y[0], optim_point_x[1] - optim_point_x[0], optim_point_y[1] - optim_point_y[0], label=str(i), head_length=1, head_width=0.5, fc='k', ec='k')\r\n models.append(m)\r\n\r\n if plot:\r\n ax.set_xlim(xlim)\r\n ax.set_ylim(ylim)\r\n return m # (models, lls)\r", "def awGrid(vis,HA,uvw,image_params,obs_params,Mterms,Mterms_ij):\t\n\tStokes = image_params['Stokes']\n\t\n\n\n\tprint '--------------Gridding X pol--------------------'\n\txgrid_wt, xgrid_uv = gridOnePolAWproj(vis[0],HA,uvw,image_params,obs_params,Mterms[0],Mterms_ij[0])\n\tprint '--------------Gridding Y pol--------------------'\n\tygrid_wt, ygrid_uv = gridOnePolAWproj(vis[1],HA,uvw,image_params,obs_params,Mterms[1],Mterms_ij[1])\n\n\tN = np.shape(xgrid_wt)[0]\n\tgrid_uv = np.zeros([N, N], dtype=complex)\n\tgrid_wt = np.zeros([N, N], dtype=complex)\n\t\n\tif Stokes == 'I':\n\t\t#combine X and Y gridded vis to create the I pol gridded vis\n\t\t# I = (XX+YY)/2\n\t\tgrid_uv.real = (ygrid_uv.real + xgrid_uv.real)/2\n\t\tgrid_uv.imag = (ygrid_uv.imag + xgrid_uv.imag)/2\n\n\t\t#combine X and Y gridded wt to create the I pol gridded 
wt\n\t\tgrid_wt.real = (ygrid_wt.real + xgrid_wt.real)/2\n\t\tgrid_wt.imag = (ygrid_wt.imag + xgrid_wt.imag)/2\n\n\telif Stokes == 'Q':\n\t\t#combine X and Y gridded vis to create the Q pol gridded vis\n\t\t# Q = (XX-YY)/2\n\t\tgrid_uv.real = (ygrid_uv.real - xgrid_uv.real)/2\n\t\tgrid_uv.imag = (ygrid_uv.imag - xgrid_uv.imag)/2\n\n\t\t#combine X and Y gridded wt to create the Q pol gridded wt\n\t\tgrid_wt.real = (ygrid_wt.real - xgrid_wt.real)/2\n\t\tgrid_wt.imag = (ygrid_wt.imag - xgrid_wt.imag)/2\n\n\n\tdty_image=np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(grid_uv)))\n\tpsf_image=np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(grid_wt)))\n\n\treturn dty_image, psf_image", "def task2_extra2():\n N = 0\n lam = 0\n L = 10\n h = 0.001\n tau = 0.000099\n aa = numpy.array([0.25*a for a in range((L-1)*4)])\n x = numpy.linspace(-L, L, int(2*L/h) + 1)\n Vm = V1D(lam, x)\n # eps=int(0.1*len(x))\n\n iterss = []\n for a in aa:\n print(a)\n state = phi(N, x-a)\n\n iters = 0\n while True:\n prob = numpy.abs(state)**2\n mid = int(2*L/h) // 2\n # if max(prob) in prob[mid-eps:mid+eps]:\n if numpy.argmax(prob) <= mid:\n print(iters)\n iterss.append(iters)\n break\n\n state[0] = 0\n state[-1] = 0\n state = implicit_scheme_step(state, tau, h, Vm)\n iters += 1\n\n fig = plt.figure()\n plt.title(\"Iterations of Gaussian travel to center ($L={}$)\".format(L))\n plt.xlabel(\"$a$\")\n plt.ylabel(\"Time\")\n plt.plot(aa, tau*numpy.array(iterss))\n plt.show()\n fig.savefig(\"naloga2_iters_of_gaussian_travel_fixedL={}.pdf\".format(L), bbox_inches=\"tight\")", "def refugia_adj_1_growthall(params, ns):\n #14 parameters \n nu10, nu1, nuA0, nuA, nu20, nu2, nu30, nu3, m3_1, m3_2, m3_3, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2])\n ## Population function and migration matrix for T1\n nuA_func = lambda t: nuA0 * (nuA/nuA0)**(t/T1) \n ## Population function for T1\n nu_T1_func = lambda t: [nu1, nuA_func(t)]\n fs.integrate(nu_T1_func, T1)\n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T2\n nu_T2 = [nu1, nu2, nu3]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3\n nu1_func = lambda t: nu10 * (nu1/nu10)**(t/T3) \n nu2_func = lambda t: nu20 * (nu2/nu20)**(t/T3) \n nu3_func = lambda t: nu30 * (nu3/nu30)**(t/T3) \n nu_T3_func = lambda t: [nu1_func(t), nu2_func(t), nu3_func(t)] \n mig3 = numpy.array([[0, m3_1, m3_3],[m3_1, 0, m3_2], [m3_3, m3_2, 0]]) \n fs.integrate(nu_T3_func, T3, m=mig3) \n return fs", "def Intgrl(N, R, Zeta1, Zeta2, Za, Zb):\n\n global S12, T11, T12, T22, V11A, V12A, V22A, V11B, V12B, V22B, V1111, V2111, V2121, V2211, V2221, V2222\n\n S12 = 0.0\n T11 = 0.0\n T12 = 0.0\n T22 = 0.0\n V11A = 0.0\n V12A = 0.0\n V22A = 0.0\n V11B = 0.0\n V12B = 0.0\n V22B = 0.0\n V1111 = 0.0\n V2111 = 0.0\n V2121 = 0.0\n V2211 = 0.0\n V2221 = 0.0\n V2222 = 0.0\n\n R2 = R * R\n\n # The coefficients for the contracted Gaussian functions are below\n Coeff = np.array([[1.00000, 0.0000000, 0.000000],\n [0.678914, 0.430129, 0.000000],\n [0.444635, 0.535328, 0.154329]])\n\n Expon = np.array([[0.270950, 0.000000, 0.000000],\n [0.151623, 0.851819, 0.000000],\n [0.109818, 0.405771, 2.227660]])\n D1 = np.zeros([3])\n A1 = np.zeros([3])\n D2 = np.zeros([3])\n A2 = np.zeros([3])\n\n # This loop constructs the contracted Gaussian functions\n for i in range(N):\n A1[i] = Expon[N - 1, i] * (Zeta1 ** 2)\n D1[i] = Coeff[N - 
1, i] * ((2.0 * A1[i] / np.pi) ** 0.75)\n A2[i] = Expon[N - 1, i] * (Zeta2 ** 2)\n D2[i] = Coeff[N - 1, i] * ((2.0 * A2[i] / np.pi) ** 0.75)\n\n # Calculate one electron integrals\n # Centre A is first atom centre B is second atom\n # Origin is on second atom\n # V12A - off diagonal nuclear attraction to centre A etc.\n for i in range(N):\n for j in range(N):\n # Rap2 - squared distance between centre A and centre P\n Rap = A2[j] * R / (A1[i] + A2[j])\n Rap2 = Rap ** 2\n Rbp2 = (R - Rap) ** 2\n S12 = S12 + S_int(A1[i], A2[j], R2) * D1[i] * D2[j]\n T11 = T11 + T_int(A1[i], A1[j], 0.0) * D1[i] * D1[j]\n T12 = T12 + T_int(A1[i], A2[j], R2) * D1[i] * D2[j]\n T22 = T22 + T_int(A2[i], A2[j], 0.0) * D2[i] * D2[j]\n V11A = V11A + V_int(A1[i], A1[j], 0.0, 0.0, Za) * D1[i] * D1[j]\n V12A = V12A + V_int(A1[i], A2[j], R2, Rap2, Za) * D1[i] * D2[j]\n V22A = V22A + V_int(A2[i], A2[j], 0.0, R2, Za) * D2[i] * D2[j]\n V11B = V11B + V_int(A1[i], A1[j], 0.0, R2, Zb) * D1[i] * D1[j]\n V12B = V12B + V_int(A1[i], A2[j], R2, Rbp2, Zb) * D1[i] * D2[j]\n V22B = V22B + V_int(A2[i], A2[j], 0.0, 0.0, Zb) * D2[i] * D2[j]\n\n # Calculate two electron integrals\n\n for i in range(N):\n for j in range(N):\n for k in range(N):\n for l in range(N):\n Rap = A2[i] * R / (A2[i] + A1[j])\n Rbp = R - Rap\n Raq = A2[k] * R / (A2[k] + A1[l])\n Rbq = R - Raq\n Rpq = Rap - Raq\n Rap2 = Rap * Rap\n Rbp2 = Rbp * Rbp\n Raq2 = Raq * Raq\n Rbq2 = Rbq * Rbq\n Rpq2 = Rpq * Rpq\n V1111 = V1111 + TwoE(A1[i], A1[j], A1[k], A1[l], 0.0, 0.0, 0.0) * D1[i] * D1[j] * D1[k] * D1[l]\n V2111 = V2111 + TwoE(A2[i], A1[j], A1[k], A1[l], R2, 0.0, Rap2) * D2[i] * D1[j] * D1[k] * D1[l]\n V2121 = V2121 + TwoE(A2[i], A1[j], A2[k], A1[l], R2, R2, Rpq2) * D2[i] * D1[j] * D2[k] * D1[l]\n V2211 = V2211 + TwoE(A2[i], A2[j], A1[k], A1[l], 0.0, 0.0, R2) * D2[i] * D2[j] * D1[k] * D1[l]\n V2221 = V2221 + TwoE(A2[i], A2[j], A2[k], A1[l], 0.0, R2, Rbq2) * D2[i] * D2[j] * D2[k] * D1[l]\n V2222 = V2222 + TwoE(A2[i], A2[j], A2[k], A2[l], 0.0, 0.0, 0.0) * D2[i] * D2[j] * D2[k] * D2[l]\n return", "def source_adj_gsdf(gmdata_sim,gmdata_obs,IsolationFilter,num_pts,dt): \n t = np.arange(num_pts)*dt\n ts=np.flip(-t[1:], axis=0)\n lTime = np.concatenate((ts,t), axis=0)#Lag time \n \n #convolve the waveforms for the cross- and auto-correlagrams \n cross = np.correlate(IsolationFilter,gmdata_obs,'full')\n auto = np.correlate(IsolationFilter,gmdata_sim,'full') \n \n #GSDF Parameters \n w0=2*np.pi/(lTime[-1]) \n# wN=2*np.pi/(2*dt)\n# w(:,1)=-wN:w0:wN\n wf=w0*np.linspace(-int(num_pts/2),int(num_pts/2),num_pts) \n fi = [0.05, 0.075, 0.1]\n# fi = [0.02, 0.03, 0.04, 0.05]\n# fi = [0.05, 0.075, 0.1, 0.125, 0.15, 0.175, 0.2]\n Tw = 2/np.mean(fi) # Effective window\n# sw = 2*np.pi*0.72/Tw; # Sigma w ~ 0.2827433388230814\n sw=0.1 \n \n# #% A local maximum will be selected closest to 0-lag\n# I_O=np.argmax(cross)\n# I_S=np.argmax(auto) \n\n I_O, peaks_O = find_peaks(np.abs(hilbert(cross))/np.max(np.abs(hilbert(cross))), height=0.25)\n I_S, peaks_S = find_peaks(np.abs(hilbert(auto))/np.max(np.abs(hilbert(auto))), height=0.25)\n\n PkO = peaks_O.get(\"peak_heights\", \"\")\n PkS = peaks_S.get(\"peak_heights\", \"\")\n\n if (I_O==[] or I_S==[]):\n I_O=np.argmax(cross)\n I_S=np.argmax(auto)\n else:\n I_O_min = np.argmin(np.multiply((1+np.abs(lTime[I_O]))**2,np.abs(1-PkO)))\n I_O = I_O[I_O_min]\n\n I_S_min = np.argmin(np.multiply((1+np.abs(lTime[I_S]))**2,np.abs(1-PkS)))\n I_S = I_S[I_S_min]\n \n ##Windowing\n win1=np.exp(-(0.5*sw**2)*(lTime-lTime[I_O])**2)\n 
win2=np.exp(-(0.5*sw**2)*(lTime-lTime[I_S])**2) \n \n #\n WO = np.multiply(win1,cross)\n WS = np.multiply(win2,auto)\n WS = WS*np.max(WO)/np.max(WS) #Normalized window by amplitude\n #% Parameters for \"bootstrapping\"\n InOR=np.argmax(WO)\n InSR=np.argmax(WS) \n \n #% Isolation filter FFT for perturbation kernel\n tff=np.conj(fftshift(fft(IsolationFilter)))*1/num_pts \n \n adj_sim_decompose = np.zeros((len(fi),num_pts))\n adj_sim_sum = np.zeros(num_pts)\n TauP_arr = np.zeros(len(fi)) \n \n ne = int(np.min([2/np.min(fi)/dt,num_pts/2])) #% Effective bandwidth for inversion\n \n for i in range(0,len(fi)): \n si = 0.1*fi[i]\n #Crosscorrelagram and Autocorrelagram filtering\n dO=computebandfftfilter_gauss(WO,dt,fi[i],si,lTime);\n dS=computebandfftfilter_gauss(WS,dt,fi[i],si,lTime); \n \n # % Check bootstrapping\n InO=np.argmax(np.real(dO))\n InS=np.argmax(np.real(dS)) \n \n BS = 1; Cn = 0;\n while BS == 1 or Cn < 10:\n InO=int(InO)\n if (lTime[InO] < lTime[InOR]+0.51/fi[i]) and (lTime[InO] >= lTime[InOR]-0.51/fi[i]):\n BS = 0\n elif (lTime[InO] >= (lTime[InOR]+0.45/fi[i])):\n InO=InO-np.round(1/fi[i]/dt)\n elif (lTime[InO] < lTime[InOR]-0.45/fi[i]):\n InO=InO+np.round(1/fi[i]/dt)\n Cn = Cn+1\n \n BS = 1; Cn = 0;\n while BS == 1 or Cn < 10:\n InS=int(InS) \n if (lTime[InS] < lTime[InSR]+0.51/fi[i]) and (lTime[InS] >= lTime[InSR]-0.51/fi[i]):\n BS = 0\n elif (lTime[InS] >= (lTime[InSR]+0.45/fi[i])):\n InS=InS-np.round(1/fi[i]/dt)\n elif (lTime[InS] < lTime[InSR]-0.45/fi[i]):\n InS=InS+np.round(1/fi[i]/dt)\n Cn = Cn+1 \n\n # Five parameter Gaussian wavelet fitting \n Ao = np.max(envelope(np.real(dO))); Io = np.argmax(envelope(np.real(dO)));\n As = np.max(envelope(np.real(dS))); Is = np.argmax(envelope(np.real(dS))); \n ##Constrain the initial values \n # Parameters for curve_fit\n wi=2*np.pi*fi[i] \n \n try:\n GaO, params_covariance = curve_fit(Eqn, lTime[Io-ne-1:Io+ne], np.real(dO[Io-ne-1:Io+ne]))\n GaS, params_covariance = curve_fit(Eqn, lTime[Is-ne-1:Is+ne], np.real(dS[Is-ne-1:Is+ne])) \n except:\n GaO = [Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]\n GaS = [As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]] \n\n# GaO, params_covariance = curve_fit(Eqn, lTime[Io-ne-1:Io+ne], np.real(dO[Io-ne-1:Io+ne]),bounds=(0,[Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]))\n# GaS, params_covariance = curve_fit(Eqn, lTime[Is-ne-1:Is+ne], np.real(dS[Is-ne-1:Is+ne]),bounds=(0,[As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]])) \n \n# % Check fitting\n if ((GaO[0]/GaS[0]) > 10**5) or np.abs(GaO[4]-GaS[4]) > lTime[-1]/2:\n GaO = [Ao, 2*np.pi*si, lTime[Io], 2*np.pi*fi[i], lTime[InO]]\n GaS = [As, 2*np.pi*si, lTime[Is], 2*np.pi*fi[i], lTime[InS]] \n \n wP=((si**2)*wf+(sw**2)*wi)/(sw**2+si**2)\n wPP=((si**2)*wf-(sw**2)*wi)/(sw**2+si**2)\n siP=((si**2)*(sw**2)/(sw**2+si**2))**0.5 \n #Estimate waveform perturbation kernel (WPK)\n IW=(siP/(sw*GaS[0]))*np.multiply(np.exp(-0.5*(wf-2*np.pi*fi[i])**2/(sw**2+si**2)),np.divide(tff,wP))+\\\n (siP/(sw*GaS[0]))*np.exp(-0.5*(wf+2*np.pi*fi[i])**2/(sw**2+si**2))*tff/wPP\n \n IW[0:int(len(IW)/2)]=0*IW[0:int(len(IW)/2)]\n \n itff = ifft(fftshift(num_pts*IW)) \n \n #Save the GSDF measurements\n TauP_arr[i] = GaO[4]-GaS[4]; #% delta_P\n \n# Jp = np.real(itff)\n# Jp = np.imag(itff)\n Jp = -np.imag(itff) \n adj_sim_decompose[i,:] = np.flip(Jp,axis=0)*TauP_arr[i] \n \n #if i>0:\n adj_sim_sum = adj_sim_sum + adj_sim_decompose[i,:] \n \n return adj_sim_sum, TauP_arr", "def set_sobol_g_func():\n a = np.zeros(21)\n x = np.zeros(21) \n\n # a[0:2] = 0\n # a[2:9] = 
[0.005, 0.020, 0.040, 0.060, 0.08, 0.090, 1]\n # a[9:16] = 2\n # a[16:24] = [2.10, 2.25, 2.75, 3, 3.10, 3.15, 3.25, 3.50]\n # a[24:30] = 9\n # a[30:44] = [8, 8.5, 9, 10, 10.5, 11, 12, 12.5, 13, 13.5, 14, 14.5, 15, 16]\n # a[44:] = [70, 75, 80, 85, 90, 99]\n\n a[0:2] = 0\n a[2:4] = [0.005, 0.090]\n a[4:7] = 2\n a[7:11] = [2.10, 2.75, 3, 3.15]\n a[11:15] = [8, 13, 13.5, 16]\n a[15:] = [70, 75, 80, 85, 90, 99]\n\n x_names = ['x' + str(i+1) for i in range(21)]\n len_params = len(x_names)\n x_bounds = np.zeros((21, 2))\n x_bounds[:, 0] = 0\n x_bounds[:, 1] = 1\n\n problem = {\n 'num_vars': len(x),\n 'names': x_names,\n 'bounds': x_bounds\n }\n return a, x, x_bounds, x_names, len_params, problem", "def grd_posterior_gaussian(self, ) -> Tuple[np.ndarray, np.ndarray]:\n xmin, xmax = self.x_range\n ymin, ymax = self.y_range\n\n mu = np.array([0, 0])\n sigma = np.zeros((2, 2))\n\n _sample = self._sample\n _prior = self.prior\n\n def mean_x(x: float, y: float):\n return x * _sample(x, y) * _prior.eval(x, y)\n\n def mean_y(x: float, y: float):\n return y * _sample(x, y) * _prior.eval(x, y)\n\n def var_x(x: float, y: float):\n return x * mean_x(x, y)\n\n def var_y(x: float, y: float):\n return y * mean_y(x, y)\n\n # def var_xy(x: float, y: float):\n # return x * mean_y(x, y)\n\n # First moment\n (mu[0], mu[1]) = (integrate.dblquad(mean_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(mean_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0])\n (sigma[0, 0], sigma[1, 1]) = \\\n (integrate.dblquad(var_x, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],\n integrate.dblquad(var_y, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n # integrate.dblquad(var_xy, xmin, xmax, lambda x: ymin, lambda x: ymax)[0],)\n return mu, sigma", "def map_sim_positions(**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n fig, ax1 = plt.subplots(figsize=(10,10))\n # p.gal_index = np.where(GR.file_name == 'z0.00_G7169_cG29270')[0][0]\n gal_ob = gal.galaxy(GR=GR, gal_index=p.gal_index)\n # print('TEST!',gal_ob.file_name,p.gal_index)\n simdata = aux.load_temp_file(gal_ob=gal_ob,data_type=p.sim_type)\n\n # Plot\n print(simdata.head())\n ax1.plot(simdata.x,simdata.y,'o',ms=2,mew=2)\n\n print(gal_ob.radius)\n # Limit axes limits a bit to avoid area with no particles...\n # ax1.set_xlim([-2/3*gal_ob.radius,2/3*gal_ob.radius])make_projec\n # ax1.set_ylim([-2/3*gal_ob.radius,2/3*gal_ob.radius])\n ax1.set_xlabel('x [kpc]'); ax1.set_ylabel('y [kpc]')", "def plt_npr_gaussian_all(tb, npr, sigma, soil, snow, onset, figname='all_plot_test0420', size=(12, 8), xlims=[0, 365],\n shade=False, title=False, site_no='947', pp=False, subs=5, s_symbol='k.',\n day_tout=-1, end_ax1=[0, 0], end_ax2=[0, 0], end_ax3=[0, 0], tair=[], snow_plot=False):\n # ylim for each station\n site_lim = {'947': [-17, -7], '949': [-13, -7], '950': [-13, -7], '960': [-14, -8], '962': [-15, -8], '967': [-12, -8], '968': [-17, -7],\n '1089': [-13, -7], '1090': [-14, -7], '1175': [-15, -8], '1177': [-19, -10],\n '1233': [-17, -6], '2065': [-14, -8], '2081': [-15, -7], '2210': [-16, -8], '2211': [-16, -8], '2212': [-16, -8],\n '2213': [-17, -10]}\n axs = []\n fig = plt.figure(figsize=size)\n gs0 = gridspec.GridSpec(5, 1)\n gs00 = gridspec.GridSpecFromSubplotSpec(4, 1, subplot_spec=gs0[0])\n ax0 = plt.Subplot(fig, gs00[-1, :])\n # ax1, ax2, ax3, ax4 = plt.subplot(fig, gs0[1]), plt.subplot(fig, gs0[2]), \\\n # plt.subplot(fig, gs0[3]), plt.subplot(fig, gs0[4])\n fig, (ax1, ax2, ax3, ax4) = 
plt.subplots(4, figsize=size, sharex=True) # sharex\n params = {'mathtext.default': 'regular'}\n plt.rcParams.update(params)\n\n # # add a time window bar 2018/05 updated\n # nr, st = 20, 9\n # sub_no = 4*nr+st\n # ax0, ax1, ax2, ax3, ax4 = plt.subplot2grid((sub_no, 1), (0, 0), rowspan=st), \\\n # plt.subplot2grid((sub_no, 1), (st, 0), rowspan=nr), \\\n # plt.subplot2grid((sub_no, 1), (st+nr, 0), rowspan=nr), \\\n # plt.subplot2grid((sub_no, 1), (st+2*nr, 0), rowspan=nr), \\\n # plt.subplot2grid((sub_no, 1), (st+3*nr, 0), rowspan=nr)\n\n # params = {'mathtext.default': 'regular'}\n # plt.rcParams.update(params)\n # row 1 tb\n # ax1 = fig.add_subplot(511) # tb\n # ax0 setting, for boundary of seasons\n # timings = [60, 150, 260, 350, 366]\n # timing_color = ['aqua', 'red', 'orange', 'blue', 'aqua']\n # timing_color_rgba = plot_funcs.check_rgba(timing_color)\n # timing_color_rgba[3] = [0., .3, 1., 1.]\n # print timing_color_rgba\n # timing_name = [\"Frozen\", \"Thawing\", \"Thawed\", \"Freezing\", \" \"]\n # fill_y1 = 1\n # ax0.plot(soil[0][1], soil[0][2]*0)\n # plot_funcs.make_ticklabels_invisible(ax0) # hide the y_tick\n # ax0.tick_params(axis='x', which='both', bottom='off', top='off')\n # ax0.tick_params(axis='y', which='both', left='off', right='off')\n # text_x0 = 0\n #\n # for i in range(0, len(timings)):\n # ax0.fill_between(np.arange(text_x0, timings[i]), fill_y1, color=timing_color_rgba[i])\n # text_x = 0.5*(timings[i]+text_x0)\n # print text_x\n # ax0.text(text_x, 0.5, timing_name[i], va=\"center\", ha=\"center\") # 1.3 up\n # text_x0 = timings[i]+1\n # if i < len(timings)-1:\n\n # # add vertical line and label\n # ax0.axvline(timings[i])\n # ax0.text(timings[i], 1.3, timings[i], va=\"center\", ha=\"center\")\n\n\n print np.nanmax(soil[0][1]), np.nanmin(soil[0][1])\n ax0.set_xlim(xlims)\n axs.append(ax1)\n # l1, = ax1.plot(tb[0][0], tb[0][1], 'bo', markersize=2)\n _, ax1_2, l1 = plot_funcs.pltyy(tb[0][0], tb[0][1], 'test_comp2', 'T$_b$ (K)',\n t2=tb[2][0], s2=tb[2][1], label_y2= '$E_{Tbv}$\\n(K/day)',\n symbol=['k.', 'g-'],\n handle=[fig, ax1], nbins2=6) # plot tbv\n l1_le = plot_funcs.plt_more(ax1, tb[1][0], tb[1][1], line_list=[l1])\n # ax1.locator_params(axis='y', nbins=4)\n # ax1_2.axhline(y=0)\n ax1.set_ylabel('T$_b$ (K)')\n # ax1.legend([l1_le[0][0], l1_le[1]], ['T$_{BV}$', 'T$_{BH}$'], loc=3, prop={'size': 6})\n if title is not False:\n plt.title(title)\n #ax4.legend([l4], [fe[0]], loc=2, prop={'size': 10})\n # ax4.text(0.85, 0.85, '(a)')\n # fig.text(0.85, 0.21, '(e)')\n # fig.text(0.85, 0.37, '(d)')\n # fig.text(0.85, 0.53, '(c)')\n # fig.text(0.85, 0.705, '(b)')\n # fig.text(0.85, 0.87, '(a)')\n #ax1.annotate(str(vline[0]), xy=(vline[0], 260))\n #ax1.annotate(str(vline[1]), xy=(vline[1], 260))\n # row2 npr\n # ax2 = fig.add_subplot(512) # npr\n axs.append(ax2)\n _, ax2_2, l2 = plot_funcs.pltyy(npr[0][0], npr[0][1], 'test_comp2', 'NPR ($10^{-2}$)',\n t2=npr[1][0], s2=npr[1][1], label_y2='$E_{NPR}$\\n($10^{-2}$/day)',\n symbol=[s_symbol, 'g-'], handle=[fig, ax2], nbins2=6)\n # ax2.locator_params(axis='y', nbins=5)\n #ax0.set_ylim([0, 0.06])\n #ax0.set_ylim([0, 0.06])\n\n # sigma\n # ax3 = fig.add_subplot(513) # sigma\n axs.append(ax3)\n _, ax3_2, l2 = plot_funcs.pltyy(sigma[0][0], sigma[0][1], 'test_comp2', '$\\sigma^0_{45} (dB)$',\n t2=sigma[1][0], s2=sigma[1][1], label_y2='$E_{\\sigma^0_{45}}$\\n(dB/day)',\n symbol=[s_symbol, 'g-'], handle=[fig, ax3], nbins2=6)\n # ax3.set_ylim(site_lim[site_no])\n # ax3.locator_params(axis='y', nbins=4)\n\n # moisture and temperature\n 
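# Panel 4 pairs in-situ soil moisture (VWC) with soil temperature on twin y-axes via pltyy.\n 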
# ax4 = fig.add_subplot(514) # T soil and temperature\n axs.append(ax4)\n if snow_plot is False:\n _, ax4_2, l2 = plot_funcs.pltyy(soil[0][1], soil[0][2], 'test_comp2', 'VWC (%)',\n t2=soil[1][1], s2=soil[1][2], label_y2='T$_{soil}$ ($^\\circ$C)',\n symbol=['k-', 'b-'], handle=[fig, ax4], nbins2=6)\n for ax_2 in [ax4_2]:\n ax_2.axhline(ls='--', lw=1.5)\n else:\n ax4.plot(snow[1], snow[2], 'k', linewidth=2.0)\n ax4_2 = ax4.twinx()\n if len(tair) > 0:\n tair[1][tair[1] < -60] = np.nan\n ax4_2.plot(tair[0], tair[1], 'k:')\n ax4_2.set_ylim([-30, 30])\n ax4_2.axhline(ls='--', lw=1.5)\n ax4_2.yaxis.set_major_locator(MaxNLocator(5))\n ax4_2.set_ylabel('T$_{air}$ ($^o$C)')\n\n\n ax2s = [ax1_2, ax2_2, ax3_2, ax4_2]\n ax_ins = [ax4]\n # swe\n # ax5 = fig.add_subplot(515) # swe\n # axs.append(ax5)\n # ax_ins.append(ax5)\n # ax5.plot(snow[1], snow[2], 'k', linewidth=2.0)\n # add air temperature\n # if len(tair) > 0:\n # ax5_2 = ax5.twinx()\n # ax2s.append(ax5_2)\n # tair[1][tair[1] < -60] = np.nan\n # ax5_2.plot(tair[0], tair[1], 'k:')\n # ax5_2.set_ylim([-30, 30])\n # ax5_2.axhline(ls='--', lw=1.5)\n # ax5_2.yaxis.set_major_locator(MaxNLocator(5))\n # ax5_2.set_ylabel('T$_{air}$ ($^o$C)')\n # if not pp:\n # if site_no in ['947', '949', '950', '967', '1089']:\n # ax5.set_ylabel('SWE (mm)')\n # ax5.set_ylim([0, 200])\n # else:\n # ax5.set_ylabel('SD (cm)')\n # ax5.set_ylim([0, 100])\n # if site_no in ['950', '1089']:\n # ax5.set_ylim([0, 500])\n # else:\n # ax5.set_ylabel('precipitation (mm)')\n # ax4.set_xlabel('Day of year 2016')\n\n # add vertical line\n lz = ['--', '--', '--']\n labelz = ['$\\sigma^0$', 'TB', 'NPR']\n if onset.size> 4: # freeze and thaw\n i2 = -1\n # for ax in [ax4, ax5]:\n # # for i in [0, 1, 2]:\n # for i in [0, 1, 2]:\n # ax.axvline(x=onset[i*2], color='k', ls=lz[i], label=labelz[i])\n # ax.axvline(x=onset[i*2+1], color='k', ls=lz[i])\n for ax in [ax3, ax1, ax2]:\n # ax.axvline(x=onset[-2], color='r', ls='-', label='in situ')\n # ax.axvline(x=onset[-1], color='r', ls='-')\n i2 += 1\n ax.axvline(x=onset[i2*2], color='k', ls=lz[i2], label=labelz[i2])\n ax.axvline(x=onset[i2*2+1], color='k', ls=lz[i2])\n elif onset.size <=4:\n for ax in ax_ins:\n for i in [0]:\n ax.axvline(x=onset[i], color='k', ls=lz[i], label=labelz[i])\n ax.axvline(x=onset[i+1], color='k', ls=lz[i+1])\n ax.axvline(x=onset[i+2], color='k', ls=lz[i+2])\n\n l2d_sm = ax4.axvline(x=onset[6], color='r', ls='--')\n ax4.axvline(x=onset[7], color='r', ls='--')\n\n # special vline\n if day_tout > 0:\n l2d, = ax4.axvline(x=day_tout, color='r', ls=':')\n ax1.axvline(x=end_ax1[0], color='b', ls='--')\n ax1.axvline(x=end_ax1[1], color='b', ls='--')\n ax2.axvline(x=end_ax2[0], color='b', ls='-')\n ax2.axvline(x=end_ax2[1], color='b', ls='-')\n ax3.axvline(x=end_ax3[0], color='b', ls=':')\n ax3.axvline(x=end_ax3[1], color='b', ls=':')\n\n # plot settings\n # ticks setting\n for ax in ax_ins:\n ax.yaxis.set_major_locator(MaxNLocator(4))\n for ax in axs:\n yticks = ax.yaxis.get_major_ticks()\n yticks[0].label1.set_visible(False)\n yticks[-1].label1.set_visible(False)\n for ax in ax2s:\n yticks = ax.yaxis.get_major_ticks()\n yticks[0].label2.set_visible(False)\n yticks[-1].label2.set_visible(False)\n # label location\n text4 = ['a', 'b', 'c', 'd', 'e']\n i4 = -1\n for i, ax in enumerate(axs):\n ax.yaxis.set_major_locator(MaxNLocator(4))\n i4 += 1\n ax.get_yaxis().set_label_coords(-0.09, 0.5)\n ax.text(0.02, 0.95, text4[i], transform=ax.transAxes, va='top', fontsize=16)\n # ax.annotate(text4[i4], xy=get_axis_limits(ax), 
fontweight='bold')\n for ax in ax2s:\n ax.yaxis.set_major_locator(MaxNLocator(4))\n ax.get_yaxis().set_label_coords(1.10, 0.5) # position of labels\n\n # ylims\n ax3_2.set_ylim([-3, 2])\n ax1.set_ylim([210, 280])\n if site_no == '1233':\n ax1.set_ylim([180, 275])\n ax3_2.set_ylim([-3, 3])\n # ax1_2.set_ylim([-9, 9])\n if site_no == '1177':\n st = 0\n else:\n ax2.set_ylim([0, 6])\n ax2_2.set_ylim([-2, 2])\n\n # x_label\n for i3 in range(0, 4):\n axs[i3].set_xlabel('')\n\n if shade is False:\n shade_window = 'no shade'\n else:\n for ax in axs:\n for shade0 in shade:\n ax.axvspan(shade0[0], shade0[1], color=(0.8, 0.8, 0.8), alpha=0.5, lw=0)\n if xlims:\n for ax in axs:\n ax.set_xlim(xlims)\n\n # legend setting\n leg1 = ax1.legend([l1_le[0][0], l1_le[1]], ['T$_{bv}$', 'T$_{bh}$'],\n loc=3, ncol=1, prop={'size': 12}, numpoints=1)\n # for leg in [leg1]:\n # leg.get_frame().set_linewidth(0.0)\n # layout setting\n ax4.set_xlabel('Day of year 2016')\n plt.tight_layout()\n\n # if site_no == '1233':\n # ax1.set_visible(False)\n # ax1_2.set_visible(False)\n # ax3.set_visible(False)\n # ax3_2.set_visible(False)\n\n fig.subplots_adjust(hspace=0.05)\n\n # other setting like the title\n\n\n # ax_name = ['tb', 'npr', 'sig', 'VWC', 'SWE', 'tbG', 'nprG', 'sigG', 'tsoil']\n # ax_i = 0\n # yticks = ax2.yaxis.get_major_ticks()\n # yticks[0].label1.set_visible(False)\n # yticks[-2].label1.set_visible(False)\n # for ax in [ax1, ax2, ax3, ax4, ax5]:\n # yticks = ax.yaxis.get_major_ticks()\n # yticks[0].label1.set_visible(False)\n # yticks[-1].label1.set_visible(False)\n # for ax in [ax1_2, ax2_2, ax3_2, ax4_2]:\n # yticks = ax.yaxis.get_major_ticks()\n # yticks[0].label2.set_visible(False)\n # yticks[-1].label2.set_visible(False)\n plt.rcParams.update({'font.size': 16})\n print figname\n plt.savefig(figname, dpi=300)\n plt.close()\n\n return 0\n if vline is not False:\n ax_count = 0\n for ax in axs:\n ax.set_xlim([0, 350])\n ax_count += 1\n if type(vline) is list:\n ax.axvline(x=vline[0], color='k', ls='--')\n ax.axvline(x=vline[1], color='k', ls='-.')\n continue\n ax.axvline(x=vline[0, 1], color='k', label=repr(vline[0, 1]), ls='--')\n ax.axvline(x=vline[1, 1], color='k', label=repr(vline[1, 1]), ls='-.')\n ax.axvline(x=vline[2, 1], color='k', label=repr(vline[1, 1]), ls='--')\n ax.axvline(x=vline[3, 1], color='k', label=repr(vline[1, 1]), ls='-.')\n # ax.xaxis.set_minor_locator(months)\n # ax.xaxis.set_minor_formatter(monthsFmt)\n # ax.xaxis.set_major_locator(years)\n # ax.xaxis.set_major_formatter(yearsFmt)\n # ax.locator_params(axis='x', nbins=16)\n tick_num = np.array([50, 100, 150, 200, 250, 300, 350, 365, 415, 465, 515, 565, 615, 665, 715], dtype=int)\n ax.xaxis.set_ticks(tick_num)\n labels = [item.get_text() for item in ax.get_xticklabels()]\n n = 0\n for label in labels:\n if tick_num[n] == 50:\n if ax_count == 5:\n labels[n] = \"50\\nYear'15 \"\n else:\n labels[n] = \"50\"\n elif tick_num[n] == 350:\n labels[n] = ' '\n elif tick_num[n] == 365:\n labels[n] = \"365\\n Year'16\"\n elif tick_num[n] == 415:\n labels[n] = repr(tick_num[n]-365)\n elif tick_num[n] > 415:\n labels[n] = repr(tick_num[n]-365)\n else:\n labels[n] = repr(tick_num[n])\n n += 1\n # labels[0] = 'Year\\n2015'\n # labels[1] = '100'\n # labels[2] = '150'\n # labels[3] = '200'\n # labels[4] = '250'\n # labels[5] = '300'\n # labels[6] = ''\n # labels[7] = 'Year\\n2016'\n # labels[8] = '50'\n # labels[9] = '100'\n # labels[10] = '150'\n # labels[11] = '200'\n\n ax.set_xticklabels(labels)\n plt.savefig(figname, dpi=300)\n plt.close()", "def 
_compute_GP_variables(self):\r\n Wi = 1.0/self.W\r\n self.Sigma_tilde = np.diagflat(Wi)\r\n\r\n Y_tilde = Wi*self.Ki_f + self.f_hat\r\n\r\n self.Wi_K_i = self.W12BiW12\r\n ln_det_Wi_K = pddet(self.Sigma_tilde + self.K)\r\n lik = self.noise_model.logpdf(self.f_hat, self.data, extra_data=self.extra_data)\r\n y_Wi_K_i_y = mdot(Y_tilde.T, self.Wi_K_i, Y_tilde)\r\n\r\n Z_tilde = (+ lik\r\n - 0.5*self.ln_B_det\r\n + 0.5*ln_det_Wi_K\r\n - 0.5*self.f_Ki_f\r\n + 0.5*y_Wi_K_i_y\r\n + self.NORMAL_CONST\r\n )\r\n\r\n #Convert to float as its (1, 1) and Z must be a scalar\r\n self.Z = np.float64(Z_tilde)\r\n self.Y = Y_tilde\r\n self.YYT = np.dot(self.Y, self.Y.T)\r\n self.covariance_matrix = self.Sigma_tilde\r\n self.precision = 1.0 / np.diag(self.covariance_matrix)[:, None]\r\n\r\n #Compute dZ_dK which is how the approximated distributions gradients differ from the dL_dK computed for other likelihoods\r\n self.dZ_dK = self._Kgradients()\r\n #+ 0.5*self.Wi_K_i - 0.5*np.dot(self.Ki_f, self.Ki_f.T) #since we are not adding the K gradients explicit part theres no need to compute this again\r", "def addGaussian(ax, ismulti):\n shape = (96, 288) #ax.shape[:2]\n intensity_noise = np.random.uniform(low=0, high=0.05)\n if ismulti:\n ax[:,:,0] = ax[:,:,0]*(1+ intensity_noise*np.random.normal(loc=0, scale=1, size=shape[0]*shape[1]).reshape(shape[0],shape[1]))\n else:\n ax[:,:,0] = ax[:,:,0] + intensity_noise*np.random.normal(loc=0, scale=1, size=shape[0]*shape[1]).reshape(shape[0],shape[1])\n return ax", "def gaus(x,A,B,C):\n return A * np.exp( -(x-B)**2 / (2*C**2))", "def make_egge(w,minZ,maxZ,ires=1,m=mz0):\n cmds = []\n # coefficients for the amplitudes\n cmds.append(\"A[1,0,1000000]\")\n cmds.append(\"B[1,0,1000000]\")\n cmds.append(\"C[10000.0,0,1000000]\")\n # amplitudes\n cmds.append('m[%s,%s,%s]'%(m,minZ,maxZ))\n cmds.append('g[8,0,100]')\n denom = '((x^2-m^2)^2+g^2*m^2)'\n cmds.append(\"expr::z_rbw('x^2/%s',x,m,g)\"%denom)\n cmds.append(\"expr::z_int('(x^2-m^2)/%s',x,m,g)\"%denom)\n cmds.append(\"expr::z_rad('1/(x^2+1)',x)\")\n # resolution model\n cmds += resolutions[ires]()\n [w.factory(cmd) for cmd in cmds]\n # sum-of-amplitudes pdf\n lshape = RooRealSumPdf('lshape','lshape',RooArgList(w.function('z_rad'),w.function('z_int'),w.function('z_rbw')),RooArgList(w.var('A'),w.var('B'),w.var('C')))\n getattr(w,'import')(lshape)\n # convolution\n pdf = w.pdf('lshape')\n if w.pdf('res'):\n w.var('x').setBins(10000,'cache')\n cmd = 'FCONV::sum(x,lshape,res)'\n w.factory(cmd)\n pdf = w.pdf('sum')\n return pdf, kFALSE", "def perform_bivariate_3_gaussians_exp(N, pp, mu_1, mu_2, mu_3,\n sigma_1, sigma_2, sigma_3, \n truncation_bounds, censoring_bounds,\n max_iteration=50, seed=100):\n # Fix the random state\n random.seed(seed)\n np.random.seed(seed)\n\n # Stack and reorder the means and covariance matrices into unified matrices\n mu = np.stack([mu_1, mu_2, mu_3], axis=0)\n sigma = np.stack([sigma_1, sigma_2, sigma_3], axis=2)\n pp, mu, sigma = reorder_gmm_compoments(pp, mu, sigma)\n K = mu.shape[0]\n\n # Generate GMM data\n print(\"Step #1: Generating a Gaussian-Mixture-Model dataset\")\n print(\"True parameters:\")\n print(\"pp: \\n{}\\n\".format(pp))\n print(\"mu: \\n{}\\n\".format(mu))\n print(\"sigma: \\n{}\\n\".format(sigma.T))\n y = generate_gmm_data(pp, mu, sigma, N)\n\n # Plot the GMM data\n plt.figure()\n ax = plot_gmm_data(y, mu, sigma, point_color='black')\n plt.title(\"The Original Data Generated by Three Gaussian Components\")\n print(\"\\n\" + \"*\"*80)\n\n # Perform censoring and truncation on the 
original data\n print(\"Step #2: Censoring and truncating the data\")\n x = censor_and_truncate_data(y)\n\n # Plot the censored and truncated data\n plt.figure()\n ax = plot_gmm_data(y, mu, sigma, point_color='red')\n plt.title(\"Truncated and Censored Data\")\n print(\"\\n\" + \"*\"*80)\n\n # Init parameters using K-means\n print(\"Step #3: Initializing parameters using K-means\")\n par = init_kmeans(x, K)\n print(\"\\n\" + \"*\"*80)\n\n # Estimating parameters using truncated and censored EM\n print(\"Step #4: Estimating parameters using truncated and censored EM\")\n tc_em_results = perform_truncated_em(x, K, \n truncation_bounds, censoring_bounds, \n par['pp'], par['mu'], par['sigma'], \n max_iteration)\n\n print(\"Estimated parameters by standard EM:\")\n print(\"pp: \\n{}\\n\".format(tc_em_results['pp']))\n print(\"mu: \\n{}\\n\".format(tc_em_results['mu']))\n print(\"sigma: \\n{}\\n\".format(tc_em_results['sigma'].T))\n \n plt.figure()\n plt.plot(range(len(tc_em_results['ll_hist'])), tc_em_results['ll_hist'])\n plt.title(\"Learning Curve of the Truncated and Censored EM\")\n plt.xlabel(\"Iteration\")\n plt.ylabel(\"Log-likelihood\")\n\n plt.figure()\n ax = plot_gmm_data(x, tc_em_results['mu'], tc_em_results['sigma'])\n plt.title(\"Truncated and Censored EM\")\n print(\"\\n\" + \"*\"*80)\n\n print(\"Step #5: Estimating parameters using standard EM\")\n std_em_results = perform_standard_em(x, K, seed)\n\n print(\"Estimated parameters by standard EM:\")\n print(\"pp: \\n{}\\n\".format(std_em_results['pp']))\n print(\"mu: \\n{}\\n\".format(std_em_results['mu']))\n print(\"sigma: \\n{}\\n\".format(std_em_results['sigma'].T))\n\n plt.figure()\n ax = plot_gmm_data(x, std_em_results['mu'], std_em_results['sigma'])\n plt.title(\"Standard EM\")\n print(\"\\n\" + \"*\"*80)\n\n # Evaluate the KL-Divergence between true distribution and estimated \n # distributions\n print(\"Step #6: Evaluating the estimated parameters\")\n pp, mu, sigma = reorder_gmm_compoments(pp, mu, sigma)\n tc_em_results['pp'], tc_em_results['mu'], tc_em_results['sigma'] =\\\n reorder_gmm_compoments(tc_em_results['pp'], tc_em_results['mu'],\n tc_em_results['sigma'])\n std_em_results['pp'], std_em_results['mu'], std_em_results['sigma'] =\\\n reorder_gmm_compoments(std_em_results['pp'], std_em_results['mu'],\n std_em_results['sigma'])\n \n true_gmm = build_GMM_model(pp, mu, sigma, seed)\n tc_gmm = build_GMM_model(tc_em_results['pp'], tc_em_results['mu'],\n tc_em_results['sigma'], seed)\n std_gmm = build_GMM_model(std_em_results['pp'], std_em_results['mu'],\n std_em_results['sigma'], seed)\n \n tc_kl = estimate_kl_divergence_gmm(true_gmm, tc_gmm)\n std_kl = estimate_kl_divergence_gmm(true_gmm, std_gmm) \n\n print(\"\\t* KL-Divergence corresponding to truncated and censored EM: {}\".\\\n format(tc_kl))\n print(\"\\t* KL-Divergence corresponding to standard EM: {}\".\\\n format(std_kl))\n print(\"\\n\" + \"*\"*80)\n\n # Show the plots\n print(\"Step #7: Showing the plots\")\n plt.show()", "def _get_gaussian_maps(self, x, source_str, prefix='coarse_', **kwargs):\n suffix = source_str if self.ds_gaussians else ''\n gaussians = self.__getattr__(prefix + \"gaussians\" + suffix)\n gaussian_maps = self._make_gaussian_maps(x, gaussians, **kwargs)\n return gaussian_maps", "def cell_params(x,y,**kwargs):\n\n GR = glo.global_results()\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n cloudy_library = clo.library()\n lookup_table = cloudy_library._restore_lookup_table()\n lookup_table['logG0s'] = 
lookup_table['logFUVs']\n if x == 'NH': \n x_cloudy,R_NIR_FUV_cl = aux.get_NH_from_cloudy()\n else:\n x_cloudy = np.unique(lookup_table['log'+x+'s'])\n if y == 'NH': \n y_cloudy,R_NIR_FUV_cl = aux.get_NH_from_cloudy()\n else:\n y_cloudy = np.unique(lookup_table['log'+y+'s'])\n\n if not p.ylim:\n p.ylim = [1e-3,30]\n if not p.xlim:\n p.xlim = [1e-7,1e3]\n \n # SELECT GALAXIES\n rand_gal_index = np.random.randint(0, GR.N_gal, size=(p.bins))\n if p.bins == GR.N_gal: rand_gal_index = np.arange(GR.N_gal)\n if p.gal_index: \n rand_gal_index = [p.gal_index]\n print(rand_gal_index)\n xs = np.array([])\n ys = np.array([])\n m_tot,m_encomp,m_y0 = 0,0,0\n for gal_index in rand_gal_index:\n print(gal_index)\n gal_ob = gal.galaxy(gal_index)\n df = gal_ob.cell_data.get_dataframe()\n df['nSFR'] = df.nSFR.values#/(0.2**3)\n #df['nSFR'] = df['SFR_density']\n #df['NH'] = 10.**df['NH']\n x1 = df[x].values\n y1 = df[y].values\n x1[x1 <= p.xlim[0]] = p.xlim[0]\n y1[y1 <= p.ylim[0]] = p.ylim[0]\n m_tot += np.sum(df.m.values)\n m_encomp += np.sum(df.m[(x1>=p.xlim[0]) & (y1>=p.ylim[0])].values)\n m_y0 += np.sum(df.m[(y1 == 0)].values)\n #print(x,x1.min(),x1.max())\n #print(y,y1.min(),y1.max())\n ys = np.append(ys,y1[(x1>=p.xlim[0]) & (y1>=p.ylim[0])])\n xs = np.append(xs,x1[(x1>=p.xlim[0]) & (y1>=p.ylim[0])])\n print('Min max of %s:' % x)\n print(xs.min(),xs.max())\n print('Min max of %s:' % y)\n print(ys.min(),ys.max())\n fig,ax = plt.subplots(figsize=(10,8))\n hb = ax.hexbin(xs,ys,xscale='log',yscale='log',bins='log',mincnt=1,lw=None,gridsize=50,cmap='inferno')\n cb = fig.colorbar(hb, ax=ax)\n cb.set_label('Number of cells in %i galaxies' % len(rand_gal_index))\n ax.set_xlabel(getlabel(x))\n ax.set_ylabel(getlabel(y))\n print('Total gas mass fraction encompassed: %.4f%%' % (m_encomp/m_tot*100))\n print('Total gas mass fraction with y = 0: %.4f%%' % (m_y0/m_tot*100))\n ax.set_xlim(p.xlim)\n ax.set_ylim(p.ylim)\n # Overplot Cloudy grid params\n print(x,x_cloudy)\n print(y,y_cloudy)\n for x1 in x_cloudy:\n ax.plot([10**x1,10**x1],ax.get_ylim(),'-',color='white',alpha=0.7)\n ax.plot([10**x1,10**x1],ax.get_ylim(),'--k',alpha=0.7)\n for y1 in y_cloudy:\n ax.plot(ax.get_xlim(),[10.**y1,10.**y1],'-',color='white',alpha=0.7)\n ax.plot(ax.get_xlim(),[10.**y1,10.**y1],'--k',alpha=0.7)\n\n if not os.path.isdir(p.d_plot + 'cell_data/'): os.mkdir(p.d_plot + 'cell_data/') \n plt.savefig('plots/cell_data/%s%s_cell_params_%s_%s_%s.png' % (p.sim_name,p.sim_run,p.z1,x,y),dpi=250, facecolor='w')", "def skybass_sampling_rates(data):\n for i in range(4):\n fig = plt.figure()\n # TODO: finish", "def WENOSG_interpolation(sp0,sp1,sp2,sp3,sp4):\n\n epsilon = 10**-12\n\n p0 = (1./3.)*sp0 - (7./6.)*sp1 + (11./6.)*sp2\n p1 = -(1./6.)*sp1 + (5./6.)*sp2 + (1./3.)*sp3\n p2 = (1./3.)*sp2 + (5./6.)*sp3 - (1./6.)*sp4\n\n gamma = [.1, .6, .3]\n\n beta0 = (13./12.)*(sp0 - 2*sp1 + sp2)**2 + .25*(sp0 - 4*sp1 + 3*sp2)**2\n beta1 = (13./12.)*(sp1 - 2*sp2 + sp3)**2 + .25*(sp1 - sp3)**2\n beta2 = (13./12.)*(sp2 - 2*sp3 + sp4)**2 + .25*(3*sp2 - 4*sp3 + sp4)**2\n\n c00 = .25\n c10 = .75\n c01 = .5\n c11 = .5\n\n tau0 = abs(beta1 - beta0)\n tau1 = abs(beta2 - beta1)\n\n alpha00 = c00*(1 + (tau0 / (beta0 + epsilon))**2)\n alpha10 = c10*(1 + (tau0 / (beta1 + epsilon))**2)\n alpha01 = c01*(1 + (tau1 / (beta1 + epsilon))**2)\n alpha11 = c11*(1 + (tau1 / (beta2 + epsilon))**2)\n\n alphakl_sum = alpha00 + alpha10 + alpha01 + alpha11\n\n psi00 = alpha00 / alphakl_sum\n psi10 = alpha10 / alphakl_sum\n psi01 = alpha01 / alphakl_sum\n psi11 = alpha11 / alphakl_sum\n\n 
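# g is a weight-mapping function in the spirit of Henrick et al.'s mapped WENO: it pushes the\n # normalized nonlinear weights w back toward the ideal weights gamma[k] in smooth regions.\n 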
def g(k, w):\n return (w * (gamma[k] + gamma[k]**2 - 3*gamma[k]*w + w**2))/(gamma[k]**2 + w*(1-2*gamma[k]))\n\n alphastar00 = g(0, psi00)\n alphastar10 = g(1, psi10)\n alphastar01 = g(0, psi01)\n alphastar11 = g(1, psi11)\n\n alphastar_sum = alphastar00 + alphastar10 + alphastar01 + alphastar11\n\n w00 = alphastar00 / alphastar_sum\n w10 = alphastar10 / alphastar_sum\n w01 = alphastar01 / alphastar_sum\n w11 = alphastar11 / alphastar_sum\n\n h0 = w00*p0 + w10*p1\n h1 = w01*p1 + w11*p2\n\n tau = abs(beta2 - beta0)\n\n c0 = .4\n c1 = .6\n\n alpha0 = c0 * (1 + (tau / (beta0 + epsilon))**2)\n alpha1 = c1 * (1 + (tau / (beta2 + epsilon))**2)\n\n alpha_sum = alpha0 + alpha1\n\n psi0 = alpha0 / alpha_sum\n psi1 = alpha1 / alpha_sum\n\n psi_sum = g(0, psi0) + g(1, psi1)\n\n w0SG = g(0, psi0) / psi_sum\n w1SG = g(1, psi1) / psi_sum\n\n u_half = w0SG * h0 + w1SG * h1\n\n return u_half", "def evolute(self, ngen, x0=None, verbose=True):\r\n self.history = {'local_fitness':[], 'global_fitness':[], 'a': [], 'A': []}\r\n self.best_fitness=float(\"inf\") \r\n self.verbose=verbose\r\n self.Positions = np.zeros((self.nwhales, self.dim))\r\n if x0:\r\n assert len(x0) == self.nwhales, '--error: the length of x0 ({}) MUST equal the number of whales in the group ({})'.format(len(x0), self.nwhales)\r\n for i in range(self.nwhales):\r\n self.Positions[i,:] = x0[i]\r\n else:\r\n #self.Positions=self.init_sample(self.bounds) #TODO, update later for mixed-integer optimisation\r\n # Initialize the positions of whales\r\n \r\n for i in range(self.dim):\r\n self.Positions[:, i] = (np.random.uniform(0, 1, self.nwhales) * (self.ub[i] - self.lb[i]) + self.lb[i])\r\n \r\n fitness0=self.eval_whales()\r\n \r\n self.best_position, self.best_fitness = self.select(self.Positions, fitness0)\r\n \r\n for k in range(0, ngen):\r\n \r\n # a is annealed from 2 to 0\r\n self.a = self.a0 - k * ((self.a0) / (ngen))\r\n # fac is annealed from -1 to -2 to estimate l\r\n self.fac = -1 + k * ((-1) / ngen)\r\n #-----------------------------\r\n # Update Whale Positions\r\n #-----------------------------\r\n self.UpdateWhales()\r\n \r\n #----------------------\r\n # Evaluate New Whales\r\n #----------------------\r\n fitness=self.eval_whales()\r\n \r\n for i, fits in enumerate(fitness):\r\n #save the best of the best!!!\r\n if fits < self.best_fitness:\r\n self.best_fitness=fits\r\n self.best_position=self.Positions[i, :].copy()\r\n \r\n #--mir\r\n if self.mode=='max':\r\n self.fitness_best_correct=-self.best_fitness\r\n self.local_fitness=-np.min(fitness)\r\n else:\r\n self.fitness_best_correct=self.best_fitness\r\n self.local_fitness=np.min(fitness)\r\n\r\n self.history['local_fitness'].append(self.local_fitness)\r\n self.history['global_fitness'].append(self.fitness_best_correct)\r\n self.history['a'].append(self.a)\r\n self.history['A'].append(self.A)\r\n \r\n # Print statistics\r\n if self.verbose and i % self.nwhales:\r\n print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\r\n print('WOA step {}/{}, nwhales={}, Ncores={}'.format((k+1)*self.nwhales, ngen*self.nwhales, self.nwhales, self.ncores))\r\n print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\r\n print('Best Whale Fitness:', np.round(self.fitness_best_correct,6))\r\n print('Best Whale Position:', np.round(self.best_position,6))\r\n print('a:', np.round(self.a,3))\r\n print('A:', np.round(self.A,3))\r\n 
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^')\r\n\r\n if self.verbose:\r\n print('------------------------ WOA Summary --------------------------')\r\n print('Best fitness (y) found:', self.fitness_best_correct)\r\n print('Best individual (x) found:', self.best_position)\r\n print('--------------------------------------------------------------') \r\n \r\n return self.best_position, self.fitness_best_correct, self.history", "def Gen_Annihilation_Map(angularSize, size, profile,fileOut):\r\n print 'Generating Annihilation Rate Map'\r\n \r\n # Behavioral Parameters \r\n solarDist = 8.3 # Solar Distance in kpc\r\n stopRadius = 60.0 # Radius from sun in kpc to stop LOS integration\r\n zSteps = 100 # Steps for LOS integration\r\n kpc2cm = 3.08568025e21\r\n \r\n # Constants\r\n deg2rad = math.pi/180.0 \r\n map = np.zeros((size, size)) # Initialize map\r\n \r\n # Code\r\n zStepSize = stopRadius/float(zSteps) # Size of one step in kpc\r\n aPP = float(angularSize)/float(size) # Angle per pixel\r\n solidAngle = (aPP*deg2rad)**2.0 # Multiply this by radial portion to get wedge volume.\r\n # Based on the input DM_model, we integrate rho**2 along the LOS\r\n max = 0\r\n for x in range(0,size): \r\n for y in range(0,size):\r\n rate = 0.0\r\n gamma = math.sqrt((float(x)-size/2.0)**2 + (float(y)-size/2.0)**2)*aPP # Inclusive angle for law of cosines\r\n if profile[0] != 'PULSAR': \r\n for z in range(0,zSteps):\r\n # Compute wedge volume. Currently this assumes a relatively small angular region around galactic center.\r\n volume = ((z+1)**3.0-z**3.0)*solidAngle/3.#\r\n # Compute radius from galactic center using law of cosines\r\n a = (z*zStepSize)\r\n #r = math.sqrt(a**2 + b**2 - 2*a*b*math.cos(gamma)) \r\n l = (float(x)-size/2.0)*aPP # longitude\r\n b = (float(y)-size/2.0)*aPP # latitude \r\n r = math.sqrt(a**2 + solarDist**2 - 2*a*solarDist*math.cos(l*deg2rad)*math.cos(b*deg2rad))\r\n #if gamma>max:\r\n # max = gamma\r\n # Get square DM density to obtain rate\r\n if (profile[0] == 'NFW'):\r\n rate += (volume*rho_DM_NFW(r,profile[1],profile[2]))**2\r\n elif (profile[0] == 'EIN'):\r\n rate += (0.0780763*rho_DM_EIN(r,profile[1],profile[2]))**2*zStepSize*kpc2cm*solidAngle\r\n \r\n elif (profile[0] == 'FLAT'):\r\n rate = 1 # just keep everything flat. 
+= will give integration limit dependent results\r\n elif (profile[0] == 'NFWDECAY'): # NFW not squared.\r\n rate += volume*rho_DM_NFW(r,profile[1],profile[2])\r\n map[x,y] = rate\r\n else:\r\n l = (float(x)-size/2.0)*aPP # longitude\r\n b = (float(y)-size/2.0)*aPP # latitude\r\n r = math.sqrt(l**2.0+b**2.0)\r\n if (r<=0.05):\r\n map[x,y] = 0.05**-1.6\r\n else:\r\n map[x,y] = r**-1.2 \r\n # Write to file \r\n outFile = open(fileOut, \"wb\" )\r\n pickle.dump(map/np.max(map), outFile)\r\n print 'Rate Map saved to ', fileOut\r\n #print np.max(map)\r\n print 'J-Factor (GeV^2/cm^5): ' , np.sum(map)\r\n return map/np.max(map)", "def workflow(save_dir):\n year = 2016\n month_series = range(1, 13)\n total_potential_biomass_multiplier = 48.8\n total_standing_biomass_multiplier = 45.25\n biomass_jitter = 3.\n diet_sufficiency_multiplier = 0.28\n diet_sufficiency_jitter = 0.01\n avg_animal_density = 0.0175\n animal_density_jitter = 0.005\n\n # twelve months of precipitation rasters covering the study area\n precip_basename_list = [\n 'chirps-v2.0.{}.{:02d}.tif'.format(year, month) for month in\n month_series]\n\n # reclassify 0 to NoData in CHIRPS rasters\n output_precip_dir = os.path.join(save_dir, 'precip')\n if not os.path.exists(output_precip_dir):\n os.makedirs(output_precip_dir)\n for bn in precip_basename_list:\n base_raster = os.path.join(PRECIP_DIR, bn)\n target_raster = os.path.join(output_precip_dir, bn)\n pygeoprocessing.raster_calculator(\n [(base_raster, 1)], zero_to_nodata, target_raster,\n gdal.GDT_Float32, _IC_NODATA)\n\n # generate outputs\n for month in month_series:\n precip_raster = os.path.join(\n output_precip_dir, 'chirps-v2.0.{}.{:02d}.tif'.format(year, month))\n\n total_potential_biomass_path = os.path.join(\n save_dir, 'potential_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_potential_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_potential_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n total_standing_biomass_path = os.path.join(\n save_dir, 'standing_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_standing_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_standing_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n diet_sufficiency_path = os.path.join(\n save_dir, 'diet_sufficiency_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n diet_sufficiency_multiplier,\n diet_sufficiency_jitter]],\n precip_to_correlated_output, diet_sufficiency_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n animal_density_path = os.path.join(\n save_dir, 'animal_density_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n avg_animal_density,\n animal_density_jitter]],\n precip_to_animal_density, animal_density_path,\n gdal.GDT_Float32, _IC_NODATA)", "def graphe_regrets(rgt_alea, rgt_glou, rgt_glou_e, rgt_ucb):\n T = np.arange(rgt_alea.size)\n fig, ax = plt.subplots()\n ax.grid(True)\n plt.xlabel(\"T\")\n plt.ylabel(\"Regret\")\n \n ax.plot(T, rgt_alea, label = 'aléatoire') \n ax.plot(T, rgt_glou, label = 'glouton')\n ax.plot(T, rgt_glou_e, label = 'e-glouton')\n ax.plot(T, rgt_ucb, label = 'UCB')\n ax.legend(loc = \"upper left\")\n plt.title(\"Regrets des 4 algorithmes par rapport à T\")", "def 
analyse_plots(plot_dict, data_dict) :\n for component in [ '_x', '_y' ] :\n z_pos = array.array( 'd' )\n trans_pos = array.array( 'd' )\n errors = array.array( 'd' )\n zeros = array.array( 'd' )\n\n plot = plot_dict['beam_positions'+component]\n\n for i in range( plot.GetXaxis().GetNbins()+2 ) :\n projection = plot.ProjectionY( \\\n 'profile'+component+'_pro_'+str(i), i, i )\n if projection.GetEntries() == 0 :\n continue\n\n pro_mean, pro_mean_err, pro_std, pro_std_err = \\\n scifi.fit_gaussian( projection )\n\n errors.append( pro_mean_err )\n trans_pos.append( pro_mean )\n z_pos.append( data_dict['station_positions'][ i-6 ] )\n zeros.append(0.0)\n\n position_graph = ROOT.TGraphErrors( len(zeros), z_pos, trans_pos, \\\n zeros, errors )\n position_graph.SetName('beam_profile'+component)\n plot_dict['beam_profile'+component] = position_graph\n\n profile_x = plot_dict['beam_profile_x']\n profile_y = plot_dict['beam_profile_y']\n\n up_x_func = ROOT.TF1( \"up_fit_x\", \"pol1\", -5000.0, 0.0 )\n up_y_func = ROOT.TF1( \"up_fit_y\", \"pol1\", -5000.0, 0.0 )\n down_x_func = ROOT.TF1( \"down_fit_x\", \"pol1\", 0.0, 5000.0 )\n down_y_func = ROOT.TF1( \"down_fit_y\", \"pol1\", 0.0, 5000.0 )\n\n up_fit_x = profile_x.Fit( 'up_fit_x', \"QSR\" )\n up_fit_y = profile_y.Fit( 'up_fit_y', \"QSR\" )\n down_fit_x = profile_x.Fit( 'down_fit_x', \"QSR\" )\n down_fit_y = profile_y.Fit( 'down_fit_y', \"QSR\" )\n\n plot_dict['beam_profile_x_up_fit'] = up_x_func\n plot_dict['beam_profile_y_up_fit'] = up_y_func\n plot_dict['beam_profile_x_down_fit'] = down_x_func\n plot_dict['beam_profile_y_down_fit'] = down_y_func\n\n\n up_beam_gra_x = up_x_func.GetParameter(1)\n up_beam_gra_x_err = up_x_func.GetParError(1)\n up_beam_gra_y = up_y_func.GetParameter(1)\n up_beam_gra_y_err = up_y_func.GetParError(1)\n\n up_beam_pos_x = data_dict['station_positions'][-1]*up_beam_gra_x + up_x_func.GetParameter(0)\n up_beam_pos_x_err = up_x_func.GetParError(0)\n up_beam_pos_y = data_dict['station_positions'][-1]*up_beam_gra_y + up_y_func.GetParameter(0)\n up_beam_pos_y_err = up_y_func.GetParError(0)\n\n up_beam_rot_x = math.atan( up_beam_gra_x )\n up_beam_rot_x_err = up_beam_gra_x_err # Approx linear\n up_beam_rot_y = math.atan( up_beam_gra_y )\n up_beam_rot_y_err = up_beam_gra_y_err # Approx linear\n\n\n\n down_beam_gra_x = down_x_func.GetParameter(1)\n down_beam_gra_x_err = down_x_func.GetParError(1)\n down_beam_gra_y = down_y_func.GetParameter(1)\n down_beam_gra_y_err = down_y_func.GetParError(1)\n\n down_beam_pos_x = data_dict['station_positions'][1]*down_beam_gra_x + down_x_func.GetParameter(0)\n down_beam_pos_x_err = down_x_func.GetParError(0)\n down_beam_pos_y = data_dict['station_positions'][1]*down_beam_gra_y + down_y_func.GetParameter(0)\n down_beam_pos_y_err = down_y_func.GetParError(0)\n\n down_beam_rot_x = math.atan( down_beam_gra_x )\n down_beam_rot_x_err = down_beam_gra_x_err # Approx linear\n down_beam_rot_y = math.atan( down_beam_gra_y )\n down_beam_rot_y_err = down_beam_gra_y_err # Approx linear\n\n\n# down_pos_x = down_beam_pos_x - data_dict['station_positions'][1]*up_beam_gra_x + up_x_func.GetParameter(0)\n# down_pos_x_err = math.sqrt( up_x_func.GetParError(0)**2 + down_beam_pos_x_err**2 )\n# down_pos_y = down_beam_pos_y - data_dict['station_positions'][1]*up_beam_gra_y + up_y_func.GetParameter(0)\n# down_pos_y_err = math.sqrt( up_y_func.GetParError(0)**2 + down_beam_pos_y_err**2 )\n\n length = TRACKER_SEPARATION\n down_pos_x = down_beam_pos_x - ( up_beam_pos_x + length*up_beam_gra_x )\n down_pos_x_err = math.sqrt( 
up_beam_pos_x_err**2 + down_beam_pos_x_err**2 + (length*up_beam_gra_x_err)**2 )\n down_pos_y = down_beam_pos_y - ( up_beam_pos_y + length*up_beam_gra_y )\n down_pos_y_err = math.sqrt( up_beam_pos_y_err**2 + down_beam_pos_y_err**2 + (length*up_beam_gra_y_err)**2 )\n\n down_rot_x = down_beam_rot_x - up_beam_rot_x\n down_rot_x_err = math.sqrt( down_beam_rot_x_err**2 + up_beam_rot_x_err**2 )\n down_rot_y = down_beam_rot_y - up_beam_rot_y\n down_rot_y_err = math.sqrt( down_beam_rot_y_err**2 + up_beam_rot_y_err**2 )\n\n\n print\n print \"Incoming Beam Misalignments:\"\n print\n print \"Displacement and rotation of beam with respect to upstream tracker:\"\n print\n print \"X Position = {0:0.3f} +/- {1:0.3f} mm\".format( up_beam_pos_x, up_beam_pos_x_err )\n print \"Y Position = {0:0.3f} +/- {1:0.3f} mm\".format( up_beam_pos_y, up_beam_pos_y_err )\n print\n print \"X Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( up_beam_rot_x*1000.0, up_beam_rot_x_err*1000.0 )\n print \"Y Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( up_beam_rot_y*1000.0, up_beam_rot_y_err*1000.0 )\n print\n\n print\n print \"Downstream Tracker Beam Misalignments:\"\n print\n print \"Displacement and rotation of beam with respect to downstream tracker:\"\n print\n print \"X Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_beam_pos_x, down_beam_pos_x_err )\n print \"Y Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_beam_pos_y, down_beam_pos_y_err )\n print\n print \"X Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_beam_rot_x*1000.0, down_beam_rot_x_err*1000.0 )\n print \"Y Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_beam_rot_y*1000.0, down_beam_rot_y_err*1000.0 )\n print\n\n print\n print \"Downstream Tracker Alignment:\"\n print\n print \"Displacement and rotation of between the two trackers:\"\n print\n print \"X Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_pos_x, down_pos_x_err )\n print \"Y Position = {0:0.3f} +/- {1:0.3f} mm\".format( down_pos_y, down_pos_y_err )\n print\n print \"X Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_rot_x*1000.0, down_rot_x_err*1000.0 )\n print \"Y Rotation = {0:0.3f} +/- {1:0.3f} mrad\".format( down_rot_y*1000.0, down_rot_y_err*1000.0 )\n print", "def eg_sk():\n\n rxs = []\n a = []\n b = []\n c = []\n d = []\n e = []\n f = []\n g = []\n h = []\n i = []\n j = []\n\n for _ in range(1000):\n a.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n b.append(utils.gaussian(10.1, 1))\n\n for _ in range(1000):\n c.append(utils.gaussian(20, 1))\n\n for _ in range(1000):\n d.append(utils.gaussian(30, 1))\n\n for _ in range(1000):\n e.append(utils.gaussian(30.1, 1))\n\n for _ in range(1000):\n f.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n g.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n h.append(utils.gaussian(40, 1))\n\n for _ in range(1000):\n i.append(utils.gaussian(40, 3))\n\n for _ in range(1000):\n j.append(utils.gaussian(10, 1))\n\n for k, v in enumerate([a, b, c, d, e, f, g, h, i, j]):\n rxs.append(creation.RX(v, \"rx{}\".format(k)))\n\n for rx in stats.tiles(stats.scottKnot(rxs)):\n print(\"\", rx[\"rank\"], rx[\"name\"], rx[\"show\"], sep=\"\\t\")", "def setup(self):\n igd = self.options['input_grid_data']\n ogd = self.options['output_grid_data']\n output_subset = self.options['output_subset']\n\n if ogd is None:\n ogd = igd\n\n # Build the interpolation matrix which maps from the input grid to the output grid.\n # Rather than a single phase-wide interpolating polynomial, map each segment.\n # To do this, find the nodes in the output 
grid which fall in each segment of the input\n # grid. Then build a Lagrange interpolating polynomial for that segment\n L_blocks = []\n output_nodes_ptau = list(ogd.node_ptau[ogd.subset_node_indices[output_subset]])\n\n for iseg in range(igd.num_segments):\n i1, i2 = igd.segment_indices[iseg]\n iptau_segi = np.take(igd.node_ptau, (i1, i2-1))\n istau_segi = np.take(igd.node_stau, (i1, i2-1))\n\n # The indices of the output grid that fall within this segment of the input grid\n if ogd is igd:\n optau_segi = iptau_segi\n else:\n ptau_hi = igd.segment_ends[iseg+1]\n if iseg < igd.num_segments - 1:\n idxs_in_iseg = np.where(output_nodes_ptau <= ptau_hi)[0]\n else:\n idxs_in_iseg = np.arange(len(output_nodes_ptau))\n optau_segi = np.asarray(output_nodes_ptau)[idxs_in_iseg]\n # Remove the captured nodes so we don't accidentally include them again\n output_nodes_ptau = output_nodes_ptau[len(idxs_in_iseg):]\n\n # Now get the output nodes which fall in iseg in iseg's segment tau space.\n ostau_segi = 2.0 * (optau_segi - iptau_segi[0]) / (iptau_segi[-1] - iptau_segi[0]) - 1\n\n # Create the interpolation matrix and add it to the blocks\n L, _ = lagrange_matrices(istau_segi, ostau_segi)\n L_blocks.append(L)\n\n self.interpolation_matrix = block_diag(*L_blocks)\n r, c = np.nonzero(self.interpolation_matrix)\n\n output_num_nodes, input_num_nodes = self.interpolation_matrix.shape\n\n for (name, kwargs) in self._timeseries_outputs:\n\n input_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n input_name = 'input_values:{0}'.format(name)\n self.add_input(input_name,\n shape=(input_num_nodes,) + kwargs['shape'],\n **input_kwargs)\n\n output_name = name\n output_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n output_kwargs['shape'] = (output_num_nodes,) + kwargs['shape']\n self.add_output(output_name, **output_kwargs)\n\n self._vars.append((input_name, output_name, kwargs['shape']))\n\n size = np.prod(kwargs['shape'])\n val_jac = np.zeros((output_num_nodes, size, input_num_nodes, size))\n\n for i in range(size):\n val_jac[:, i, :, i] = self.interpolation_matrix\n\n val_jac = val_jac.reshape((output_num_nodes * size, input_num_nodes * size),\n order='C')\n\n val_jac_rows, val_jac_cols = np.where(val_jac != 0)\n\n rs, cs = val_jac_rows, val_jac_cols\n self.declare_partials(of=output_name,\n wrt=input_name,\n rows=rs, cols=cs, val=val_jac[rs, cs])", "def CII_vs_CO(**kwargs):\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n GR = glo.global_results(sim_run=p.sim_runs[1],nGal=p.nGals[1],grid_ext=p.grid_exts[1])\n fig,ax1 = plt.subplots()\n L_CII = getattr(GR,'L_[CII]158_sun')\n L_CO = getattr(GR,'L_CO(1-0)_sun')\n Zsfr = getattr(GR,'Zsfr')\n lL_CO, lL_CII = np.log10(L_CO), np.log10(L_CII) \n lL_CO, lL_CII = lL_CO[(L_CO > 0) & (L_CII > 0)], lL_CII[(L_CO > 0) & (L_CII > 0)]\n sc = ax1.scatter(np.log10(L_CO)-10, np.log10(L_CII)-10, marker='o', c=np.log10(Zsfr), cmap='viridis', zorder=10,\\\n vmin=np.log10(0.05), vmax=np.log10(3.1), \\\n s=10, alpha=0.8)#, label='SIGAME 100Mpc_arepoPDF')\n # print('Min Zsfr in Simba sample: ',np.min(Zsfr))\n # print('indices with L_CO < 1e0:')\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lL_CO,lL_CII]).T)\n x, y = np.mgrid[lL_CO.min():lL_CO.max():nbins*1j, \\\n 4:lL_CII.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax1.contour(x, y, z.reshape(x.shape),colors='forestgreen',levels=8,zorder=10)\n CS.collections[0].set_label('SIGAME 100Mpc_arepoPDF')\n\n GR = 
glo.global_results(sim_run=p.sim_runs[0],nGal=p.nGals[0],grid_ext=p.grid_exts[1])\n L_CII = getattr(GR,'L_[CII]158_sun')\n L_CO = getattr(GR,'L_CO(1-0)_sun')\n Zsfr = getattr(GR,'Zsfr')\n lL_CO, lL_CII = np.log10(L_CO), np.log10(L_CII) \n lL_CO, lL_CII = lL_CO[(L_CO > 0) & (L_CII > 0)], lL_CII[(L_CO > 0) & (L_CII > 0)]\n lL_CO = np.append(lL_CO,np.array([6.1,5]))\n lL_CII = np.append(lL_CII,np.array([8.9,9.7]))\n # ax1.scatter(np.log10(L_CO), np.log10(L_CII), marker='^', c=np.log10(Zsfr), cmap='viridis', zorder=10,\\\n # vmin=np.log10(0.05), vmax=np.log10(3.1), \\\n # s=10, alpha=0.8, label='SIGAME 25Mpc_arepoPDF')\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lL_CO,lL_CII]).T)\n x, y = np.mgrid[lL_CO.min():lL_CO.max():nbins*1j, \\\n 4:lL_CII.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax1.contour(x, y, z.reshape(x.shape),colors='deepskyblue',linestyles='dotted',levels=6)\n CS.collections[0].set_label('SIGAME 25Mpc_arepoPDF')\n\n GR = glo.global_results(sim_run=p.sim_runs[1],nGal=p.nGals[1],grid_ext=p.grid_exts[0])\n L_CII = getattr(GR,'L_[CII]158_sun')\n L_CO = getattr(GR,'L_CO(1-0)_sun')\n Zsfr = getattr(GR,'Zsfr')\n lL_CO, lL_CII = np.log10(L_CO), np.log10(L_CII) \n lL_CO, lL_CII = lL_CO[(L_CO > 0) & (L_CII > 0)], lL_CII[(L_CO > 0) & (L_CII > 0)]\n lL_CO = np.append(lL_CO,np.array([-2.2,4.7]))\n lL_CII = np.append(lL_CII,np.array([8,9.3]))\n # ax1.scatter(np.log10(L_CO), np.log10(L_CII), marker='^', c=np.log10(Zsfr), cmap='viridis', zorder=10,\\\n # vmin=np.log10(0.05), vmax=np.log10(3.1), \\\n # s=10, alpha=0.8, label='SIGAME v3 Simba-%s' % (p.sim_runs[0].replace('_','').replace('Mpc','')))\n nbins = 100\n k = kde.gaussian_kde(np.column_stack([lL_CO,lL_CII]).T)\n x, y = np.mgrid[lL_CO.min():lL_CO.max():nbins*1j, \\\n 4:lL_CII.max():nbins*1j]\n z = k(np.vstack([x.flatten(), y.flatten()]))\n CS = ax1.contour(x, y, z.reshape(x.shape),colors='brown',levels=8,zorder=5,linestyles='dashed')\n CS.collections[0].set_label('SIGAME 100Mpc_arepoPDF_no_ext')\n\n # Observations\n K16 = pd.read_pickle('data/observations/AHIMSA_sample_lit')\n K16_LCII = K16['[CII]158_Lsun']\n K16_LCO = K16['CO(1-0)_Lsun']\n ax1.plot(np.log10(K16_LCO), np.log10(K16_LCII), '>', color='grey', ms=6, fillstyle='none',alpha=0.8, mew=1,zorder=0,\\\n label='Mixed type galaxies [Kamenetzky+16]')\n\n C15 = pd.read_pickle('data/observations/DGS_Cormier_2015')\n C15_LCII = C15['L_[CII]158']\n C15_LCO = C15['L_CO(1-0)']\n C15_Z = C15['Z']\n # L_ul = C15['L_[CII]158'][(C15['L_[CII]158'] < 0) & (C15['L_CO(1-0)'] > 0)]\n # if len(L_ul) > 0:\n # ax1.plot(np.log10(C15['L_CO(1-0)'][C15['L_[CII]158'] < 0]),np.log10(-1.*L_ul),'s',ms=5,mew=0,color='grey',alpha=0.8)\n # ax1.errorbar(np.log10(C15['L_CO(1-0)'][C15['L_[CII]158'] < 0]),np.log10(-1.*L_ul), elinewidth=1,\\\n # uplims=np.ones(len(L_ul)),yerr=np.ones(len(L_ul))*1,color='grey',alpha=0.8,lw=0)\n ax1.scatter(np.log10(C15_LCO), np.log10(C15_LCII), marker='+', c=np.log10(C15_Z), cmap='viridis', zorder=0,\\\n vmin=np.log10(0.05), vmax=np.log10(3.1),\\\n s=100, lw=3, alpha=0.8, label='Dwarf galaxies [Cormier+15]')\n\n A17 = pd.read_pickle('data/observations/xCOLD_GASS_Accurso_2017')\n A17 = A17.loc[np.argwhere(A17['L_CO(1-0)'].values > 0).flatten()]\n ax1.scatter(A17['L_CO(1-0)'],A17['L_[CII]158'], marker='d', c=np.log10(A17['Z']), cmap='viridis', zorder=0,\\\n vmin=np.log10(0.05), vmax=np.log10(3.1),\\\n s=50, lw=0, alpha=0.8, label='COLD GASS [Accurso+17]') #c=np.log10(A17['Z']), \n\n CII_obs = np.log10(np.append(K16_LCII.values,C15_LCII.values))\n 
CO_obs = np.log10(np.append(K16_LCO.values,C15_LCO.values))\n CII_obs = np.append(CII_obs,A17['L_[CII]158'].values)\n CO_obs = np.append(CO_obs,A17['L_CO(1-0)'].values)\n index = np.argwhere((CII_obs > 0) & (CO_obs > 0)).flatten()\n CII_obs = CII_obs[index]\n CO_obs = CO_obs[index]\n\n x = np.linspace(0, 7, 100)\n fit = LinearRegression().fit(CO_obs.reshape(-1, 1),\\\n CII_obs.reshape(-1, 1))\n L_fit = fit.predict(x.reshape(-1, 1))\n ax1.plot(x, L_fit, color='black', linestyle='--', label='Log-linear fit to observations')\n\n ax1.set_ylabel('log ' + getlabel('[CII]158'))\n ax1.set_xlabel('log ' + getlabel('CO(1-0)'))\n plt.colorbar(sc,label=r'log $\\langle$Z$\\rangle_{\\rm SFR}$ [Z$_{\\rm \\odot}$]')\n\n handles, labels = ax1.get_legend_handles_labels()\n print(labels) # labels, handles = zip(*sorted(zip(labels, handles), key=lambda t: t[0]))\n # handles = [handles[_] for _ in [2,4,3,5,0,6,7,1]]\n # labels = [labels[_] for _ in [2,4,3,5,0,6,7,1]]\n handles = [handles[_] for _ in [2,4,3,5,6,0,1]]\n labels = [labels[_] for _ in [2,4,3,5,6,0,1]]\n plt.legend(handles,labels,loc='lower left',fontsize=10.,frameon=True)\n\n ax1.set_xlim([-3,6.2])\n ax1.set_ylim([4,10])\n\n if not os.path.isdir(p.d_plot + 'luminosity/'): os.mkdir(p.d_plot + 'luminosity/') \n plt.savefig(p.d_plot + 'luminosity/CO_vs_CII%s%s.png' % (p.grid_ext,p.table_ext), dpi=300)", "def CalculateG(g, s):\n for i in range(len(y_train)):\n g[i] = 1 - y_train[i]*s[i]\n return g", "def plot():\n xvals = np.arange(-50, 250, step=0.1)\n\n fig = plt.figure()\n plt.suptitle(\"Gaussian with smooth transition to power law\")\n\n A0vals = [10, 11]\n avals = [5*10**-3, 10**-3, 5*10**-4]\n ttvals = [10., 50., 100.]\n cvals = [-0.1, -0.9, -5./3., -4.]\n offset = [-30, 0.0, 30]\n\n paramvals = [A0vals, avals, ttvals,cvals, offset]\n titles, labels = return_parameter_names()\n\n nplots = len(paramvals)\n\n for i in range(nplots):\n plt.subplot(nplots, 1, i+1)\n vals = paramvals[i]\n for j in range(len(vals)):\n pset = list(default())\n pset[i] = vals[j]\n yvals=[]\n ypower=[]\n ypeak=[]\n for x in xvals:\n yvals.append(fitfunc(x, pset))\n ypeak.append(logpeak(x,pset))\n if x > 0:\n ypower.append(logpowerlaw(x,pset))\n label = labels[i] + \"=\"+str(vals[j])\n plt.plot(xvals, yvals, label = label)\n\n plt.title(titles[i])\n plt.legend()\n\n fig.set_size_inches(15, 30)\n plt.savefig(\"graphs/misc/lightcurve_models.pdf\")\n plt.close()", "def _gaussian_weighted_map_const_multi(\n y_grid, x_grid, heatmap, points_y, points_x, boxes,\n gaussian_denom_ratio):\n num_instances, _ = _get_shape(boxes, 2)\n height, width, num_keypoints = _get_shape(heatmap, 3)\n\n # [height, width, num_instances, num_keypoints].\n # Note that we intentionally avoid using tf.newaxis as TfLite converter\n # doesn't like it.\n y_diff = (\n tf.reshape(y_grid, [height, width, 1, 1]) -\n tf.reshape(points_y, [1, 1, num_instances, num_keypoints]))\n x_diff = (\n tf.reshape(x_grid, [height, width, 1, 1]) -\n tf.reshape(points_x, [1, 1, num_instances, num_keypoints]))\n distance_square = y_diff * y_diff + x_diff * x_diff\n\n y_min, x_min, y_max, x_max = tf.split(boxes, 4, axis=1)\n\n # Make the mask with all 1.0 in the box regions.\n # Shape: [height, width, num_instances]\n in_boxes = tf.math.logical_and(\n tf.math.logical_and(\n tf.reshape(y_grid, [height, width, 1]) >= tf.reshape(\n y_min, [1, 1, num_instances]),\n tf.reshape(y_grid, [height, width, 1]) < tf.reshape(\n y_max, [1, 1, num_instances])),\n tf.math.logical_and(\n tf.reshape(x_grid, [height, width, 1]) >= tf.reshape(\n 
x_min, [1, 1, num_instances]),\n tf.reshape(x_grid, [height, width, 1]) < tf.reshape(\n x_max, [1, 1, num_instances])))\n in_boxes = tf.cast(in_boxes, dtype=tf.float32)\n\n gaussian_denom = tf.cast(\n tf.minimum(height, width), dtype=tf.float32) * gaussian_denom_ratio\n # shape: [height, width, num_instances, num_keypoints]\n gaussian_map = tf.exp((-1 * distance_square) / gaussian_denom)\n return tf.expand_dims(heatmap, axis=2) * gaussian_map * tf.reshape(\n in_boxes, [height, width, num_instances, 1])", "def gredo(x,y,guessx,guessy,guesspar):\n\n global BTRACK, GSTRUC, NPIX\n \n flag = True # do it unless proven wrong \n \n # FROM **ANY** PREVIOUS POSITION \n prev, = np.where((BTRACK['x']==x) & (BTRACK['y']==y))\n nprev = len(prev)\n\n if guesspar is None:\n return False\n tguesspar = guesspar\n nguesspar = len(tguesspar) \n ngg = nguesspar//3 \n \n # FROM **ANY** PREVIOUS POSITION \n # We have used a guess from this position before \n # but have the parameters changed sufficiently\n nogaussians = True # no gaussians found by default\n if (nprev > 0): \n \n # Looping through the previous ones \n for i in range(nprev):\n guesspar2 = BTRACK['data'][prev[i]]['guesspar']\n \n # Some gaussians found \n if (guesspar2 is not None):\n nogaussians = False\n tpar = guesspar2\n ntpar = len(tpar) \n ntg = ntpar//3 # number of gaussians in this guess \n \n # Same number of gaussians \n if (ntpar == nguesspar): \n # Sorting, largest first \n tpar2 = utils.gsort(tpar) \n tguesspar2 = utils.gsort(tguesspar) \n \n # Fixing possible zeros that could ruin the ratio \n dum = np.copy(tpar2)\n bd, = np.where(dum == 0.) \n if len(bd) > 0:\n dum[bd] = 1e-5 \n diff = np.abs(tpar2 - tguesspar2) \n ratio = diff/np.abs(dum) \n \n # These differences are too small, NO redo \n if (np.max(ratio) < 0.01): \n return False\n \n # Some previous visits, but no Gaussians detected, redo=False\n if nogaussians:\n return False\n \n return flag", "def transition_function(grid, neighbourstates, neighbourcounts, decay_grid,\n water_decay_grid):\n\n global water_counter\n global ignition_grid\n neighbourstates = np.array(neighbourstates)\n init_grid = initial_grid.astype(int)\n ig_grid = np.array(ignition_grid)\n windspeed_ignition_modifiers = wind_speed_rvalue(\"NE\", 10)\n new_ig_grid = []\n for i, row in enumerate(grid):\n new_ig_grid.append([\n ignite(cell, neighbourstates[:, i, j],\n windspeed_ignition_modifiers) for j, cell in enumerate(row)\n ])\n new_ig_grid = np.array(new_ig_grid)\n started_to_burn = []\n for i, row in enumerate(grid):\n started_to_burn.append([\n started_burning(cell, ig_grid[i, j], new_ig_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[started_to_burn] = START_BURN\n ig_grid = np.add(new_ig_grid, ig_grid)\n full_burn = []\n for i, row in enumerate(grid):\n full_burn.append([\n fully_burning(cell, ig_grid[i, j], decay_grid[i, j])\n for j, cell in enumerate(row)\n ])\n grid[full_burn] = BURNING\n end_burning = []\n for i, row in enumerate(grid):\n end_burning.append([\n ending_burn(cell, decay_grid[i, j], decay_values[int(\n initial_grid[i, j])]) for j, cell in enumerate(row)\n ])\n grid[end_burning] = END_BURN\n decay_grid[(grid == BURNING) | (grid == END_BURN)] -= 1\n burnt_out = (decay_grid == 0) # find those which have decayed to 0\n grid[(decay_grid == 0\n )] = BURNT #set all that have decayed to zero to BURNT(7)\n water_counter += 1\n\n if (water_counter == 100):\n grid[120:160, 80:120] = initial_grid[120:160, 80:120]\n water_decay_grid[(grid != LAKE)] -= 1 # take one off their decay value\n 
grid[(water_decay_grid == 0)] = BURNT # switch their state to 5\n ignition_grid = ig_grid\n return grid", "def traces(mndata,Params,srate,imagepath):\n\t#plot high gamma traces\n\t#data should be bandpassed (todo)\n\t#resample to srate\n\tst = resample(Params[\"st\"],srate)\n\ten = resample(Params[\"en\"],srate)\n\tbl_en = resample(Params[\"bl_en\"],srate)\n\tbl_st = resample(Params[\"bl_st\"],srate)\n\tplot_tp = resample(Params[\"plot\"],srate)\n\tcue = resample(500,srate)\n\t\n\tcolors = ['red','orange','green','blue']\n\tx = np.array(range(st,en+1))\n\tf, (ax,ax2) = plt.subplots(1,2, sharex = False)\n\tax.axhline(y = 0,color = 'k',linewidth=2)\n\tax.axvline(x = 0,color='k',linewidth=2)\n\tax.axvline(x = cue,color = 'gray',linewidth = 2)\n\tax.axvline(x = cue+cue,color = 'gray',linewidth = 2)\n\tax.axvspan(cue, cue+cue, facecolor='0.5', alpha=0.25,label = 'cue')\n\n\tfor j in range(len(Params[\"conditions\"])):\n\t\tcondition = Params['conditions'][j]\n\t\ty = mndata[condition]['data']\n\t\tax.plot(x,y, label = condition,linewidth = 2,color = colors[j])\n\t\n\tax.set_ylim((-30,85))\n\tax.set_xlim(st,en)\n\tax.legend()\n\tax.xaxis.set_ticklabels(['', '0', '','500', '', '1000', '', '1500', '', '2000','','2500','', '3000'],minor=False)\n\tax.xaxis.set_ticks(range(st,en,plot_tp))\n\n\tax.set_xlabel(\"time (ms)\")\n\tax.set_ylabel(\"% change baseline\")\n\tax.set_title('Analytic Amplitude - High Gamma (70-150Hz)', fontsize = 18)\n\n\t#plot brain with elec location\n\t#brain = plt.imread(imagepath)\n\t#aa = pylab.mean(brain,2)\n\t#ax2.imshow(aa)\n\t#a2.gray()\n\n\t#brain = Image.open(imagepath)\n\t#ax2.set_axis_off()\n\t#im = plt.imshow(brain, origin = 'lower')\n\n\t#brain = _png.read_png(imagepath)\n\t#imagebox = OffsetImage(brain,zoom =5)\n\t#ab = AnnotationBbox(imagebox,)\n\n\tim = Image.open(imagepath)\n\tax2.imshow(im,aspect = 'auto',origin = 'lower')\n\tax2.set_xlim((0,750))\n\tax2.set_title('Electrode Location',fontsize = 18)\n\n\n\n\treturn f, (ax, ax2)", "def update_grhs():\n init_gradient()\n costs_per_batch = []\n for i in range(n_train_batches):\n c = update_gradient_batch(i,*args)\n costs_per_batch.append(c)\n return numpy.mean(costs_per_batch,axis=0)", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability 
Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def compute_g_0(self, j):\n #Compute variance and mean denominator (same denominator for both)\n denominator = self.sigma2 * self.sigma_g_star_2[0, j] + (10**8) * self.sigma2\n\n numerator_mean = (10**8) * self.sigma2 * self.g_heat[1,j]\n if (self.u_heat > self.temperatures[0]):\n denominator = denominator + (10**8) * self.sigma_g_star_2[0, j] * ((self.temperatures[0] - self.u_heat )**2)\n numerator_mean = numerator_mean + \\\n (10**8) * self.sigma_g_star_2[0, j] * (self.temperatures[0] - self.u_heat) * (self.consumptions[0] - self.s[0,j] * self.kappa[self.daytypes[0]])\n\n #Mean\n mean = numerator_mean / denominator\n\n #Compute variance numerator\n variance_numerator = ((10**8) * self.sigma2 * self.sigma_g_star_2[0, j])\n #Variance\n variance = variance_numerator / denominator\n\n self.g_heat[0,j] = self.truncated_norm(-inf, 0, mean, variance)", "def swe(lon, lat, month):\n\n im = month - 1\n\n h0 = np.array( [ 8.37, 9.43, 10.74, 11.67, 11.80, 12.48,\n 4.01, 1.08, 3.84, 6.24, 7.54, 8.00 ] )\n a = np.array( [ -0.0270, 0.0058, 0.1618, 0.0841, -0.0043, 0.2084,\n 0.0970, 0.0712, 0.0393, 0.1158, 0.0567, -0.0540 ] )\n b = np.array( [ -0.3400, -0.1309, 0.0276, -0.1328, -0.4284, -0.5739,\n -0.4930, -0.1450, -0.2107, -0.2803, -0.3201, -0.3650 ] )\n c = np.array( [ -0.0319, 0.0017, 0.0213, 0.0081, -0.0380, -0.0468,\n -0.0333, -0.0155, -0.0182, -0.0215, -0.0284, -0.0362 ] )\n d = np.array( [ -0.0056, -0.0021, 0.0076, -0.0003, -0.0071, -0.0023,\n -0.0026, 0.0014, -0.0053, 0.0015, -0.0032, -0.0112 ] )\n e = np.array( [ -0.0005, -0.0072, -0.0125, -0.0301, -0.0063, -0.0253,\n -0.0343, -0.0000, -0.0190, -0.0176, -0.0129, -0.0035 ] )\n\n x = (90. - lat) * np.cos( np.radians(lon) )\n y = (90. 
- lat) * np.sin( np.radians(lon) )\n\n h = ( h0[im] + ( a[im] * x ) + ( b[im] * y ) + ( c[im] * x * y ) +\n ( d[im] * x * x ) + ( e[im] * y * y ) )\n\n# h = np.where( h < 0., 0., h)\n \n return h", "def _get_sum_gaussian_image(s_gen, xs, ys, sdev, n=50):\n m1, m2 = xs.min(), xs.max()\n xx = np.linspace(m1, m2, n)\n XX, YY = np.meshgrid(xx, xx)\n XX, YY = [u.ravel()[np.newaxis, :] for u in [XX, YY]]\n xs, ys, S_gen = [u[:, np.newaxis] for u in [xs, ys, s_gen]]\n res = np.sum(\n S_gen * np.exp(((xs - XX) ** 2 + (ys - YY) ** 2) /\n (-2 * sdev ** 2)), axis=0)\n return res.reshape(n, n)", "def garch(ω, α, β, n_out=1000):\r\n p = len(α)\r\n q = len(β)\r\n\r\n\r\n # Größe Einbrennphase\r\n n_pre = 1000\r\n n = n_pre + n_out\r\n\r\n\r\n # Sample noise\r\n ɛ = np.random.standard_normal(size=n)#st.t.rvs(df=5,size=n))\r\n #st.t.var(df=5))#np.random.normal(size=n)\r\n\r\n\r\n y = np.zeros(n)\r\n σ = np.zeros(n)\r\n\r\n #Start-Values\r\n for k in range(np.max([p, q])):\r\n σ[k] = (np.random.standard_normal())\r\n\r\n #Create GARCH data\r\n\r\n for k in range(np.max([p, q]), n):\r\n α_term = sum([α[i] * y[k-i-1]**2 for i in range(p)])\r\n β_term = sum([β[i] * ɛ[k-i-1]**2 for i in range(q)])\r\n σ[k] = np.sqrt(ω + α_term + β_term)\r\n y[k] = σ[k] * ɛ[k]\r\n\r\n\r\n return y[n_pre:]", "def CalcGreenFunctions(x, y, z, x_src_l, y_src_l, alpha, dire, Lambda_y, \\\n gamma_l, c, omega, G_sen, dir_meas, dir_meas_deg, airloss_alpha, f, n):\n \n G = greens_fct(repmat(x_src_l, np.shape(x)[0],1), repmat(y_src_l, np.shape(y)[0],1), omega, c, \\\n np.transpose(repmat(x, np.shape(x_src_l)[0], 1)), np.transpose(repmat(y, np.shape(y_src_l)[0], 1)), z)\n\n G = G_sen * G\n \n beta = np.arcsin((np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1)) \\\n * np.sqrt((np.transpose(repmat(x, np.shape(x_src_l)[0], 1)) - \\\n repmat(x_src_l, np.shape(x)[0], 1))**2 + \\\n (np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1))**2)**(-1)) \\\n + repmat(gamma_l, np.shape(x)[0], 1)\n \n # air attenuation\n src_rec_dist = np.sqrt((np.transpose(repmat(x, np.shape(x_src_l)[0], 1)) - repmat(x_src_l, np.shape(x)[0], 1))**2 \\\n + (np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1))**2)\n \n air_att = airloss_alpha * src_rec_dist\n air_att = 10**(-air_att / 20)\n G = G * air_att\n \n H_post = calc_directivity(dire, alpha, Lambda_y, beta, omega, c, f, dir_meas, dir_meas_deg, n)\n\n G = G * H_post\n \n return G", "def visualSignMap(phasemap1, phasemap2):\r\n\r\n if phasemap1.shape != phasemap2.shape:\r\n raise LookupError(\"'phasemap1' and 'phasemap2' should have same size.\")\r\n\r\n gradmap1 = np.gradient(phasemap1)\r\n gradmap2 = np.gradient(phasemap2)\r\n\r\n # gradmap1 = ni.filters.median_filter(gradmap1,100.)\r\n # gradmap2 = ni.filters.median_filter(gradmap2,100.)\r\n\r\n graddir1 = np.zeros(np.shape(gradmap1[0]))\r\n # gradmag1 = np.zeros(np.shape(gradmap1[0]))\r\n\r\n graddir2 = np.zeros(np.shape(gradmap2[0]))\r\n # gradmag2 = np.zeros(np.shape(gradmap2[0]))\r\n\r\n for i in range(phasemap1.shape[0]):\r\n for j in range(phasemap2.shape[1]):\r\n graddir1[i, j] = math.atan2(gradmap1[1][i, j], gradmap1[0][i, j])\r\n graddir2[i, j] = math.atan2(gradmap2[1][i, j], gradmap2[0][i, j])\r\n\r\n # gradmag1[i,j] = np.sqrt((gradmap1[1][i,j]**2)+(gradmap1[0][i,j]**2))\r\n # gradmag2[i,j] = np.sqrt((gradmap2[1][i,j]**2)+(gradmap2[0][i,j]**2))\r\n\r\n vdiff = np.multiply(np.exp(1j * graddir1), np.exp(-1j * graddir2))\r\n\r\n areamap = 
np.sin(np.angle(vdiff))\r\n\r\n return areamap", "def optimize_at_with_gd(y_true, output, thresholds, average):\n opt = Adam()\n opt.alpha = 1e-2\n metric = calculate_f1(y_true, output, thresholds, average)\n metric_asfo_epoch = [metric]\n for i in range(100):\n grads = calculate_at_gradient(y_true, output, thresholds, average)\n # if i==0: print(\"grads:\", grads)\n grads = [-e for e in grads]\n thresholds = opt.GetNewParams(thresholds, grads)\n metric = calculate_f1(y_true, output, thresholds, average)\n if i % 50 == 0:\n print('Iteration: {}, Score: {:.3f}, thresholds: {}'.format(\n i, metric, np.array(thresholds)))\n metric_asfo_epoch.append(metric)\n\n return metric, thresholds, metric_asfo_epoch", "def main(rand,mu,lamb,cxpb,mutpb,ngen,param):\n \n random.seed(rand)\n NGEN = ngen\n MU = mu\n LAMBDA = lamb\n CXPB = cxpb\n MUTPB = mutpb\n \n # Used for printing the results. It is the parameter that is changed one run from another\n if param==\"rand\" or param==\"optimal\":\n list_results=[rand]\n elif param==\"mu\":\n list_results=[mu]\n elif param==\"lamb\":\n list_results=[lamb]\n elif param==\"cross\":\n list_results=[cxpb]\n elif param==\"mutate\":\n list_results=[mutpb]\n elif param==\"ngen\":\n list_results=[ngen]\n elif param==\"original\":\n list_results=[0]\n \n # Initialization of the objects for the GA\n pop = toolbox.population(n=MU)\n hof = tools.ParetoFront()\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean, axis=0)\n stats.register(\"std\", np.std, axis=0)\n stats.register(\"min\", np.min, axis=0)\n stats.register(\"max\", np.max, axis=0)\n\n # Run of the GA\n p,logbook=algorithms.eaMuPlusLambda(pop, toolbox, MU, LAMBDA, CXPB, MUTPB, NGEN, stats,\n halloffame=hof,verbose=0)\n with open(results_path+param+'_logbook.csv', 'a',newline='') as f:\n w = csv.DictWriter(f, logbook[0].keys())\n w.writeheader()\n for el in logbook:\n w.writerow(el)\n w.writerow({})\n \n # Takes the max fitness of the population from all of the runs\n max_fit=0\n max_gen=0\n for elt in logbook:\n if elt['max'][0]>max_fit:\n max_fit=elt['max'][0]\n max_gen=elt['gen']\n list_results.append(max_fit)\n list_results.append(max_gen)\n \n #TODO\n# for ind in hof:\n# dist = numpy.linalg.norm(a-b)\n\n print (\"{0} {1} {2} {3}\".format(round(list_results[1],3),round(list_results[2],3),round(list_results[0],3),hof[0]))\n current_out_writer.writerow([list_results[0],list_results[1],list_results[2],hof[0]])\n \n return pop, stats, hof", "def _run_rbf_interpolation(out_dir, layer, bounds, function, smooth):\n # if running scipy methods prepend root dir to out path\n out_dir = OPJ(path_root, out_dir)\n if not os.path.isdir(out_dir):\n print(\n os.path.abspath(out_dir),\n ' does not exist, creating directory.\\n'\n )\n Path(out_dir).mkdir(parents=True, exist_ok=True)\n\n out_file = OPJ(\n out_dir, \n '{time_agg}.tiff'.format(time_agg=layer)\n )\n print(\n '\\nInterpolating {g} point bias ratios for: {t}\\n'.\\\n format(g=grid_var, t=layer),\n 'Using the \"{}\" method\\n'.format(function),\n 'Resolution (pixel size) of output raster: {} degrees'.format(res)\n )\n print( \n 'GeoTIFF raster will be saved to: \\n',\n os.path.abspath(out_file)\n )\n\n\n # get grid extent based on station locations in CSV\n if not bounds:\n bounds = get_subgrid_bounds(in_path, buffer=buffer, grid_res=CS) \n lon_min, lon_max, lat_min, lat_max = bounds\n # fix any minor adjustments to make raster fit gridMET fishnet extent\n # if scale_factor=1 make sure raster pixels align exactly w/gridcells\n 
# raster extent may exceed fishnet grid to fill gaps for zonal stats\n if scale_factor:\n nxcells = abs(lon_min-lon_max) / (CS*scale_factor)\n nycells = abs(lat_min-lat_max) / (CS*scale_factor)\n remainder_x = int(nxcells) - nxcells\n remainder_y = int(nycells) - nycells\n if abs(remainder_x) > CS:\n remainder_x -= CS * (remainder_x / CS) \n if abs(remainder_y) > CS:\n remainder_y -= CS * (remainder_y / CS)\n lon_min -= remainder_x\n lon_max += CS\n lat_min -= remainder_y\n lat_min -= CS\n \n # check if layer is in summary CSV \n existing_layers = pd.read_csv(in_path).columns\n if not layer in existing_layers:\n print('Column {} does not exist in input CSV:\\n {}'.format(\n layer, in_path),\n '\\nSkipping interpolation.'\n )\n return\n \n # get point station data from summary CSV\n in_df = pd.read_csv(in_path, na_values=[-999])\n lon_pts, lat_pts = in_df.STATION_LON.values, in_df.STATION_LAT.values\n values = in_df[layer].values\n \n # mask out stations with missing data\n if in_df[layer].isnull().sum() > 0:\n mask = in_df[layer].notnull()\n n_missing = in_df[layer].isna().sum()\n # if one point or less data points exists exit\n if len(mask) == n_missing or len(values) - n_missing == 1:\n print('Missing sufficient point data for variable: {} {}'.\\\n format(grid_var, layer),\n '\\nNeed at least two stations with data, skipping.')\n return\n print('Warning:\\n',\n 'Data missing for {} of {} stations for variable: {} {}'.\\\n format(n_missing, len(values), grid_var, layer),\n '\\nproceeding with interpolation.')\n # get locations where ratio is not nan\n values = values[mask]\n lon_pts = lon_pts[mask]\n lat_pts = lat_pts[mask]\n\n nx_cells = int(np.round(np.abs((lon_min - lon_max) / CS)))\n ny_cells = int(np.round(np.abs((lat_min - lat_max) / CS)))\n # rbf requires uniform grid (n X n) so \n # extend short dimension and clip later \n nx_cells_out = copy.copy(nx_cells)\n ny_cells_out = copy.copy(ny_cells)\n # gdal requires \"upper left\" corner coordinates\n lat_max_out = copy.copy(lat_max)\n lon_max_out = copy.copy(lon_max)\n # extend short dimension to make square grid\n if not nx_cells == ny_cells:\n diff = np.abs(nx_cells - ny_cells)\n if nx_cells > ny_cells:\n lat_max += diff * CS\n ny_cells += diff\n else:\n lon_max += diff * CS\n nx_cells += diff\n\n if scale_factor == 1:\n # make finer/coarse grid by scale factor\n lons = np.linspace(lon_min, lon_max, \n int(np.round(nx_cells/scale_factor))-1)\n lats = np.linspace(lat_min, lat_max, \n int(np.round(ny_cells/scale_factor))-1)\n # extent for original created by spatial.build_subgrid\n # add one to make sure raster covers full extent\n lons_out = np.linspace(lon_min, lon_max_out, \n int(np.round(nx_cells_out/scale_factor))-1)\n lats_out = np.linspace(lat_min, lat_max_out, \n int(np.round(ny_cells_out/scale_factor))-1)\n\n else:\n # add one extra cell to cover grid buffer extent for upscaling\n # raster extent always >= grid buffer\n lons = np.linspace(lon_min, lon_max, \n int(np.round(nx_cells/scale_factor)))\n lats = np.linspace(lat_min, lat_max, \n int(np.round(ny_cells/scale_factor)))\n lons_out = np.linspace(lon_min, lon_max_out, \n int(np.round(nx_cells_out/scale_factor)))\n lats_out = np.linspace(lat_min, lat_max_out, \n int(np.round(ny_cells_out/scale_factor)))\n\n # if function was 'linear_rbf' \n function = function.replace('_rbf', '')\n # make sampling square grid\n XI, YI = np.meshgrid(lons, lats)\n # apply rbf interpolation\n rbf = Rbf(lon_pts, lat_pts, values, function=function, smooth=smooth)\n ZI = rbf(XI, YI)\n # clip to 
original extent, rbf array flips axes, and row order... \n ZI_out = ZI[0:len(lats_out),0:len(lons_out)]\n ZI_out = np.flip(ZI_out,axis=0)\n\n #### save scipy interpolated data as raster \n pixel_size = CS * scale_factor\n # number of pixels in each direction\n x_size = len(lons_out)\n y_size = len(lats_out)\n # set geotransform info\n gt = [\n lon_min,\n pixel_size,\n 0,\n lat_max_out,\n 0,\n -pixel_size\n ]\n # make geotiff raster\n driver = gdal.GetDriverByName('GTiff')\n ds = driver.Create(\n out_file,\n x_size, \n y_size, \n 1, \n gdal.GDT_Float32, \n )\n # set projection geographic lat/lon WGS 84\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n ds.SetProjection(srs.ExportToWkt())\n # assign spatial dimensions \n ds.SetGeoTransform(gt)\n outband = ds.GetRasterBand(1)\n # save rbf interpolated array as geotiff raster close\n outband.WriteArray(ZI_out)\n ds = None\n\n # calc residuals add to shapefile and in_path CSV, move shape to out_dir\n # only residuals for bias ratios, i.e. not for std dev, etc\n if layer in InterpGdal.default_layers:\n calc_pt_error(in_path, out_dir, layer, grid_var, \n grid_id_name=grid_id_name\n )\n # calculate zonal statistics save means for each gridMET cell\n if z_stats:\n zonal_stats(in_path, out_file, grid_id_name=grid_id_name)\n \n # plot layer's interpolated residuals as bar plot witheach Wx station \n # only produce residual plots for bias ratios, i.e. not for std dev, etc\n if res_plot and layer in InterpGdal.default_layers:\n layer = InterpGdal.var_residual_names.get(\n layer, \n layer.replace('mean','res')\n )\n y_label = 'residual (interpolated minus station value)'\n title = 'layer: {} algorithm: {} (RBF) resolution: {} deg.'.format(\n layer, function ,res)\n res_plot_dir = Path(out_dir)/'residual_plots'\n subtitle='parameters: smooth={}'.format(smooth)\n source_file = Path(out_dir)/Path(in_path).name\n\n station_bar_plot(source_file, layer, out_dir=res_plot_dir, \n y_label=y_label, title=title, subtitle=subtitle)", "def interpolating_model_DA(temp,grav,m_type='da2014'):\n # PARAMETERS # \n dir_models = basedir + '/WDModels_Koester.'+m_type+'_npy/'\n if m_type==\"pier\":\n teff=np.array([1500.,1750.,2000.,2250.,2500.,2750.,3000.,3250.,3500.,\n 3750.,4000.,4250.,4500.,4750.,5000.,5250.,5500.,6000.,\n 6500.,7000.,7500.,8000.,8500.,9000.,9500.,10000.,10500.,\n 11000.,11500.,12000.,12500.,13000.,13500.,14000.,14500.,\n 15000.,15500.,16000.,16500.,17000.,20000.,25000.,30000.,\n 35000.,40000.,45000.,50000.,55000.,60000.,65000.,70000.,\n 75000.,80000.,85000.,90000.])\n logg=np.array([6.50,7.00,7.50,7.75,8.00,8.25,8.50,9.00,9.50])\n elif m_type==\"da2014\":\n teff=np.array([6000.,6250.,6500.,6750.,7000.,7250.,7500.,7750.,8000.,\n 8250.,8500.,8750.,9000.,9250.,9500.,9750.,10000.,10100.,\n 10200.,10250.,10300.,10400.,10500.,10600.,10700.,10750.,\n 10800.,10900.,11000.,11100.,11200.,11250.,11300.,11400.,\n 11500.,11600.,11700.,11750.,11800.,11900.,12000.,12100.,\n 12200.,12250.,12300.,12400.,12500.,12600.,12700.,12750.,\n 12800.,12900.,13000.,13500.,14000.,14250.,14500.,14750.,\n 15000.,15250.,15500.,15750.,16000.,16250.,16500.,16750.,\n 17000.,17250.,17500.,17750.,18000.,18250.,18500.,18750.,\n 19000.,19250.,19500.,19750.,20000.,21000.,22000.,23000.,\n 24000.,25000.,26000.,27000.,28000.,29000.,30000.,35000.,\n 40000.,45000.,50000.,55000.,60000.,65000.,70000.,75000.,\n 80000.,90000.,100000.])\n logg=np.array([4.00,4.25,4.50,4.75,5.00,5.25,5.50,5.75,6.00,6.25,6.50,\n 6.75,7.00,7.25,7.50,7.75,8.00,8.25,8.50,8.75,9.00,9.25,\n 9.50])\n if 
(m_type=='pier') & (temp<1500. or temp>90000. or grav<6.5 or grav>9.5): \n return [],[]\n elif (m_type=='da2014') & (temp<6000. or temp>100000. or grav<4.0 or grav>9.5): \n return [],[]\n # INTERPOLATION #\n g1,g2 = np.max(logg[logg<=grav]),np.min(logg[logg>=grav])\n if g1!=g2: g = (grav-g1)/(g2-g1)\n else: g=0\n t1,t2 = np.max(teff[teff<=temp]),np.min(teff[teff>=temp])\n if t1!=t2: t = (temp-t1)/(t2-t1) \n else: t=0\t\n if m_type =='da2014': models = ['da%06d_%d_2.7.npy'%(i, j*100) for i in [t1,t2] \n for j in [g1,g2]]\n else: models = ['WD_%.2f_%d.0.npy'%(j, i) for i in [t1,t2] for j in [g1,g2]]\n try:\n m11, m12 = np.load(dir_models+models[0]), np.load(dir_models+models[1])\t\n m21, m22 = np.load(dir_models+models[2]), np.load(dir_models+models[3])\t\n flux_i = (1-t)*(1-g)*m11[:,1]+t*(1-g)*m21[:,1]+t*g*m22[:,1]+(1-t)*g*m12[:,1]\n return np.dstack((m11[:,0], flux_i))[0]\n except: return [],[]", "def stamps(d_data='',**kwargs):\n GR = glo.global_results()\n if p.gal_index == 'all':\n gal_indices = np.arange(GR.N_gal)\n else:\n gal_indices = p.gal_index\n\n p = copy.copy(params)\n for key,val in kwargs.items():\n setattr(p,key,val)\n\n for gal_index in gal_indices:\n\n fig, ax = plt.subplots(figsize=(8,8))\n\n gal_ob = gal.galaxy(GR=GR, gal_index=gal_index)\n simgas = gal_ob.particle_data.get_dataframe('simgas',d_data=d_data)\n map2D,lab,max_scale = make_projection_map(simgas,prop=p.prop,pix_size_kpc=p.pix_size_kpc,scale=1.5)\n\n # Plot\n ax.set_facecolor(\"black\")\n Rmax = max_scale/2\n if p.log:\n map2D[map2D < 10.**p.vmin] = 10.**p.vmin/2\n map2D[map2D > 10.**p.vmax] = 10.**p.vmax\n map2D = np.log10(map2D)\n if not p.log:\n map2D[map2D < p.vmin] = p.vmin/2\n map2D[map2D > p.vmax] = p.vmax\n im = ax.imshow(map2D,\\\n extent=[-Rmax,Rmax,-Rmax,Rmax],vmin=p.vmin,vmax=p.vmax,cmap=p.cmap)\n Rmax = p.R_max\n ax.set_xlim([-Rmax,Rmax])\n ax.set_ylim([-Rmax,Rmax])\n ax.text(0.05,0.05,'G%i' % gal_index,\\\n fontsize=55,transform=ax.transAxes,color='white')\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n ax.set_aspect('equal')\n\n #plt.gca().set_axis_off()\n #plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, \n # hspace = 0, wspace = 0)\n #plt.margins(0,0)\n #plt.gca().xaxis.set_major_locator(plt.NullLocator())\n #plt.gca().yaxis.set_major_locator(plt.NullLocator())\n if not os.path.isdir(p.d_plot + 'sim_data/stamps/'): os.mkdir(p.d_plot + 'sim_data/stamps/') \n plt.savefig(p.d_plot + 'sim_data/stamps/%s%s_G%i.png' % (p.sim_name,p.sim_run,gal_index),\\\n bbox_inches = 'tight', pad_inches = 0)" ]
[ "0.5963468", "0.59558034", "0.5918713", "0.589341", "0.5847284", "0.58300716", "0.5703463", "0.5690351", "0.5650076", "0.5646748", "0.56256956", "0.55975646", "0.5596329", "0.5573423", "0.55663466", "0.55646265", "0.5511411", "0.55077195", "0.5489869", "0.54598117", "0.5450311", "0.54183257", "0.54062396", "0.5401389", "0.53804684", "0.5379006", "0.53418905", "0.5341473", "0.53231657", "0.53207844", "0.53180736", "0.530777", "0.5291084", "0.52750593", "0.52524877", "0.52524805", "0.5248287", "0.5247466", "0.5245833", "0.5242242", "0.52342016", "0.5232627", "0.5224761", "0.52233374", "0.5218842", "0.5217346", "0.5212071", "0.521084", "0.5203079", "0.51866645", "0.5182905", "0.5165376", "0.5163047", "0.5162325", "0.51577514", "0.51575476", "0.51491517", "0.5149011", "0.5147609", "0.51373905", "0.5130344", "0.5128186", "0.5123508", "0.5116181", "0.5114735", "0.5109679", "0.5108474", "0.5107884", "0.5106447", "0.5104243", "0.50957733", "0.5095492", "0.50870675", "0.50830495", "0.50800985", "0.50788385", "0.50766855", "0.507494", "0.5061517", "0.50512284", "0.5050664", "0.5048307", "0.5032752", "0.502368", "0.50216216", "0.5019701", "0.50175", "0.5012098", "0.5008966", "0.5007142", "0.4997125", "0.49920642", "0.4991789", "0.4986874", "0.49857494", "0.49822932", "0.49792442", "0.49782515", "0.49728486", "0.4971696", "0.49710944" ]
0.0
-1
Keep trying random points until its mirror is far enough away
def __generate_spawn_points(self):\n    while True:\n        p1x = random.randint(0, self.width - 1)\n        p1y = random.randint(0, self.height - 1)\n        p2x, p2y = self.__mirror(p1x, p1y)\n        d_sq = (p1x - p2x)**2 + (p1y - p2y)**2\n        if d_sq >= (self.width / 2)**2:\n            break\n    return (p1x, p1y), (p2x, p2y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def random(self):\n adj = self.adjacent()\n self.switch(random.choice([pos for pos in adj if self.in_grid(pos) and pos != self.prev]))", "def bc19_4_radius(randomize=False):\n moves = [\n Point(0, 1),\n Point(1, 0),\n Point(0, -1),\n Point(-1, 0),\n Point(1, 1),\n Point(1, -1),\n Point(-1, 1),\n Point(-1, -1),\n Point(2, 0),\n Point(0, 2),\n Point(0, -2),\n Point(-2, 0)\n ]\n\n if randomize:\n return random.shuffle(moves)\n else:\n return moves", "def choose_next(self, round):\n return random.choice(self.possible_coords)", "def random_walker_generator(rows, cols, negative=False):\n attempts = 0\n while True:\n steps = 0\n found_goal = False\n grid = np.zeros((rows, cols))\n # start on bottom row\n current = (rows - 1, random.randint(0, cols - 1))\n grid[current] = 1\n steps += 1\n visited = set(current)\n\n connection = 0\n\n neighbors = get_neighbors(current, grid, visited, similar_cells={1})\n while len(neighbors) > 0:\n for (neigh_x, neigh_y) in set(neighbors):\n # lookahead for neighbors neighbors\n lookahead = get_neighbors(\n (neigh_x, neigh_y), grid, visited, similar_cells={1})\n if len(lookahead) < 3: # contains neighbors with 1's\n # edge cases\n if neigh_x == 0 and random.random() >= 0.25:\n # chance of reaching goal at top\n continue\n elif ((neigh_y == 0 or neigh_y == rows - 1) and\n len(lookahead) == 2):\n continue\n else:\n neighbors.remove((neigh_x, neigh_y))\n\n if len(neighbors) == 0:\n # print (\"no more neighbors to pick\")\n break\n\n # time.sleep(0.15)\n # os.system(\"clear\")\n # draw_grid(grid)\n\n current = random.sample(neighbors, 1)[0] # pick a random neighbor\n # print (\"selected: \", current)\n grid[current] = 1\n steps += 1\n visited.add(current)\n if current[0] == 0: # top row\n # print (\"top row reached\")\n found_goal = True\n break\n neighbors = get_neighbors(current, grid, visited, similar_cells={1})\n\n if (found_goal and not negative) or (not found_goal and negative):\n # print (\"Succeeded after %d attempts\" % attempts)\n attempts = 0\n grid = apply_noise(grid)\n\n # hack\n # override above step counter, because the random noise\n # might have added more, shorter connections\n # we do this because network was picking up patterns\n # from making random noise not entirely random\n steps, connected = check_connections_length(grid)\n if connected and negative:\n continue\n\n # randomly flip grid upside down\n if random.random() <= 0.5:\n grid = np.flipud(grid)\n\n yield grid, steps, connected\n else:\n attempts += 1", "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def random_step(self):\n pos = [i for i in range(9) if self.grid[i] == 0]\n move = random.choice(pos)\n return self.step(move)", "def targetpoint(self, initpoint):\n while True:\n col = int(random.uniform(0, COLS))\n row = int(random.uniform(0, ROWS))\n if (row, col) != initpoint:\n break\n return (row, col)", "def __random_movement(self):\n\t\tself.__steps += 1 \t\t# Increment after every frame\n\t\t# When __steps greater than threshold reverse the direction\n\t\t# and set threshold to a new random value\n\t\tif self.__steps >= self.__threshold_steps:\t\n\t\t\tif self.direction == 'RIGHT':\n\t\t\t\tself.move_left()\n\t\t\t\tself.direction = 'LEFT'\n\t\t\telse:\n\t\t\t\tself.move_right()\n\t\t\t\tself.direction = 'RIGHT'\n\t\t\tself.__threshold_steps = random.randint(25,50)\n\t\t\tself.__steps = 0\n\t\t# Confines the Donkeys movement to within the boundary \n\t\tself.__check_boundary()", "def move(self):\n if 
self._z >= 75:\n a = random.random()\n print(str(a))\n if a < 0.2:\n self._z += 1\n if a > 0.2 and a < 0.9:\n self._z -= 1\n if a > 0.9:\n self._z = self._z\n else: \n self._z -= 1\n \n b = random.random()\n print(str(b))\n if b < 0.1:\n self._y += 1\n if b > 0.1 and b < 0.2:\n self._y -= 1\n if b > 0.2 and b < 0.25:\n self._x -= 1\n if b > 0.25:\n self._x += 1", "def simulate_rerolling(p: float, n: int) -> int:\n\n counter = 0\n new_n = n\n while new_n > 0:\n for _ in range(new_n):\n ran = random.random()\n if ran < p:\n new_n -= 1\n counter += 1\n return counter", "def MoveRandom(self):\n r = random.randint(0,3)\n if r == 0: self.x += 1\n elif r == 1: self.y += 1\n elif r == 2: self.x -= 1\n elif r == 3: self.y -= 1", "def make_random_move(self):\n s=set()\n for i in range(self.height):\n for j in range(self.width):\n s.add((i,j))\n\n s=s-self.mines-self.moves_made\n if s==set(): return None\n return random.choice(list(s))\n #raise NotImplementedError", "def topology_random_reconnect(self, probability):\n\t\tfor i in range(len(self.sites)):\n\t\t\tfor j in range(len(self.sites)):\n\t\t\t\tif (i != j) and (self.sites[j] in self.sites[i].neighbors):\n\t\t\t\t\tif numpy.random.rand() < probability / 2.0:\n\t\t\t\t\t\tchoice_list = [s for s in self.sites if not (s in self.sites[i].neighbors)]\n\t\t\t\t\t\tif len(choice_list) > 0:\n\t\t\t\t\t\t\tchoosed = numpy.random.choice(choice_list)\n\t\t\t\t\t\t\tself.sites[i].neighbors.remove(self.sites[j])\n\t\t\t\t\t\t\tself.sites[j].neighbors.remove(self.sites[i])\n\t\t\t\t\t\t\tself.sites[i].neighbors.append(choosed)\n\t\t\t\t\t\t\tchoosed.neighbors.append(self.sites[i])", "def topology_random_connect(self, probability):\n\t\tfor i in range(len(self.sites) - 1):\n\t\t\tfor j in range(i + 1, len(self.sites)):\n\t\t\t\tif not (self.sites[j] in self.sites[i].neighbors):\n\t\t\t\t\tif numpy.random.rand() < probability:\n\t\t\t\t\t\tself.sites[i].neighbors.append(self.sites[j])\n\t\t\t\t\t\tself.sites[j].neighbors.append(self.sites[i])", "def mutate_point_circ(mutated_genome):\n seed = random.randint(0,3)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_circ(mutated_genome,index)\n elif seed == 1:\n shift_point_circ(mutated_genome,index)\n elif seed == 2:\n move_radius_circ(mutated_genome,index)\n else: #seed == 3:\n shift_radius_circ(mutated_genome,index)", "def shuffle(self):\n self.turn_by_deg(20)\n time.sleep(.25)\n self.fwd()\n time.sleep(1)\n self.stop()\n self.back()\n time.sleep(1)\n self.stop()\n self.turn_by_deg(-40)\n time.sleep(.25)\n self.fwd()\n time.sleep(1)\n self.back()\n time.sleep(1)\n self.stop()", "def shuffle_points(mutated_genome,index):\n random.shuffle(mutated_genome[index][2])", "def RandomCoordinate(): \r\n return ReturnRounded(np.random.uniform(-10,10))", "def adjacent_linear(randomize=False):\n moves = [\n Point(0, 1),\n Point(1, 0),\n Point(0, -1),\n Point(-1, 0)\n ]\n\n if randomize:\n return random.shuffle(moves)\n else:\n return moves", "def random_adjacent_tile(self):\n adj = self.adjacent()\n pos_list = [pos for pos in adj if self.in_grid(pos) and pos != self.prev]\n return random.choice(pos_list)", "def _sample_epislon(self, cur_y, cur_z):\n old_loglik = self._loglik(cur_y, cur_z)\n old_epislon = self.epislon\n \n # modify the feature ownership matrix\n self.epislon = np.random.beta(1,1)\n new_loglik = self._loglik(cur_y, cur_z)\n move_prob = 1 / (1 + np.exp(old_loglik - new_loglik));\n if random.random() < move_prob:\n pass\n else:\n self.epislon = old_epislon", "def 
shuffle(self): \n for x in range(12):\n self.right(primary=-60, counter=0)\n time.sleep(.1)\n self.left(primary=-60, counter=0)\n time.sleep(.1)\n self.stop()", "def update(self):\n if self.x<0:\n self.x = 0\n\n if self.y <0:\n self.y = 0\n\n if bool(randint(0, 1))==True:\n if self.walker == True:\n self.x += randint(-2, 2)\n self.y += randint(-2, 2)", "def on_epoch_end(self):\r\n if self.shuffle:\r\n np.random.shuffle(self.pathways)", "def mutate_point_rect(mutated_genome):\n seed = random.randint(0,1)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_rect(mutated_genome,index)\n else: #seed == 1:\n shift_point_rect(mutated_genome,index)", "def make_random_move(self):\n #completely random move\n all_moves = set(itertools.product(range(self.height), range(self.width)))\n moves_left = list(all_moves - self.mines - self.moves_made)\n if not moves_left:\n return None\n return random.choice(moves_left)", "def mutate_random(self, point, population):\n other = Point(self.model.generate())\n other.evaluate(self.model)\n while other in population or other == point:\n other = Point(self.model.generate())\n other.evaluate(self.model)\n return other", "def randomMove(board):\r\n go = True\r\n while go:\r\n y = random.randint(0, board.size - 1)\r\n x = random.randint(0, board.size - 1)\r\n go = not board.validMove((y, x))\r\n return (y, x)", "def randomConnect(self):\n if self.Nc == 0:\n return\n else:\n possible_pairs = np.vstack(np.triu_indices(self.numMonomers,k=2)).T\n Nl = len(possible_pairs)\n selected = possible_pairs[np.random.choice(Nl,size=self.Nc,replace=False)].T\n self.connect(selected)", "def random_location(self):\r\n\r\n while True:\r\n pt = (random.uniform(self.worldbox.tl[0], self.worldbox.br[0]),\r\n random.uniform(self.worldbox.tl[1], self.worldbox.br[1]))\r\n if not self.is_wall(pt) and not self.is_target(pt):\r\n return pt", "def random_walk(n):\n x,y = 0,0\n for i in range(n):\n (dx,dy) = random.choice([(0,1),(1,0),(0,-1),(-1,0)])\n x += dx\n y+=dy\n return(x,y)", "def shuffle_if_needed(self):\n if self.index >= len(self.wallpapers):\n self.shuffle()\n return True\n else:\n return False", "def randomcorners():\n r = lambda x: random.randint(int(x*0.4), int(x*0.6))\n cx = r(gs.DEFAULTS['width'])\n cy = r(gs.DEFAULTS['height'])\n\n w = int(gs.DEFAULTS['width'] * random.random() * 0.2)\n h = int(gs.DEFAULTS['height'] * random.random() * 0.2)\n\n rcrns = [(cx-w, cy-h), (cx+w, cy-h), (cx+w, cy+h), (cx-w, cy+h)]\n random.shuffle(rcrns)\n\n return rcrns", "def brute_force(savedPnts, unitRadius, point):\n for pnt in savedPnts:\n d = distance(pnt, point)\n if d < unitRadius: return False\n return True", "def random_walk(lgca):\n # disarrange(lgca.nodes, axis=-1)\n relevant = (lgca.cell_density[lgca.nonborder] > 0) & \\\n (lgca.cell_density[lgca.nonborder] < lgca.K)\n coords = [a[relevant] for a in lgca.nonborder]\n for coord in zip(*coords):\n npr.shuffle(lgca.nodes[coord])", "def cointoss():\n return random.random() < 0.5", "def move(self):\n if random.random() < 0.5:\n self.y = (self.y + 1) % 100\n else:\n self.y = (self.y - 1) % 100\n if random.random() < 0.5:\n self.x = (self.x + 1) % 100\n else:\n self.x = (self.x - 1) % 100", "def bc19_9_radius(randomize=False):\n moves = [\n Point(0, 1),\n Point(1, 0),\n Point(0, -1),\n Point(-1, 0),\n Point(1, 1),\n Point(1, -1),\n Point(-1, 1),\n Point(-1, -1),\n Point(2, 0),\n Point(0, 2),\n Point(0, -2),\n Point(-2, 0),\n Point(0, 3),\n Point(1, 2),\n Point(2, 1),\n Point(3, 0),\n Point(2, -1),\n Point(1, 
-2),\n            Point(0, -3),\n            Point(-1, -2),\n            Point(-2, -1),\n            Point(-3, 0),\n            Point(-2, 1),\n            Point(-1, 2),\n            Point(2, 2),\n            Point(2, -2),\n            Point(-2, 2),\n            Point(-2, -2)\n        ]\n\n        if randomize:\n            return random.shuffle(moves)\n        else:\n            return moves", "def test_full_setup(n):\n    for x in range(n):\n        for y in range(n):\n            Stitch(x,y)\n            Stitch.stitches[(x,y)].vital = True if round(rnd.random()) == 1 else False", "def uniform_random(self) -> None:\n\n        size = self.circ_size\n        random.seed(self.seed)\n\n        gates = [self.h, self.x, self.y, self.z, self.s, self.t, self.cx]\n        candidates = set(range(size))\n\n        for i in range(size):\n            for j in range(size):\n                to_apply = random.choice(gates)\n\n                num_qubits = 2 if to_apply == self.cx else 1\n                targets = random.sample(candidates, num_qubits)\n                to_apply(*targets)\n\n        if self.meas: self.measure(self.qr, self.cr)", "def generate(self):\n        for i in range(4):\n            random_first = randomize_first_box()\n            self.randomize(random_first)\n        for i in range(9):\n            random_pos = randomize_position()\n            self.randomize(random_pos)\n        self.board.solve()", "def stream():\n    while True:\n        yield random_point()", "def switch_points(mutated_genome,index):\n    point_index1 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n    point_index2 = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n    temp = mutated_genome[index][2][point_index1]\n    mutated_genome[index][2][point_index1] = mutated_genome[index][2][point_index2]\n    mutated_genome[index][2][point_index2] = temp", "def totem_random():\n    random_head()\n    random_head()\n    random_head()", "def move_random(self, board):\n        self.get_moves(board.board)\n        return random.choice(self.available_moves)", "def random_points_ascending_hillclimber(house, all_houses, waters, total_value_map):\n    total_value_map_NEW = total_value_map\n\n    # check in which range the house can be placed, not looking at water or other houses\n    rangex = MAXIMUM_WIDTH - house.width\n    rangey = MAXIMUM_HEIGHT - house.length\n\n    for x in range(100):\n        # make a random x and y coordinate\n        randomizex = rangex * random()\n        randomizey = rangey * random()\n\n        # save the old locations\n        tempx = house.bottom_left[0]\n        tempy = house.bottom_left[1]\n        \n        # change the location\n        bottom_left = (randomizex,randomizey)\n        house.location(bottom_left)\n\n        # if the house can be placed at the new location\n        if place_house(house, all_houses, waters) == True:\n            # compute the new map value, with the house moved\n            total_value_map_temp = 0\n            for item in all_houses.values():\n                for house in item:\n                    house.extra_meters()\n                    total_value_map_temp += house.totalprice()\n\n            # if the value with the new location is higher, update it\n            if total_value_map_NEW < total_value_map_temp:\n                total_value_map_NEW = total_value_map_temp\n            # if the value is not higher, move back to the old location and recompute the total map value\n            else:\n                bottom_left = (tempx,tempy)\n                house.location(bottom_left)\n                if place_house(house, all_houses, waters) == True:\n                    for item in all_houses.values():\n                        for houses in item:\n                            houses.extra_meters()\n                            houses.totalprice()\n        # if the house cannot be placed, move back to the old location and recompute the total map value\n        else:\n            bottom_left = (tempx,tempy)\n            house.location(bottom_left)\n            if place_house(house, all_houses, waters) == True:\n                for item in all_houses.values():\n                    for houses in item:\n                        houses.extra_meters()\n                        houses.totalprice()\n\n    return all_houses, total_value_map_NEW", "def __random_pickup(self, guess):\n        already_clustered = guess.sum(axis=0)\n        while True:\n            p1 = random.randint(0, guess.shape[1] 
- 1)\n p2 = random.randint(0, guess.shape[2] - 1)\n if not already_clustered[p1, p2]:\n return (p1, p2)", "def move(self):\n self._move_range_shuffle(3)\n self._move_satisfy_random_constraint()\n # self._move_range_shuffle(3)\n #if (curr_energy > 50):\n # self._move_satisfy_random_constraint()\n #else:\n # self._move_range_shuffle(3)", "def shuffle_pos(self, ):\n x, y = 0, 0\n while self.maze.structure[int(y / 40)][int(x / 40)] != \"0\" \\\n or (x, y) in self.forbiden_tulpes:\n x = random.randint(0, 14) * sprite_size\n y = random.randint(0, 14) * sprite_size\n self.forbiden_tulpes.append((x, y))\n return x, y", "def pull(self):\n chance = np.random.uniform()\n return chance < self.winning_prob", "def move_randomly(self, with_fight=False):\n delta = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (0, -1), (1, -1), (1, 0), (1, 1)]\n rd.shuffle(delta)\n x, y = self.owner.pos\n while len(delta) > 0:\n dx, dy = delta.pop()\n if self.move_towards_position((x + dx, y + dy)):\n return", "def step():\n x_rand = sample()\n x_nearest = new_nearest_neighbour(x_rand)\n x_new = steer(x_nearest, x_rand)\n if obstacle_free(x_nearest, x_new):\n X_near = new_neighbourhood(x_new)\n x_min = x_nearest\n c_min = x_nearest.cost + x_nearest.dist_to(x_new)\n for x_near in X_near:\n if obstacle_free(x_near, x_new) and (x_near.cost + x_near.dist_to(x_new) < c_min):\n x_min = x_near\n c_min = (x_near.cost + x_near.dist_to(x_new) < c_min)\n x_new_node = add_node(x_new, x_min, True)\n for x_near in X_near:\n if obstacle_free(x_near, x_new) and (x_new_node.cost + x_near.dist_to(x_new) < x_near.cost):\n x_near.change_parent(x_new_node)\n # Here I check for goal paths and draw the circle\n updated = False\n if shared.root_path:\n updated = goal_path_resolve(shared.root_path[0])\n updated = updated or goal_path_resolve(shared.nodes[-1])\n if updated:\n diameter = shared.root_path_length\n center = ((shared.root_path[0].x + shared.root_path[-1].x) / 2,\n (shared.root_path[0].y + shared.root_path[-1].y) / 2)\n if shared.region:\n shared.region.remove_from_batch()\n shared.region = ellipse.Ellipse(center[0], center[1], diameter)\n shared.region.add_to_batch()", "def step_solution(self):\n import time, random\n time.sleep(1.0)\n print '(step_solution) Implement me!'\n return True if random.random() < 0.25 else False", "def new_and_near(self):\n if self.prob and random.random() < self.prob:\n x_rand = self.goal_config\n else:\n x_rand = self.planning_env.sample_free()\n x_nearest_id, x_nearest = self.tree.GetNearestVertex(x_rand)\n x_new = self.steer(x_nearest, x_rand)\n # check if new point is in X_free and not already in V\n # if x_new in self.tree.vertices or not self.planning_env.state_validity_checker(x_new):\n if x_new in self.tree.vertices or not self.planning_env.collision_free(x_new, x_nearest):\n return None, None\n\n self.tree.samples_taken += 1\n return x_new, x_nearest", "def choose_pos(self):\n s = self\n\n availablepos = []\n for dblock in s.pjs.dblocks:\n is_available = True\n\n for powerup in s.pjs.powerups:\n if powerup.rects[0].overlap(dblock.rects[0]):\n is_available = False\n break\n\n if is_available:\n availablepos.append(dblock.rpos)\n\n pos = random.randint(0, len(availablepos) - 1)\n s.rpos = availablepos[pos]", "def wait(self):\n self.set_vals(spin=.2)\n nearest_deg = 0\n nearest_deg_dist = self.perim_dist + 1\n for i, x in enumerate(self.ranges):\n if (x != 0) and (x < nearest_deg_dist):\n nearest_deg = i\n nearest_deg_dist = x\n if nearest_deg_dist < self.perim_dist:\n nearest_deg = ((nearest_deg + 180) % 360) 
- 180\n self.center(degree=nearest_deg)\n self.current_state = \"follow\"", "def direction_correction(self):\n self.directions.monster = random.uniform(self.directions.monster * self.get_monster_sensitivity(),\n self.directions.monster * (1 + (1 - self.get_monster_sensitivity())))\n self.directions.food = random.uniform(self.directions.food * self.get_food_sensitivity(),\n self.directions.food * (1 + (1 - self.get_food_sensitivity())))\n self.directions.water = random.uniform(self.directions.water * self.get_water_sensitivity(),\n self.directions.water * (1 + (1 - self.get_water_sensitivity())))", "def run(self):\n cell = self.grid.random_cell()\n unvisited = self.grid.size() - 1\n while unvisited > 0:\n neighbor = random.choice(cell.neighbors())\n\n if list(neighbor.links_list()) == []:\n cell.link(neighbor)\n unvisited -= 1\n\n cell = neighbor\n\n return self.grid", "def random_walk_2(n):\n x,y=0,0\n for i in range(n):\n dx,dy = random.choice([(0,1), (0,-1),(1,0) ,(-1,0)])\n x+= dx\n y+= dy\n return (x,y)", "def MovePoint(pos,points,s=0.5, k = 1000): \n \n x = [pos[0]] \n y = [pos[1]]\n rdm.seed(1) # comment out if you want randomness\n for i in range(k):\n n = len(points)\n r = int(rdm.random()*n)\n \n x.append(x[i] + (points[r][0] - x[i]) * s)\n y.append(y[i] + (points[r][1] - y[i]) * s)\n\n \n return(x,y)", "def prepare_next_turn(grid):\n\tempties = get_empty_cells(grid)\n\ty,x = random.choice(empties)\n\tgrid[y][x] = 2 if random.random() < prob_2 else 4\n\treturn any_possible_moves(grid)", "def new_tile(self):\n while True:\n random_row = random.randrange(self._grid_height)\n random_column = random.randrange(self._grid_width)\n if self._grid[random_row][random_column] == 0:\n self._grid[random_row][random_column] = random.choice([2] * 9 + [4])\n break", "def neighbor(R, L, p):\n Rp = []\n for k in range(0,len(R)):\n epsilon = random.random()\n if (epsilon > p):\n Rp.append(R[k])\n else:\n randpoint = randVacantPoint(L)\n # update L according to the random\n # vacant point previously found.\n L[randpoint[0]][randpoint[1]]=1\n L[R[k][0]][R[k][1]]=0\n\n Rp.append(randpoint)\n \n return Rp", "def changeState(self, xyPoints):\n nPts = len(xyPoints)\n ind0 = random.randint(1, nPts-1)\n ind1 = random.randint(1, nPts-1)\n while ind1 == ind0:\n ind1 = random.randint(1, nPts-1)\n # make copy of the sources to make sure the swap works correctly\n xyPoints[ind0], xyPoints[ind1] = tuple(xyPoints[ind1]), tuple(xyPoints[ind0])", "def rs():\n return random.choice([-1,1])", "def rs():\n return random.choice([-1,1])", "def run(self):\n for cell in self.grid.each_cell():\n neighbors = []\n if cell.north:\n neighbors.append(cell.north)\n if cell.east:\n neighbors.append(cell.east)\n if neighbors:\n neighbor = random.choice(neighbors)\n if neighbor:\n cell.link(neighbor)\n return self.grid", "def __generate_point_based_on_prob(self) -> Point:\n possible = False\n while not possible:\n # make the random decision based on a distribution (hot spots / different probabilities)\n prob_list = self.probability_distribution_grid.flatten()\n selected_index = np.random.choice(\n np.arange(0, len(prob_list)), p=prob_list)\n\n # get the indices of the cell (from the one array index)\n # width is the number of cells in x directions (it starts with cell 0/0) and is needed due to row-major order\n cell_x = int(selected_index % self.occupancy_map.info.width)\n cell_y = int(selected_index / self.occupancy_map.info.width)\n\n # get the real world coordinates (which represents the center of the cell)\n x = 
self.occupancy_map.info.origin.position.x + \\\n (cell_x + 0.5) * self.occupancy_map.info.resolution\n y = self.occupancy_map.info.origin.position.y + \\\n (cell_y + 0.5) * self.occupancy_map.info.resolution\n\n # Check if the actual cell is free of STATIC obstacles (not occupied)\n if not self.__cell_is_occupied(cell_x, cell_y):\n # Check for not occupied neighbors (the robot needs some space the reach it)\n if not self.__has_occupied_neighbors(cell_x, cell_y):\n # If actual spawning of dirt is enabled, then it should also be ensured that no other dirt object is already\n # at this position, because spawning a model in the same location of an already existing model can lead to problems\n if not self.prevent_duplicates or not self.__check_for_duplicates(Point(x, y, 0.0)):\n possible = True\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to already \"\n \"active dirt at this position.\\n\\tGenerating next one...\\n\" % (x, y))\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to occupied neighbor \"\n \"cells.\\n\\tGenerating next one...\\n\" % (x, y))\n else:\n rospy.loginfo(\"*** WRAPPER MESSAGE ***\\n\\n\\tGenerated dirt at (%.2f | %.2f) was refused due to occupied cell.\"\n \"\\n\\tGenerating next one...\\n\" % (x, y))\n return Point(x=x, y=y, z=0.0)", "def possibleMovements(self,numIterations:int=50)->list[tuple]:\n x=random.randint(0,self._side-1); y=random.randint(0,self._side-1)\n possible_positions=[]\n positionsCovered=[(x,y)]\n for _ in range(numIterations):\n if x+2<self._side and y+1<self._side:\n possible_positions.append((x+2,y+1))\n \n if x+2<self._side and y-1<self._side and y-1>0:\n possible_positions.append((x+2,y-1))\n \n if x-2<self._side and y+1<self._side and x-2>0:\n possible_positions.append((x-2,y+1))\n \n if x-2<self._side and y-1<self._side and x-2>0 and y-1>0:\n possible_positions.append((x-2,y-1)) \n\n if x+1<self._side and y+2<self._side:\n possible_positions.append((x+1,y+2))\n \n if x+1<self._side and y-2<self._side and y-1>0:\n possible_positions.append((x+1,y-2))\n\n if x-1<self._side and y+2<self._side and x-1>0:\n possible_positions.append((x-1,y+2))\n \n if x-1<self._side and y-2<self._side and x-1>0 and y-2>0:\n possible_positions.append((x-1,y-2))\n\n newX,newY=random.choice(possible_positions) #choose randomly among the possible positions,and then repeat this \n x,y=newX,newY\n positionsCovered.append((newX,newY)) \n\n return positionsCovered", "def random_pose(self):\n position = self._start\n while self[position].distance < np.sum(self._rooms.shape) * 2:\n position = np.array(\n [random.randrange(limit) for limit in self._rooms.shape]\n )\n direction = random.choice(self.exits(position))\n return (position, direction)", "def randVacantPoint(L):\n pliste = vacantPoint(L)\n\n return pliste[random.randint(0, len(pliste)-1)]", "def new_tile(self):\r\n # check if is zero or not\r\n new_tile_added = False\r\n # a list to 2 90% of the time and 4 10% of the time\r\n new_tile_list = [2,2,2,2,2,2,2,2,2,4]\r\n counter = 0\r\n while not new_tile_added:\r\n row_position = random.randrange(0,self.grid_height)\r\n col_position = random.randrange(0,self.grid_width)\r\n if self.grid[row_position][col_position] == 0:\r\n self.grid[row_position][col_position] = random.choice(new_tile_list)\r\n new_tile_added = True\r\n if counter > self.grid_width * self.grid_height:\r\n print 'you failed'\r\n break\r\n\r\n counter +=1", "def 
mutate_point_poly(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 3: seed = 0\n if seed == 0:\n insert_point(mutated_genome,index)\n elif seed == 1:\n remove_point(mutated_genome,index)\n elif seed == 2:\n switch_points(mutated_genome,index)\n elif seed == 3:\n shuffle_points(mutated_genome,index)\n elif seed == 4:\n move_point(mutated_genome,index)\n elif seed == 5:\n shift_point(mutated_genome,index)\n elif seed == 6:\n increment_point(mutated_genome,index)\n else: #seed == 7:\n decrement_point(mutated_genome,index)", "def random_position(self):\n while True:\n h = random.randrange(0, self.height)\n w = random.randrange(0, self.width)\n if self.grid[h, w] == 0:\n return (h, w)", "def _move_satisfy_random_constraint(self):\n secure_random = random.SystemRandom()\n done = False\n while not done:\n c = secure_random.choice(self.constraints)\n if self._is_constraint_violated(c):\n done = True\n # swap 2 wizards to move closer\n self._swap_wizards(c[random.randint(0, 1)], c[2])\n # with probability 0.5, swap the two border wizards\n if random.randint(0, 1) == 1:\n self._swap_wizards(c[0], c[1])\n if not done: print(\"Nothing to do...\")", "def random_walk(turtle, distance, steps):\n turtle.color(randcolor(), randcolor())\n for step in range(0,steps):\n random_move(turtle, distance)\n gohome(turtle)", "def randomMove(self, game):\n #time.sleep(0.25)\n return random.choice(game.get_all_legal_moves())", "def takeNaiveMove():\r\n\tnotFound=True\r\n\twhile notFound:\r\n\t\tmove=random.randint(1,9)\r\n\t\tif validMove(move):\r\n\t\t\tnotFound=False\r\n\treturn move", "def reroll(self):\n self._base_x = tf.random.uniform(\n (self._sample_count,),\n self.base_point_distribution._x_min,\n self.base_point_distribution._x_max\n )\n self._base_y = tf.random.uniform(\n (self._sample_count,),\n self.base_point_distribution._y_min,\n self.base_point_distribution._y_max\n )", "def fill(self, random, matches_allowed=True, reset_cycles=True):\n if reset_cycles:\n self.cycles_remaining = 10000\n for y in range(self.height):\n for x in range(self.width):\n if self[x][y] is None:\n return self.fill_space(x,\n y,\n random,\n matches_allowed)\n return True", "def click_aim(self, pos):\n x, y = pos\n if (self.x - x) ** 2 + (self.y - y) ** 2 <= self.r ** 2:\n self.color = random.choice(COLORS)\n self.x = randint(100, 1000)\n self.y = randint(100, 800)\n self.r = randint(50, 100)\n self.speed_x = randint(-200, 200)\n self.speed_y = randint(-200, 200)\n return True\n else:\n return False", "def _generate_pores(self):\n logger.info(\"Place randomly located pores in the domain\")\n #Original Random Point Generator\n #coords = sp.rand(self._Np,3)*[self._Lx,self._Ly,self._Lz]\n #Seeding Code\n coords = np.zeros([self._Np,3])\n #reject points close to boundaries - if False there will be slightly more\n rejection = [False,False,True]\n for j in range(3):\n i = 0\n while i < self._Np:\n coord = np.random.uniform(0,1,1)\n if self._reject(coord) == rejection[j]:\n coords[i][j]=coord\n i += 1\n coords*=np.array([self._Lx,self._Ly,self._Lz])\n #Seeding Code\n #Uniform Random Generator\n #coords = np.array([np.random.uniform(0,self._Lx,self._Np),np.random.uniform(0,self._Ly,self._Np),np.random.uniform(0,self._Lz,self._Np)]).T\n\n self['pore.coords'] = coords\n logger.debug(\"End of method\")", "def random_walk_2(n):\r\n x, y = 0, 0\r\n for i in range(n):\r\n (dx, dy) = random.choice([(0, 1), (0, -1), (1, 0), (-1, 0)])\r\n \r\n x += dx\r\n y 
+= dy\r\n return (x, y)", "def random_move(self):\n available_idx = self.get_empty_cells(self.game_board)\n return random.choice(available_idx)", "def _move_randomly(self):\n a, b = randint(0, len(self.state) - 1), randint(0, len(self.state) - 1)\n wiz1, wiz2 = self.state[a], self.state[b]\n self._swap_wizards(wiz1, wiz2)", "def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5", "def _pick_random_angles_(self):\n movej_q = self._q_ref.copy()\n while True:\n reset_angles = self._rand_obj_.uniform(self._angles_low, self._angles_high)\n movej_q[self._joint_indices] = reset_angles\n inside_bound, inside_buffer_bound, mat, xyz = self._check_bound(movej_q)\n if inside_buffer_bound:\n break\n return reset_angles, xyz", "def random_walk(n, p):\n random_array = np.random.uniform(0, 1, n)\n left = random_array[random_array > p].size\n right = n - left\n \n return (right-left)", "def get_random_points(n=5, scale=0.8, mindst=None, rec=0):\n mindst = mindst or .7 / n\n a = np.random.rand(n, 2)\n d = np.sqrt(np.sum(np.diff(ccw_sort(a), axis=0), axis=1) ** 2)\n if np.all(d >= mindst) or rec >= 200:\n return a * scale\n else:\n return get_random_points(n=n, scale=scale, mindst=mindst, rec=rec + 1)", "def run(self):\n move_cmd = Twist()\n move_cmd.linear.x = 0\n move_cmd.angular.z = 0\n\n while not rospy.is_shutdown():\n # bump logic as previous psets\n if self.bump:\n self.bump = False\n # move backwards\n move_cmd.linear.x = LIN_SPEED * -1\n for i in range(5):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n # turn randomly in a random direction\n move_cmd.linear.x = 0\n move_cmd.angular.z = ROT_SPEED * ((-1)**random.randint(1,2))\n\n if self.bump == 0:\n move_cmd.angular.z = ROT_SPEED * (-1)\n elif self.bump == 2:\n move_cmd.angular.z = ROT_SPEED\n\n for i in range(random.randint(5,15)):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n rospy.sleep(1)\n\n move_cmd.angular.z = 0\n # if somethin in the screen is really close\n elif self.min_val < MIN_THRESHOLD:\n # make sure it's not the sock/leg warmer, and is actually an obstacle\n if self.obstacle_x <= self.x or self.obstacle_x >= self.x + self.w or abs(self.min_val - self.dist) > 0.1:\n move_cmd.linear.x = 0\n # turn away\n if self.obstacle_x > 320:\n move_cmd.angular.z = ROT_SPEED / 2\n else:\n move_cmd.angular.z = -ROT_SPEED / 2\n # self.min_val = 100\n for i in range(10):\n self.cmd_vel.publish(move_cmd)\n self.rate.sleep()\n self.last_move = rospy.Time.now()\n else:\n rospy.loginfo(\"Perimeter \" + str(self.perimeter_size))\n rospy.loginfo(\"Distance is \" + str(self.dist))\n\n # normalize angle error to rot speed\n ang_error_norm = -float(self.ang_error) / 100\n\n # set min and max rot speed\n if ang_error_norm < -ROT_SPEED:\n ang_error_norm = -ROT_SPEED\n elif ang_error_norm > ROT_SPEED:\n ang_error_norm = ROT_SPEED\n\n move_cmd.angular.z = ang_error_norm\n\n if RACE == False:\n # normalize dist error to lin speed\n self.dist_error = self.dist - 0.5\n dist_error_norm = float(self.dist_error) / 2\n\n if dist_error_norm < 0:\n # if NaN (self.dist gets set to -1)\n if dist_error_norm > -0.7:\n self.lost = 0\n # if too close\n else:\n self.lost += 1\n # if it's been more than 2 seconds\n if rospy.Time.now() > self.last_move + rospy.Duration(2):\n dist_error_norm = 0\n # if been lost for a while rotate and beep\n if self.lost > 20:\n move_cmd.angular.z = ROT_SPEED / 4\n self.beep.publish(4)\n else:\n # continue as previous\n dist_error_norm = self.last_speed\n else:\n # set 
max lin speed\n if dist_error_norm > LIN_SPEED:\n dist_error_norm = LIN_SPEED\n\n # reset lost stats\n self.lost = 0\n self.last_speed = dist_error_norm\n self.last_move = rospy.Time.now()\n\n move_cmd.linear.x = dist_error_norm\n else:\n move_cmd.linear.x = LIN_SPEED\n\n self.cmd_vel.publish(move_cmd)", "def mutate_point_wline(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 1: seed = 0\n if seed == 0:\n insert_point_wline(mutated_genome,index)\n elif seed == 1:\n remove_point_wline(mutated_genome,index)\n elif seed == 2:\n switch_points_wline(mutated_genome,index)\n elif seed == 3:\n shuffle_points_wline(mutated_genome,index)\n elif seed == 4:\n move_point_wline(mutated_genome,index)\n elif seed == 5:\n shift_point_wline(mutated_genome,index)\n elif seed == 6:\n increment_point_wline(mutated_genome,index)\n else: #seed == 7:\n decrement_point_wline(mutated_genome,index)", "def make_random_move(self):\n \n\n if len(self.moves_made) == 56:\n return None\n\n random_move = random.randrange(self.height), random.randrange(self.height)\n\n not_safe_moves = self.moves_made | self.mines\n\n while random_move in not_safe_moves:\n random_move = random.randrange(self.height), random.randrange(self.height)\n\n return random_move", "def connectGreedy(self, random = False):\n if random:\n shuffle(self.houses)\n for house in self.houses:\n house.connectNearestBattery(self.batteries, self)\n self.calculateCosts()", "def _random_walk(self, preference_vectors: sps.csr_matrix) -> np.ndarray:\n similarity_rank_vertices = preference_vectors\n nb_iteration = 0\n while True:\n previous_similarity_rank_vertices = similarity_rank_vertices\n if self.verbose:\n print(\"Step: {}\".format(nb_iteration + 1))\n\n similarity_rank_vertices = self.damping_factor * similarity_rank_vertices.dot(self._transition_matrix) + (\n 1 - self.damping_factor) * preference_vectors\n\n diff = np.sum(\n np.abs(similarity_rank_vertices - previous_similarity_rank_vertices))\n if nb_iteration > 0 and diff < self.minimal_random_walk_change_difference_value:\n if self.verbose:\n print(\"Converged with error: {:.6f}\".format(diff))\n break\n\n nb_iteration += 1\n\n if nb_iteration > self.max_iterations:\n if self.verbose:\n print(\"Random walk did not converge, current error: {:.6f}\".format(\n diff))\n break\n return similarity_rank_vertices.toarray()", "def test_move_default_extra_steps(self):\n player = ss.ResilientPlayer()\n random.seed(2)\n player.move()\n random.seed(1)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 32", "def confused(self, rand):\n return rand > 0", "def fill_space(self, x, y, random, matches_allowed):\n rotation = random.randint(0, 3) * 90\n for _ in range(4):\n try:\n choices = self.choose_and_flip_extra_dominoes(\n random)\n for domino, is_flipped in choices:\n if self.cycles_remaining <= 0:\n return False\n self.cycles_remaining -= 1\n domino.rotate_to(rotation)\n self.add(domino, x, y)\n self.add_count += 1\n has_even_gaps = self.hasEvenGaps()\n if not has_even_gaps:\n self.remove(domino)\n break\n else:\n if is_flipped:\n domino.flip()\n if not matches_allowed and domino.hasMatch():\n pass\n else:\n if self.fill(random,\n matches_allowed,\n reset_cycles=False):\n return True\n self.remove(domino)\n except BadPositionError:\n pass\n rotation = (rotation + 90) % 360\n return False", "def is_exhausted(self):\n return random.random() < 0.5", "def realize(self):\n if numpy.random.rand() > self._P:\n 
self._w *= 0.0", "def getARandomFreeSample(self, num, maxDimLens, dim):\n failTime=0;\n while( failTime < num ):\n rnd = [0] * dim;\n for i in range( 0, dim ):\n rnd[i] = randrange( 0, maxDimLens[i] );\n pass\n angles = self.mCSpace.map2UnscaledSpace( rnd );\n if( self.mCollisionMgr.ifCollide( angles ) ):\n continue;\n\n newSamp = True;\n\n grid = self.mSpacePartition.getContainingGrid( rnd );\n for sphere in grid.mContainer:\n if sphere.isInside( rnd, maxDimLens ):\n newSamp = False;\n failTime += 1\n break;\n\n #for sample in self.mDistSamples:\n # if sample.isInside( rnd, maxDimLens ):\n # newSamp = False;\n # failTime += 1\n # break;\n if newSamp:\n # randomly shoot rays to get the nearest distance to obstacles\n rayShooter = RayShooter( rnd, self.mCollisionMgr, self.mCSpace );\n dist = rayShooter.randShoot(50 * (dim-1));\n if math.fabs(dist) >= 1.0:\n newDistSamp = DistSample( rnd, dist );\n print \"failed times: {0}\".format( failTime );\n failTime=0;\n return newDistSamp;\n else:\n failTime += 1;\n\n return None;" ]
[ "0.65802413", "0.6314837", "0.62275654", "0.61400115", "0.61373764", "0.61373764", "0.6013303", "0.6008652", "0.59597766", "0.59432083", "0.5940083", "0.5926595", "0.59224373", "0.5892852", "0.58895075", "0.5842744", "0.58392626", "0.5830656", "0.58206266", "0.58205765", "0.58132654", "0.5799286", "0.57960975", "0.5771451", "0.5762655", "0.57625", "0.5741609", "0.57373685", "0.57350034", "0.57311213", "0.5728878", "0.57107115", "0.57101923", "0.57082707", "0.5705172", "0.5701551", "0.56961244", "0.5694103", "0.5680299", "0.5676493", "0.56700134", "0.56571877", "0.5656566", "0.5653499", "0.56487125", "0.5647044", "0.5644442", "0.56339383", "0.5624072", "0.56185436", "0.56180525", "0.5617316", "0.56066066", "0.5596827", "0.55831885", "0.55823624", "0.5570801", "0.5569943", "0.5559909", "0.5557711", "0.5557707", "0.5554068", "0.5549027", "0.55478495", "0.5547419", "0.5547419", "0.5545641", "0.5533221", "0.552275", "0.55213064", "0.5520953", "0.55201215", "0.55191207", "0.5516685", "0.55055285", "0.5505241", "0.5502522", "0.5500113", "0.5497463", "0.54929924", "0.54928255", "0.54922175", "0.5483362", "0.54824114", "0.5477892", "0.547556", "0.54715097", "0.54693866", "0.5467904", "0.5466916", "0.5462227", "0.54617566", "0.54582584", "0.5457853", "0.5441424", "0.5437863", "0.5430049", "0.54296374", "0.5424456", "0.54221714" ]
0.62696826
2
Return the Orthanc object of the study
def get(self): return orthanc.study(self.orthanc_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def orthanc_studies(self):\n return [orthanc.study(x.orthanc_id) for x in self.studies]", "def salome_study_init(theStudyId=0):\n\n global salome_study_initial\n global myStudyManager, myStudyId, myStudy, myStudyName\n global orb, lcc, naming_service, cm\n\n if salome_study_initial:\n salome_study_initial = 0\n\n orb, lcc, naming_service, cm = salome_kernel.salome_kernel_init()\n\n # get Study Manager reference\n if verbose(): print \"looking for studyManager ...\"\n obj = naming_service.Resolve('myStudyManager')\n myStudyManager = obj._narrow(SALOMEDS.StudyManager)\n if verbose(): print \"studyManager found\"\n\n # get active study Id, ref and name\n myStudyId = getActiveStudy(theStudyId)\n if verbose(): print \"myStudyId\",myStudyId\n myStudy = myStudyManager.GetStudyByID(myStudyId)\n myStudyName = myStudy._get_Name()\n\n return myStudyManager, myStudyId, myStudy, myStudyName", "def __repr__(self):\n\n return \"<Study pmid=%d doi=%s title=%s year=%d>\" % (\n self.pmid, self.doi, self.title, self.year)", "def clone(self, *args):\n return _SALOMERuntime.OutputStudyPort_clone(self, *args)", "def demo_ortho_slicer():\n pl.clf()\n oslicer = OrthoSlicer(cut_coords=(0, 0, 0))\n from .anat_cache import _AnatCache\n map, affine, _ = _AnatCache.get_anat()\n oslicer.plot_map(map, affine, cmap=pl.cm.gray)\n return oslicer", "def _createObj(self) -> None:\n phase_img = skimage.img_as_float(skimage.data.camera())[::-1, ::-1]\n mod_img = skimage.img_as_float(skimage.data.immunohistochemistry()[:, :, 0])[::-1, ::-1]\n mod = skimage.transform.resize(mod_img, self.shape,\n mode='wrap', preserve_range=True)\n phase = skimage.transform.resize(phase_img, self.shape,\n mode='wrap', preserve_range=True)\n\n # Setting the ranges\n phase = (phase - np.min(phase)) / (np.max(phase) - np.min(phase)) * self.phase_range\n mod = (mod - np.min(mod)) / (np.max(mod) - np.min(mod)) * self.mod_range\n\n # Centering the phase at 0.\n phase = np.angle(np.exp(1j * (phase - scipy.stats.circmean(phase))))\n obj = (mod * np.exp(1j * phase)).astype('complex64')\n self._setObjArrayValues(obj)", "def phonology(request):\n\n perspective_cid = request.params.get('perspective_client_id')\n perspective_oid = request.params.get('perspective_object_id')\n\n # Checking if we have limits on number of computed results.\n\n limit = (None if 'limit' not in request.params else\n int(request.params.get('limit')))\n\n limit_exception = (None if 'limit_exception' not in request.params else\n int(request.params.get('limit_exception')))\n\n limit_no_vowel = (None if 'limit_no_vowel' not in request.params else\n int(request.params.get('limit_no_vowel')))\n\n limit_result = (None if 'limit_result' not in request.params else\n int(request.params.get('limit_result')))\n\n # TODO: get perspective's translation and language it belongs to.\n\n # We get lexical entries of this perspective with markup'ed sounds.\n\n Sound = aliased(Entity, name = \"Sound\")\n PublishingSound = aliased(PublishingEntity, name = \"PublishingSound\")\n\n query = DBSession.query(LexicalEntry, Entity, Sound, PublishingEntity, PublishingSound).filter(and_(\n LexicalEntry.parent_client_id == perspective_cid,\n LexicalEntry.parent_object_id == perspective_oid,\n LexicalEntry.marked_for_deletion == False,\n Entity.parent_client_id == LexicalEntry.client_id,\n Entity.parent_object_id == LexicalEntry.object_id,\n Entity.marked_for_deletion == False,\n Entity.additional_metadata.contains({\"data_type\": \"praat markup\"}),\n PublishingEntity.client_id == Entity.client_id,\n 
PublishingEntity.object_id == Entity.object_id,\n PublishingEntity.published == True,\n PublishingEntity.accepted == True,\n Sound.client_id == Entity.self_client_id,\n Sound.object_id == Entity.self_object_id,\n Sound.marked_for_deletion == False,\n PublishingSound.client_id == Sound.client_id,\n PublishingSound.object_id == Sound.object_id,\n PublishingSound.published == True,\n PublishingSound.accepted == True))\n\n # We process these lexical entries in batches. Just in case, it seems that perspectives rarely have more\n # then several hundred such lexical entries.\n\n exception_counter = 0\n no_vowel_counter = 0\n result_list = list()\n\n for index, row in enumerate(query.yield_per(100)):\n\n markup_url = row.Entity.content\n sound_url = row.Sound.content\n\n cache_key = 'phonology:{0}:{1}:{2}:{3}'.format(\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id)\n\n # Checking if we have cached result for this pair of sound/markup.\n\n cache_result = CACHE.get(cache_key)\n\n if cache_result == 'no_vowel':\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]: no vowels\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we have cached exception, we do the same as with absence of vowels, show its info and\n # continue.\n\n elif isinstance(cache_result, tuple) and cache_result[0] == 'exception':\n exception, traceback_string = cache_result[1:3]\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n '[CACHE {7}]: exception\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n log.debug(traceback_string)\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we actually have the result, we use it and continue.\n\n elif cache_result:\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in cache_result)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]:\\n{8}\\n{9}\\n{10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url, result_string))\n\n result_list.append(cache_result)\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n continue\n\n try:\n # Getting markup, checking for each tier if it needs to be processed.\n\n markup_bytes = urllib.request.urlopen(urllib.parse.quote(markup_url, safe = '/:')).read()\n\n textgrid = pympi.Praat.TextGrid(xmax = 0)\n textgrid.from_file(\n io.BytesIO(markup_bytes),\n codec = 
chardet.detect(markup_bytes)['encoding'])\n\n tier_data_list = []\n vowel_flag = False\n\n for tier_number, tier_name in textgrid.get_tier_name_num():\n\n raw_interval_list = textgrid.get_tier(tier_number).get_all_intervals()\n raw_interval_seq_list = [[]]\n\n # Splitting interval sequence on empty intervals.\n\n for raw_index, interval in enumerate(raw_interval_list):\n\n if len(interval[2].strip()) <= 0:\n if len(raw_interval_seq_list[-1]) > 0:\n raw_interval_seq_list.append([])\n\n else:\n raw_interval_seq_list[-1].append((raw_index, interval))\n\n if len(raw_interval_seq_list[-1]) <= 0:\n del raw_interval_seq_list[-1]\n\n # Selecting interval sequences for analysis, checking if we have unusual markup.\n \n interval_seq_list = []\n interval_idx_to_raw_idx = dict()\n\n unusual_markup_flag = False\n unusual_markup_list = []\n\n for raw_interval_seq in raw_interval_seq_list:\n\n interval_seq_list.append([])\n interval_idx_to_raw_idx[len(interval_seq_list) - 1] = {}\n\n for partial_raw_index, (raw_index, interval) in enumerate(raw_interval_seq):\n\n interval_text = interval[2].strip()\n\n # Accepting interval if its text contains at least one vowel, and is short enough or\n # is a valid phonetic transcription.\n\n transcription_check = re.fullmatch(transcription_re, interval_text)\n\n if (len(interval_text) > 0 and\n any(character in vowel_set for character in interval_text) and\n (len(interval_text) <= 2 or transcription_check)):\n\n interval_seq_list[-1].append(interval)\n\n sequence_index = len(interval_seq_list) - 1\n interval_index = len(interval_seq_list[-1]) - 1\n\n interval_idx_to_raw_idx[(sequence_index, interval_index)] = raw_index\n interval_idx_to_raw_idx[sequence_index][interval_index] = partial_raw_index\n\n # Noting if the interval contains unusual (i.e. non-transcription) markup.\n\n elif not transcription_check:\n\n unusual_markup_flag = True\n unusual_markup_list.append((raw_index, interval))\n\n transcription_list = [text for begin, end, text in raw_interval_list]\n transcription = ''.join(transcription_list)\n\n selected_list = [text\n for interval_list in interval_seq_list\n for begin, end, text in interval_list]\n\n selected = ''.join(selected_list)\n\n # If we have intervals with unusual markup, we report them.\n\n if unusual_markup_flag:\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' has interval(s) with unusual transcription text: '\n '{9} / {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription, dict(unusual_markup_list)))\n\n # If the markup does not have any vowels, we note it and also report it.\n\n if all(character not in vowel_set for character in transcription):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' doesn\\'t have any vowel markup: {9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription_list))\n\n # It is also possible that while full transcription has vowels, intervals selected for\n # analysis do not. 
In that case we also note it and report it.\n\n elif not any(character in vowel_set for character in selected):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel_selected'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' intervals to be processed don\\'t have any vowel markup: '\n 'markup {9}, selected {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name,\n transcription_list, selected_list))\n\n # Otherwise we store tier data to be used during processing of the sound file.\n\n else:\n tier_data_list.append((tier_number, tier_name,\n (raw_interval_list, raw_interval_seq_list, interval_seq_list,\n interval_idx_to_raw_idx, transcription)))\n\n vowel_flag = True\n\n # If there are no tiers with vowel markup, we skip this sound-markup file altogether.\n\n if not vowel_flag:\n\n CACHE.set(cache_key, 'no_vowel')\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # Otherwise we retrieve the sound file and analyse each vowel-containing markup.\n # Partially inspired by source code at scripts/convert_five_tiers.py:307.\n\n sound = None\n with tempfile.NamedTemporaryFile() as temp_file:\n\n sound_file = urllib.request.urlopen(urllib.parse.quote(sound_url, safe = '/:'))\n temp_file.write(sound_file.read())\n temp_file.flush()\n\n sound = AudioPraatLike(pydub.AudioSegment.from_wav(temp_file.name))\n\n tier_result_list = []\n\n for tier_number, tier_name, tier_data in tier_data_list:\n\n if tier_data == 'no_vowel' or tier_data == 'no_vowel_selected':\n tier_result_list.append((tier_number, tier_name, tier_data))\n continue\n\n # Analyzing vowel sounds of each interval sequence.\n\n (raw_interval_list, raw_interval_seq_list, interval_seq_list, interval_idx_to_raw_idx,\n transcription) = tier_data\n\n tier_result_list.append((tier_number, tier_name, []))\n\n for seq_index, (raw_interval_list, interval_list) in enumerate(zip(\n raw_interval_seq_list, interval_seq_list)):\n\n if len(interval_list) <= 0:\n continue\n\n (max_intensity_index, max_intensity, max_length_index, max_length) = \\\n find_max_interval_praat(sound, interval_list)\n\n max_intensity_interval = interval_list[max_intensity_index]\n max_length_interval = interval_list[max_length_index]\n\n max_intensity_f1_f2 = sound.get_interval_formants(*max_intensity_interval[:2])\n max_length_f1_f2 = sound.get_interval_formants(*max_length_interval[:2])\n\n # Compiling results.\n\n max_length_str = '{0} {1:.3f} [{2}]'.format(\n max_length_interval[2], max_length,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_length_index]])))\n\n max_intensity_str = '{0} {1:.3f} [{2}]'.format(\n max_intensity_interval[2],\n max_intensity,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_intensity_index]])))\n\n tier_result_list[-1][2].append([\n ''.join(text for index, (begin, end, text) in raw_interval_list),\n max_length_str,\n '{0:.3f}'.format(max_length_f1_f2[0]),\n '{0:.3f}'.format(max_length_f1_f2[1]),\n max_intensity_str,\n '{0:.3f}'.format(max_intensity_f1_f2[0]),\n '{0:.3f}'.format(max_intensity_f1_f2[1]),\n '+' if max_intensity_index == max_length_index else '-'])\n\n # Saving result.\n\n 
result_list.append(tier_result_list)\n CACHE.set(cache_key, tier_result_list)\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in tier_result_list)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}):'\n '\\n{7}\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url, result_string))\n\n # Stopping earlier, if required.\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n except Exception as exception:\n\n #\n # NOTE\n #\n # Exceptional situations encountered so far:\n #\n # 1. TextGrid file actually contains sound, and wav file actually contains textgrid markup.\n #\n # Perspective 330/4, LexicalEntry 330/7, sound-Entity 330/2328, markup-Entity 330/6934\n #\n # 2. Markup for one of the intervals contains a newline \"\\n\", and pympi fails to parse it.\n # Praat parses such files without problems.\n #\n # Perspective 330/4, LexicalEntry 330/20, sound-Entity 330/6297, markup-Entity 330/6967\n #\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'exception\\n{7}\\n{8}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url))\n\n # if we encountered an exception, we show its info and remember not to try offending\n # sound/markup pair again.\n\n traceback_string = ''.join(traceback.format_exception(\n exception, exception, exception.__traceback__))[:-1]\n\n log.debug(traceback_string)\n\n CACHE.set(cache_key, ('exception', exception,\n traceback_string.replace('Traceback', 'CACHEd traceback')))\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n log.debug('phonology {0}/{1}: {2} result{3}, {4} no vowels, {5} exceptions'.format(\n perspective_cid, perspective_oid,\n len(result_list), '' if len(result_list) == 1 else 's',\n no_vowel_counter, exception_counter))\n\n # If we have no results, we indicate the situation and also show number of failures and number of\n # markups with no vowels.\n\n if not result_list:\n request.response.status = HTTPPreconditionFailed.code\n\n return {\n \"error\": \"no markups for this query\",\n \"exception_counter\": exception_counter,\n \"no_vowel_counter\": no_vowel_counter}\n\n # Otherwise we create and then serve Excel file.\n\n excel_book = xlwt.Workbook(encoding = \"utf-8\")\n sheet = excel_book.add_sheet(\"Sheet 1\")\n\n sheet.write(0, 0, 'Transcription')\n sheet.write(0, 1, 'Longest (seconds) interval')\n sheet.write(0, 2, 'F1 (Hz)')\n sheet.write(0, 3, 'F2 (Hz)')\n sheet.write(0, 4, 'Highest intensity (dB) interval')\n sheet.write(0, 5, 'F1 (Hz)')\n sheet.write(0, 6, 'F2 (Hz)')\n sheet.write(0, 7, 'Coincidence')\n\n row_counter = 1\n\n for tier_result_list in result_list:\n for tier_number, tier_name, tier_result_seq_list in tier_result_list:\n\n if tier_result_seq_list == 'no_vowel':\n continue\n\n for tier_data in tier_result_seq_list:\n for 
index, tier_data_str in enumerate(tier_data):\n sheet.write(row_counter, index, tier_data_str)\n\n row_counter += 1\n\n # Formatting column widths.\n\n sheet.col(0).width = 24 * 256\n sheet.col(1).width = 24 * 256\n sheet.col(2).width = 12 * 256\n sheet.col(3).width = 12 * 256\n sheet.col(4).width = 24 * 256\n sheet.col(5).width = 12 * 256\n sheet.col(6).width = 12 * 256\n sheet.col(7).width = 12 * 256\n\n excel_stream = io.BytesIO()\n excel_book.save(excel_stream)\n excel_stream.seek(0)\n\n # See http://stackoverflow.com/questions/2937465/what-is-correct-content-type-for-excel-files for Excel\n # content-type.\n\n response = Response(content_type = 'application/vnd.ms-excel')\n\n response.app_iter = FileIter(excel_stream)\n response.headers['Content-Disposition'] = \"attachment; filename=phonology.xls\"\n\n return response", "def get_o(self):\n return self.o", "def __repr__(self):\n return \"{} ({}) <{}:{} pheno: {} study: {}\".format(\n self.id, self.snpid, self.chrom, self.pos, self.phenotype_desc,\n self.study.title\n )", "def object(self):", "def __str__(self):\r\n\r\n return 'Perspective(%s)' % self.id", "def clone(self, *args):\n return _SALOMERuntime.InputStudyPort_clone(self, *args)", "def get_main_object(tc):\n return Daal(tc)", "def get_scn_obs_date(self, unq_id):\n import copy\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id).all()\n ses.close()\n scn_record = None\n if query_result is not None:\n if len(query_result) == 1:\n scn_record = query_result[0]\n else:\n logger.error(\n \"PID {0} has returned more than 1 scene - must be unique something really wrong.\".format(\n unq_id))\n raise EODataDownException(\n \"There was more than 1 scene which has been found - something has gone really wrong!\")\n else:\n logger.error(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n raise EODataDownException(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n return copy.copy(scn_record.BeginPosition)", "def create_isa_study(brapi_study_id):\n brapi_study = get_brapi_study(brapi_study_id)\n this_study = Study(filename=\"s_\" + str(brapi_study_id) + \".txt\")\n this_study.identifier = brapi_study['studyDbId']\n if 'name' in brapi_study:\n this_study.title = brapi_study['name']\n elif 'studyName' in brapi_study:\n this_study.title = brapi_study['studyName']\n\n this_study.comments.append(Comment(name=\"Study Start Date\", value=brapi_study['startDate']))\n this_study.comments.append(Comment(name=\"Study End Date\", value=brapi_study['endDate']))\n if brapi_study['location'] is not None and brapi_study['location']['name'] is not None :\n this_study.comments.append(Comment(name=\"Study Geographical Location\",\n value=brapi_study['location']['name']))\n else:\n this_study.comments.append(Comment(name=\"Study Geographical Location\",value=\"\"))\n\n study_design = brapi_study['studyType']\n oa_st_design = OntologyAnnotation(term=study_design)\n this_study.design_descriptors = [oa_st_design]\n\n oref_tt = OntologySource(name=\"OBI\", description=\"Ontology for Biomedical Investigation\")\n oa_tt = OntologyAnnotation(term=\"phenotyping\", term_accession=\"\", term_source=oref_tt)\n oref_mt = OntologySource(name=\"OBI\", description=\"Ontology for Biomedical 
Investigation\")\n oa_mt = OntologyAnnotation(term=\"multi-technology\", term_accession=\"\", term_source=oref_mt)\n isa_assay_file = \"a_\" + str(brapi_study_id) + \".txt\"\n this_assay = Assay(measurement_type=oa_tt, technology_type=oa_mt, filename=isa_assay_file)\n this_study.assays.append(this_assay)\n\n return this_study", "def get_study_info(self,std_id):\n raise NotImplementedError", "def get_queryset(self):\n return Objective.objects.order_by('perspective')", "def example_clinical_data(study_name, environment):\n\n odm = ODM(\"test system\")(\n ClinicalData(\"Mediflex\", \"DEV\")(\n SubjectData(\"MDSOL\", \"IJS TEST4\", transaction_type=\"Insert\")(\n StudyEventData(\"SUBJECT\")(\n FormData(\"EN\", transaction_type=\"Update\")(\n # Although Signature is ODM1.3.1 RWS does not support it inbound currently\n # RWSBuilders do support outbound generation of Signature at FormData level\n # Signature()(\n # UserRef(\"isparks\"),\n # LocationRef(\"MDSOL\"),\n # SignatureRef(\"APPROVED\"),\n # DateTimeStamp(datetime(2015, 9, 11, 10, 15, 22, 80))\n # ),\n ItemGroupData()(\n ItemData(\"SUBJINIT\", \"AAA\")(\n AuditRecord(edit_point=AuditRecord.EDIT_DATA_MANAGEMENT,\n used_imputation_method= False,\n identifier='X2011',\n include_file_oid=False)(\n UserRef(\"isparks\"),\n LocationRef(\"MDSOL\"),\n ReasonForChange(\"Data Entry Error\"),\n DateTimeStamp(datetime(2015, 9, 11, 10, 15, 22, 80))\n ),\n MdsolQuery(value=\"Subject initials should be 2 chars only.\", recipient=\"Site from System\",\n status=QueryStatusType.Open)\n ),\n ItemData(\"SUBJID\", '001')\n )\n )\n )\n )\n )\n )\n return odm", "def __repr__(self):\n return f'ResidenciaModel(name={self.neighbourhood_group}, neighbourhood={self.room_type})'", "def ortho(self):\r\n\r\n m11,m12,m13,m14,m21,m22,m23,m24,m31,m32,m33,m34,m41,m42,m43,m44 = self.mlist\r\n\r\n x = _vec3(m11, m21, m31)\r\n y = _vec3(m12, m22, m32)\r\n z = _vec3(m13, m23, m33)\r\n\r\n xl = x.length()\r\n xl*=xl\r\n y = y - ((x*y)/xl)*x\r\n z = z - ((x*z)/xl)*x\r\n\r\n yl = y.length()\r\n yl*=yl\r\n z = z - ((y*z)/yl)*y\r\n\r\n return mat4( x.x, y.x, z.x, m14,\r\n x.y, y.y, z.y, m24,\r\n x.z, y.z, z.z, m34,\r\n m41, m42, m43, m44)", "def search_research_studies_with_observations():\n return ResearchStudy.where(struct={}).include('focus', Observation, reverse=True)", "def get_objectives(self):\n return copy.deepcopy(self.objectives), self.gates_names", "def get_scn_record(self, unq_id):\n logger.debug(\"Creating Database Engine and Session.\")\n db_engine = sqlalchemy.create_engine(self.db_info_obj.dbConn)\n session_sqlalc = sqlalchemy.orm.sessionmaker(bind=db_engine)\n ses = session_sqlalc()\n\n logger.debug(\"Perform query to find scene.\")\n query_result = ses.query(EDDSentinel1ASF).filter(EDDSentinel1ASF.PID == unq_id).all()\n ses.close()\n scn_record = None\n if query_result is not None:\n if len(query_result) == 1:\n scn_record = query_result[0]\n else:\n logger.error(\n \"PID {0} has returned more than 1 scene - must be unique something really wrong.\".format(unq_id))\n raise EODataDownException(\n \"There was more than 1 scene which has been found - something has gone really wrong!\")\n else:\n logger.error(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n raise EODataDownException(\"PID {0} has not returned a scene - check inputs.\".format(unq_id))\n return scn_record", "def get_full_article(self):\n raise NotImplementedError", "def get_rms(self):\r\n return self.rms.copy()", "def substantiate():", "def __init__(self, name=None, dss=28, 
date=None, project='SolarPatrol'):\n self.logger = logging.getLogger(logger.name+\".Observation\")\n DR.Observation.__init__(self, name=date, date=date, dss=dss, \n project=project)\n self.extended_init()\n \n #self.obs =Astronomy.Ephem.DSS(dss)\n #y,d = date.split('/')\n #self.year = int(y); self.DOY = int(d)\n #projdatapath, self.sessionpath, rawdatapath = \\\n # DR.get_obs_dirs(project, dss, self.year, self.DOY,\n # datafmt=None)", "def create_general_object(self, x, y):\n return self.img[y-self.rad:y+self.rad, x-self.rad:x+self.rad]", "def projection(self):\n pass", "def init_od_sr(state_dict: Dict) -> SpectralResidual:\n od = SpectralResidual(threshold=state_dict['threshold'],\n window_amp=state_dict['window_amp'],\n window_local=state_dict['window_local'],\n n_est_points=state_dict['n_est_points'],\n n_grad_points=state_dict['n_grad_points'])\n return od", "def orthopyroxene():\n\n rho = 3304.\n\n C = np.zeros((6,6), dtype=float)\n C[0,0] = 236.9; C[0,1] = 79.6; C[0,2] = 63.2; C[0,3] = 0.; C[0,4] = 0.; C[0,5] = 0.\n C[1,0] = C[0,1]; C[1,1] = 180.5; C[1,2] = 56.8; C[1,3] = 0.; C[1,4] = 0.; C[1,5] = 0.\n C[2,0] = C[0,2]; C[2,1] = C[1,2]; C[2,2] = 230.4; C[2,3] = 0.; C[2,4] = 0.; C[2,5] = 0.\n C[3,0] = C[0,3]; C[3,1] = C[1,3]; C[3,2] = C[2,3]; C[3,3] = 84.3; C[3,4] = 0.; C[3,5] = 0.\n C[4,0] = C[0,4]; C[4,1] = C[1,4]; C[4,2] = C[2,4]; C[4,3] = C[3,4]; C[4,4] = 79.4; C[4,5] = 0.\n C[5,0] = C[0,5]; C[5,1] = C[1,5]; C[5,2] = C[2,5]; C[5,3] = C[3,5]; C[5,4] = C[4,5]; C[5,5] = 80.1\n\n return C, rho", "def o(self, otra):\n return OEspecificacion(self, otra)", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def create_scene(self):\n \n self.scene=soya.World()", "def gen_obs(self):\n\n #grid, vis_mask = self.gen_obs_grid()\n\n # Encode the partially observable view into a numpy array\n image = self.grid.encode(self.agent_pos,self.drone_pos)\n\n #assert hasattr(self, 'mission'), \"environments must define a textual mission string\"\n\n # Observations are dictionaries containing:\n # - an image (partially observable view of the environment)\n # - the agent's direction/orientation (acting as a compass)\n # - a textual mission string (instructions for the agent)\n obs = {\n 'image': image,\n #'direction': self.agent_dir,\n 'mission': self.mission\n }\n obs=image\n #print(obs.shape)\n return self.render(mode='rgb_array')\n #return obs", "def get_mapping(self, type = 'orthogonal', anchor_method = 'mutual_nn', max_anchors = None):\n # Method 1: Orthogonal projection that best macthes NN\n self.compute_scores(score_type='coupling') # TO refresh\n if anchor_method == 'mutual_nn':\n pseudo = self.find_mutual_nn()#[:100]\n elif anchor_method == 'all':\n translations, oov = self.generate_translations()\n pseudo = [(k,v[0]) for k,v in translations.items()]\n if max_anchors:\n pseudo = pseudo[:max_anchors]\n print('Finding orthogonal mapping with {} anchor points via {}'.format(len(pseudo), anchor_method))\n if anchor_method in ['mutual_nn', 'all']:\n idx_src = [self.src_word2ind[ws] for ws,_ in pseudo]\n idx_trg = [self.trg_word2ind[wt] for _,wt in pseudo]\n xs_nn = self.xs[idx_src]\n xt_nn = self.xt[idx_trg]\n P = orth_procrustes(xs_nn, xt_nn)\n elif anchor_method == 'barycenter':\n ot_emd = ot.da.EMDTransport()\n ot_emd.xs_ = self.xs\n ot_emd.xt_ = self.xt\n ot_emd.coupling_= self.coupling\n xt_hat = ot_emd.inverse_transform(Xt=self.xt) # Maps target to source space\n P = orth_procrustes(xt_hat, self.xt)\n return P", "def getScene():\n #print \"servers 
direct scenes are \",soya.IDLER.scenes[:]\n \n return soya.IDLER.scenes[0]", "def get_modeller_objects(self):\n return self._modpt, self.__edat.modpt, self.__libs.modpt", "def get_representative_data_object(self, obj):\n if self.dim == 0:\n # In this way, obj can be a data object and this class can be\n # used even if the assignment is not between \"flattened components\"\n return obj\n else:\n nominal_index = self.nominal_index\n return obj[nominal_index]", "def getAuthoredFromSymplectic(researcher_object):\r\n #description\r\n #checking\r\n # if not(researcher_object) or (researcher_object.symplectic_int_id is None): # int_id version\r\n if not(researcher_object) or (researcher_object.symplectic_id is None): # guid version\r\n return\r\n #symplectic api url and local file path\r\n # url = SYMPLECTIC_API_URL + 'users/' + str(researcher_object.symplectic_int_id) # int_id version\r\n url = SYMPLECTIC_API_URL + 'users/' + str(researcher_object.symplectic_id) # guid version\r\n # tmp_filename = SYMPLECTIC_LOCAL_XML_FOLDER + SYMPLECTIC_LOCAL_AUTH_FOLDER + str(researcher_object.symplectic_int_id) + '.xml' # int_id version\r\n tmp_filename = SYMPLECTIC_LOCAL_XML_FOLDER + SYMPLECTIC_LOCAL_AUTH_FOLDER + str(researcher_object.symplectic_id) + '.xml' # guid version\r\n #get xml document from symplectic api and store on hd\r\n (tmp_filename, http_headers,) = urllib.urlretrieve(url, tmp_filename)\r\n #parse xml file\r\n publications_etree = ElementTree(file=tmp_filename)\r\n #delete local file from hd\r\n #try:\r\n os.remove(tmp_filename)\r\n #except:\r\n #pass \r\n #publication elements are held in a subtree\r\n publications_subtree = publications_etree.find(SYMPLECTIC_NAMESPACE + 'publications')\r\n #check if any publication elements in subtree\r\n if publications_subtree is None or len(publications_subtree) < 1:\r\n return\r\n #now that we have their newest \"i authored that pub\" info, we can delete their old \"i authored that pub\" info\r\n researcher_object.remove_all_authored() \r\n #for each publication element in subtree\r\n for publication_element in publications_subtree.getchildren():\r\n SymplecticXMLAuthored.__createAuthoredObjFromUserPubElement(publication_element, researcher_object)", "def composeWorkplaceOntology():\n\n import ossPyFuncs \n import pandas as pd\n \n #mysql query to extract full table from government organizations\n #certian table columns feature capital letters which cases uproblems\n postgreSql_selectQuery=\"SELECT * FROM us_gov_manual.us_govman_2019 ;\"\n #pass querry and obtain table\n govTable=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n\n #mysql query to obtain academic instutions\n postgreSql_selectQuery=\"SELECT institution FROM hipolabs.universities ;\"\n #pass querry and obtain table\n univTable=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2018_us1000;\"\n businesses1=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2019_us1000;\"\n businesses2=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2020_global2000;\"\n businesses3=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n\n #combine theinsitutions into a vector\n combinedSeries=[govTable['AgencyName'],univTable['institution'],businesses1['company'],businesses2['company'],businesses3['company']]\n #turn the multi item vector into a single series\n fullWordbank=pd.concat(combinedSeries)\n #turn that series 
into a pd dataframe\n wordbankTable=pd.DataFrame(fullWordbank.unique())\n\n return wordbankTable", "def _orthogonal_init(self):\n # if is a conv layer, will need to reshape to fan in matrix,\n # which is of dimension\n # num input feature maps * filter height * filter width\n if(len(self.dims) > 2):\n rv_samp = np.random.randn(self.dims[2],\n self.dims[0] * self.dims[1] * self.dims[3])\n out_sigma = np.sqrt(1.0 / rv_samp.shape[1])\n # otherwise will be a densely connected layer\n else:\n rv_samp = np.random.randn(self.dims[0], self.dims[1])\n out_sigma = np.sqrt(1.0 / rv_samp.shape[0])\n # perform SVD\n U, _, V = np.linalg.svd(rv_samp, full_matrices=False) #pylint: disable=invalid-name\n # both U and V are orthoginal matricies, so will choose the one\n # that is the correct dimensions for our layer\n ortho_matrix = U if U.shape == rv_samp.shape else V\n # rescale so it is unit variance for each vector\n # print(\"std(q) = {}\".format(np.std(q)))\n ortho_norm = (ortho_matrix / np.std(ortho_matrix)) * out_sigma\n #print(\"std(qs) = {}\".format(np.std(qs)))\n #print(q.shape)\n return ortho_norm.reshape(self.dims).astype(np.float32)", "def get_final_reconstruction(self):", "def om(self):\n return self._om", "def _get_observation(self, unseen=False):\n img_arr = p.getCameraImage(width=self._width,\n height=self._height,\n viewMatrix=self._view_matrix,\n projectionMatrix=self._proj_matrix)\n rgb = img_arr[2]\n depth = img_arr[3]\n min = 0.97\n max=1.0\n segmentation = img_arr[4]\n depth = np.reshape(depth, (self._height, self._width,1) )\n segmentation = np.reshape(segmentation, (self._height, self._width,1) )\n\n np_img_arr = np.reshape(rgb, (self._height, self._width, 4))\n np_img_arr = np_img_arr[:, :, :3].astype(np.float64)\n\n view_mat = np.asarray(self._view_matrix).reshape(4, 4)\n proj_mat = np.asarray(self._proj_matrix).reshape(4, 4)\n # pos = np.reshape(np.asarray(list(p.getBasePositionAndOrientation(self._objectUids[0])[0])+[1]), (4, 1))\n\n AABBs = np.zeros((len(self._objectUids), 2, 3))\n cls_ls = []\n \n for i, (_uid, _cls) in enumerate(zip(self._objectUids, self._objectClasses)):\n AABBs[i] = np.asarray(p.getAABB(_uid)).reshape(2, 3)\n cls_ls.append(NAME2IDX[_cls])\n\n # np.save('/home/tony/Desktop/obj_save/view_mat_'+str(self.img_save_cnt), view_mat)\n # np.save('/home/tony/Desktop/obj_save/proj_mat_'+str(self.img_save_cnt), proj_mat)\n # np.save('/home/tony/Desktop/obj_save/img_'+str(self.img_save_cnt), np_img_arr.astype(np.int16))\n # np.save('/home/tony/Desktop/obj_save/AABB_'+str(self.img_save_cnt), AABBs)\n # np.save('/home/tony/Desktop/obj_save/class_'+str(self.img_save_cnt), np.array(cls_ls))\n\n np.save(OUTPUT_DIR + '/image_' + str(self.img_save_cnt), np_img_arr.astype(np.int16))\n dets = np.zeros((AABBs.shape[0], 5))\n for i in range(AABBs.shape[0]):\n dets[i, :4] = self.get_2d_bbox(AABBs[i], view_mat, proj_mat, IM_HEIGHT, IM_WIDTH)\n dets[i, 4] = int(cls_ls[i])\n np.save(OUTPUT_DIR + '/annotation_'+str(self.img_save_cnt), dets)\n\n test = np.concatenate([np_img_arr[:, :, 0:2], segmentation], axis=-1)\n\n return test", "def make_studyforrest_mockup(path):\n public = create(opj(path, 'public'), description=\"umbrella dataset\")\n # the following tries to capture the evolution of the project\n phase1 = public.create('phase1',\n description='old-style, no connection to RAW')\n structural = public.create('structural', description='anatomy')\n tnt = public.create('tnt', description='image templates')\n tnt.clone(source=phase1.path, path=opj('src', 'phase1'), reckless='auto')\n 
tnt.clone(source=structural.path, path=opj('src', 'structural'), reckless='auto')\n aligned = public.create('aligned', description='aligned image data')\n aligned.clone(source=phase1.path, path=opj('src', 'phase1'), reckless='auto')\n aligned.clone(source=tnt.path, path=opj('src', 'tnt'), reckless='auto')\n # new acquisition\n labet = create(opj(path, 'private', 'labet'), description=\"raw data ET\")\n phase2_dicoms = create(opj(path, 'private', 'p2dicoms'), description=\"raw data P2MRI\")\n phase2 = public.create('phase2',\n description='new-style, RAW connection')\n phase2.clone(source=labet.path, path=opj('src', 'labet'), reckless='auto')\n phase2.clone(source=phase2_dicoms.path, path=opj('src', 'dicoms'), reckless='auto')\n # add to derivatives\n tnt.clone(source=phase2.path, path=opj('src', 'phase2'), reckless='auto')\n aligned.clone(source=phase2.path, path=opj('src', 'phase2'), reckless='auto')\n # never to be published media files\n media = create(opj(path, 'private', 'media'), description=\"raw data ET\")\n # assuming all annotations are in one dataset (in reality this is also\n # a superdatasets with about 10 subdatasets\n annot = public.create('annotations', description='stimulus annotation')\n annot.clone(source=media.path, path=opj('src', 'media'), reckless='auto')\n # a few typical analysis datasets\n # (just doing 3, actual status quo is just shy of 10)\n # and also the real goal -> meta analysis\n metaanalysis = public.create('metaanalysis', description=\"analysis of analyses\")\n for i in range(1, 3):\n ana = public.create('analysis{}'.format(i),\n description='analysis{}'.format(i))\n ana.clone(source=annot.path, path=opj('src', 'annot'), reckless='auto')\n ana.clone(source=aligned.path, path=opj('src', 'aligned'), reckless='auto')\n ana.clone(source=tnt.path, path=opj('src', 'tnt'), reckless='auto')\n # link to metaanalysis\n metaanalysis.clone(source=ana.path, path=opj('src', 'ana{}'.format(i)),\n reckless='auto')\n # simulate change in an input (but not raw) dataset\n create_tree(\n aligned.path,\n {'modification{}.txt'.format(i): 'unique{}'.format(i)})\n aligned.save()\n # finally aggregate data\n aggregate = public.create('aggregate', description='aggregate data')\n aggregate.clone(source=aligned.path, path=opj('src', 'aligned'), reckless='auto')\n # the toplevel dataset is intentionally left dirty, to reflect the\n # most likely condition for the joint dataset to be in at any given\n # point in time", "def orthographic_matrix(self) -> TransformationMatrixType:\n near, far = self._clipping[self.projection_mode.value]\n return orthographic_matrix(self.fov, self.aspect_ratio, near, far)", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def __init__(self):\n self.name = \"Schaffer\"\n objectives = [o_sh_1, o_sh_2]\n decisions = [Decision(-10 ** 5, 10 ** 5)]\n Model.__init__(self, objectives, None, decisions)", "def __createAuthoredObjFromUserPubElement(publication_element, researcher_object):\r\n #description\r\n #++++++PUBLICATION LITE++++++\r\n #check publication Element\r\n if publication_element is None:\r\n return \r\n #publication guid\r\n if publication_element is not None:\r\n guid = publication_element.get('id','')\r\n if guid == '':\r\n return\r\n #load Publication from db or create (flagged as needing refetch from symplectic) if doesnt exist\r\n publication_object = 
Publication.getOrCreatePublication(guid) \r\n #++++++BIBLIOGRAPHICRECORD LITE++++++\r\n #bibliographic-record element -> publication sub element (used to read XML)\r\n if publication_element is not None:\r\n #only ONE biblio element per publication will be returned when querying by User_id\r\n # this is in contrast to the multiple biblios elements per publication returned when querying by a Publication_guid\r\n biblio_element = publication_element.find(SYMPLECTIC_NAMESPACE + 'bibliographic-record')\r\n #biblio data-source\r\n if biblio_element is not None:\r\n data_source = biblio_element.get('data-source','')\r\n #load BibliographicRecord from db or create if doesnt exist (NB links biblio & publication)\r\n biblio_object = BibliographicRecord.getOrCreateBibliographicRecord(publication_object, data_source) \r\n #++++++AUTHORED++++++\r\n #authored preferences -> publication sub-elements (used to read XML)\r\n if publication_element is not None:\r\n preferences_element = publication_element.find(SYMPLECTIC_NAMESPACE + 'preferences-for-this-publication')\r\n #load Authored from db or create if doesnt exist (NB links authored & publication & researcher & bibliographic-record)\r\n authored_object = Authored.getOrCreateAuthored(publication_object, researcher_object, biblio_object)\r\n #preferences\r\n if preferences_element is not None:\r\n #Show this publication\r\n if preferences_element.get('visible','false') == 'true':\r\n authored_object.visible = True\r\n else:\r\n authored_object.visible = False\r\n #Favourite publication\r\n if preferences_element.get('is-a-favourite','false') == 'true':\r\n authored_object.is_a_favourite = True\r\n else:\r\n authored_object.is_a_favourite = False\r\n #Display order\r\n authored_object.reverse_sort_cue = preferences_element.get('reverse-sort-cue','')\r\n #Save Authored\r\n authored_object.save()", "def analysis(self):\r\n return analysis.Analysis(self.parent, self.object_id)", "def _make(self):\n\t\tself.scene.camera = self.camera\n\t\tself.camera.rotation_euler[0] = np.radians(np.random.randint(40, 100) +\n\t\t np.random.random())\n\t\tself.camera.rotation_euler[2] = np.radians(np.random.randint(0, 360) +\n\t\t np.random.random())\n\t\tprint([np.degrees(x) for x in self.camera.rotation_euler])", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def test_to_from_scene(self): # pragma: lpy\n super(TestObjDict, self).test_to_from_scene(_as_obj=True)", "def associated_coroot(self):", "def __init__(self, before, after):\r\n self.M = cv2.getPerspectiveTransform(before, after)\r\n self.inverse_M = cv2.getPerspectiveTransform(after, before)", "def topol (self):\n\n # child.connect(parent, parent_end, {child_start=0})\n # Distal (apical)\n self.dends['apical_trunk'].connect(self.soma, 1, 0)\n self.dends['apical_1'].connect(self.dends['apical_trunk'], 1, 0)\n self.dends['apical_2'].connect(self.dends['apical_1'], 1, 0)\n self.dends['apical_tuft'].connect(self.dends['apical_2'], 1, 0)\n\n # apical_oblique comes off distal end of apical_trunk\n self.dends['apical_oblique'].connect(self.dends['apical_trunk'], 1, 0)\n\n # Proximal (basal)\n self.dends['basal_1'].connect(self.soma, 0, 0)\n self.dends['basal_2'].connect(self.dends['basal_1'], 1, 0)\n 
self.dends['basal_3'].connect(self.dends['basal_1'], 1, 0)\n\n self.basic_shape() # translated from original hoc (2009 model)", "def qa_tractography(stream_path,qa_out_path,brain_path):\n \n #Use window to visualize the streamlines\n r = window.renderer()\n \n #Load the streamline.trk file\n streamlines_mni_load = nib.streamlines.load(stream_path).streamlines\n streamlines_mni_in = Streamlines(streamlines_mni_load)\n streamlines_actor = actor.line(\n streamlines_mni_in,\n colormap.line_colors(streamlines_mni_in),\n lod_points=10000,\n depth_cue=True,\n linewidth=0.2,\n fake_tube=True,\n opacity=0.3,\n )\n \n r.add(streamlines_actor)\n #window.show(r)\n \n showmng = window.ShowManager(r)\n #window.record function can rotate the 3D-image, then get the snapshot of the specific angle. \n window.record(r,cam_pos = (70.03, 64.97, 269.80), cam_view = (0,1,0),path_numbering= True,out_path = qa_out_path + '/Rotate_Z_axis_', az_ang=120,n_frames=3,reset_camera=True,size=(600,600))\n window.record(r,cam_pos = (70.03, 64.97, 269.80), cam_view = (1,0,0),path_numbering= True,out_path = qa_out_path + '/Rotate_Y_axis_', az_ang=120,n_frames=3,reset_camera=True,size=(600,600))\n window.record(r,cam_pos = (70.03, 64.97, 269.80), cam_view = (0,0,1),path_numbering= True,out_path = qa_out_path + '/Rotate_X_axis_', az_ang=120,n_frames=3,reset_camera=True,size=(600,600))\n showmng.exit()\n combine_plot(qa_out_path,brain_path)", "def create_antipodal_grasp_set(object):\n gripper_model = burg.gripper.Robotiq2F85()\n\n ags = burg.sampling.AntipodalGraspSampler()\n ags.only_grasp_from_above = True\n ags.mesh = object.object_type.mesh\n ags.gripper = gripper_model\n ags.n_orientations = 10\n ags.verbose = True\n ags.max_targets_per_ref_point = 2\n ags.no_contact_below_z = 0.01\n graspset, contacts, normals, approach_vectors = ags.sample(100)\n # gs.scores = ags.check_collisions(gs, use_width=False) # need to install python-fcl\n print('contacts.shape', contacts.shape)\n #burg.visualization.show_grasp_set([ags.mesh], graspset, gripper=gripper_model, use_width=False,\n # score_color_func=lambda s: [s, 1-s, 0], with_plane=True)\n \n \"\"\" print(\"graspset :\", len(graspset))\n print(\"contacts :\", contacts.shape)\n print(\"normales :\", normals.shape)\n print(\"angles :\", angles.shape) \"\"\"\n\n return graspset, contacts, normals, approach_vectors", "def get_standardarticle(self):\n return self._standardarticle", "def topdia(x):\r\n return Feature(x, \"TopDia\")", "def visible_objects_and_duplis():\r\n \r\n for obj in context.visible_objects:\r\n if obj.type == 'MESH':\r\n yield (obj, obj.matrix_world.copy())\r\n \r\n if obj.dupli_type != 'NONE':\r\n obj.dupli_list_create(scene)\r\n for dob in obj.dupli_list:\r\n obj_dupli = dob.object\r\n if obj_dupli.type == 'MESH':\r\n yield (obj_dupli, dob.matrix.copy())\r\n \r\n obj.dupli_list_clear()", "def __init__(self):\n self.name = \"Osyczka\"\n objectives = [ob_os_1, ob_os_2]\n constraints = [con_os_1, con_os_2, con_os_3, con_os_4, con_os_5, con_os_6]\n decisions = [Decision(0, 10), Decision(0, 10), Decision(1, 5), Decision(0, 6), Decision(1, 5), Decision(0, 10)]\n Model.__init__(self, objectives, constraints, decisions)", "def create_observation(self):", "def create_observation(self):", "def build_dom():\n glass_thickness = 10 #mm\n size = 100 #mm\n # outside of the glass envelope\n outside_mesh = make.sphere(size)\n # inside of the glass envelope\n inside_mesh = make.sphere(size-glass_thickness)\n\n # outside solid with ice on the outside, and glass on the inside\n 
outside_solid = Solid(outside_mesh,glass,ice) \n\n inside_surface = r7081hqe_photocathode\n inside_color = 0x00ff00\n\n # construct the inside solid\n inside_solid = Solid(inside_mesh,vacuum,glass,surface=inside_surface,\n color=inside_color)\n\n # you can add solids and meshes!\n return outside_solid + inside_solid", "def __init__(self):\n self.opening_scene = DungeonGate()\n # this list define the order of scenes in the corridor\n self.corridor_scenes = [GuardsRoom(), Cell(), Armory(), EmptyRoom(), Dormitory()]\n shuffle(self.corridor_scenes)\n self.explored_scenes = {\n \"GuardsRoom\": \"unexplored\",\n \"Cell\": \"unexplored\",\n \"Dormitory\": \"unexplored\",\n \"Armory\": \"unexplored\",\n \"EmptyRoom\": \"unexplored\",\n \"DungeonGate\": \"unexplored\"\n }", "def mortality(self):\n pass", "def output_projection(self):\n return self.projection(what='output')", "def getCriticStation(analyzer):\n mayIn = model.getRankMay(analyzer,\"in\")\n mayOut=model.getRankMay(analyzer,\"out\")\n less=model.getRankMen(analyzer,\"LessPopular\")\n return (mayIn,mayOut,less)", "def getOntologyItem(self, resource, oType=0):\n\n if isinstance(resource, int):\n resource = 'ontology/{0}/{1}'.format(resource, oType)\n\n res = self.getRequest(resource)\n onto = vsdModels.Ontology(**res)\n\n return onto", "def lro(self) -> global___Snippet.Lro:", "def view(self):\n view = self.empty_like()\n # Note that this is a shallow copy.\n view.sects = self.sects.copy()\n return view", "def asr_model():\n\n asr_model = EncoderDecoderASR.from_hparams(\n source=\"speechbrain/asr-transformer-transformerlm-librispeech\"\n )\n return asr_model", "def getRecord(self):\n logger.debug(\"Entering in ocentricWKT.getRecord\")\n\n\n record = {\n 'type':'OCENTRIC', \n OcentricMetadata.GEO_GCS_NAME.value: self.getGeoGcsName(),\n OcentricMetadata.DATUM_NAME.value: self.getDatumName(),\n OcentricMetadata.ELLIPSOIDE_NAME.value: self.getSpheroidName(),\n OcentricMetadata.RADIUS.value: self.getRadius(),\n OcentricMetadata.INVERSE_FLATTENING.value: self.getInverseFlattening(),\n OcentricMetadata.AUTHORITY_NAME.value: self.getAuthorityName(),\n OcentricMetadata.AUTHORITY_CODE.value: self.getAuthorityCode(), \n OcentricMetadata.LONGITUDE_ORDER.value: \"East\"\n }\n\n logger.debug(\"Exiting from ocentricWKT.getRecord\")\n return record", "def to_povray(vis,world,properties={}):\n #patch on vapory\n patch_vapory()\n \n #camera\n mat=vis.view.camera.matrix()\n pos=mat[1]\n right=mat[0][0:3]\n up=mat[0][3:6]\n dir=op.mul(mat[0][6:9],-1)\n tgt=op.add(mat[1],dir)\n #scale\n fovy=vis.view.fov*vis.view.h/vis.view.w\n fovx=math.atan(vis.view.w*math.tan(fovy*math.pi/360.)/vis.view.h)*360./math.pi\n right=op.mul(right,-float(vis.view.w)/vis.view.h)\n #camera\n camera_params=['orthographic' if vis.view.orthogonal else 'perspective',\n 'location',[pos[0],pos[1],pos[2]],\n 'look_at',[tgt[0],tgt[1],tgt[2]],\n 'right',[right[0],right[1],right[2]],\n 'up',[up[0],up[1],up[2]],\n 'angle',fovx,\n 'sky',get_property(properties,[],\"sky\",[0.,0.,1.])]\n camera=vp.Camera(*camera_params)\n \n #tempfile\n tempfile=get_property(properties,[],\"tempfile\",None)\n tempfile_path=os.path.dirname(tempfile) if tempfile is not None else '.'\n if not os.path.exists(tempfile_path):\n os.mkdir(tempfile_path)\n \n #objects\n objects=[]\n objs=[o for o in properties[\"visualObjects\"]] if \"visualObjects\" in properties else []\n objs+=[world.terrain(i) for i in range(world.numTerrains())]\n objs+=[world.rigidObject(i) for i in range(world.numRigidObjects())]\n for r in 
range(world.numRobots()):\n objs+=[world.robot(r).link(i) for i in range(world.robot(r).numLinks())]\n for obj in objs:\n transient=get_property(properties,[obj],\"transient\",default=True)\n if transient:\n objects+=geometry_to_povray(obj.appearance(),obj.geometry(),obj,None,properties=properties)\n else: \n path=tempfile_path+'/'+obj.getName()+'.pov'\n if not os.path.exists(path):\n R,t=obj.geometry().getCurrentTransform()\n obj.geometry().setCurrentTransform([1,0,0,0,1,0,0,0,1],[0,0,0])\n geom=geometry_to_povray(obj.appearance(),obj.geometry(),obj,None,properties=properties)\n if len(geom)>1:\n file_content=vp.Union(*geom)\n elif len(geom)>0: \n file_content=vp.Object(*geom)\n else: file_content=None\n if file_content is not None:\n f=open(path,'w')\n f.write(str(file_content))\n f.close()\n obj.geometry().setCurrentTransform(R,t)\n else: path=None\n #include \n if path is not None:\n R,t=obj.geometry().getCurrentTransform()\n objects.append(vp.Object('#include \"%s\"'%path,\"matrix\",R+t))\n \n #light\n if \"lights\" in properties:\n objects+=properties[\"lights\"]\n \n #scene\n gsettings=[]\n scene=vp.Scene(camera=camera,\n objects=objects,\n included=get_property(properties,[],\"included\",[]),\n global_settings=get_property(properties,[],\"global_settings\",[]))\n try:\n #this works with later version of vapory\n return \\\n render_povstring(str(scene), \\\n outfile=get_property(properties,[],\"outfile\",None), \\\n width=vis.view.w,height=vis.view.h, \\\n quality=get_property(properties,[],\"quality\",None), \\\n antialiasing=get_property(properties,[],\"antialiasing\",0.3), \\\n remove_temp=get_property(properties,[],\"remove_temp\",False), \\\n show_window=get_property(properties,[],\"show_window\",False), \\\n tempfile=tempfile, \\\n includedirs=get_property(properties,[],\"includedirs\",None), \\\n output_alpha=get_property(properties,[],\"output_alpha\",True))\n except:\n #this works with earlier version of vapory\n return \\\n render_povstring(str(scene), \\\n outfile=get_property(properties,[],\"outfile\",None), \\\n width=vis.view.w,height=vis.view.h, \\\n quality=get_property(properties,[],\"quality\",None), \\\n antialiasing=get_property(properties,[],\"antialiasing\",0.3), \\\n remove_temp=get_property(properties,[],\"remove_temp\",False))", "def __init__(self):\n self.rho=[]\n self.te=[]\n self.ti=[]\n self.ne=[]\n self.ni=[]\n self.ni1=[]\n self.ni2=[]\n self.ni3=[]\n self.vtor=[]\n self.zeff=[]\n\n self.nion=1\n self.Z=[]\n self.A=[]\n self.coll_mode=[]", "def setup_orographic_enhancement_cube():\n data = np.array([[1, 1, 1], [0, 1, 0], [0, 0, 0], [0, 0, 0]], dtype=np.float32)\n orographic_enhancement_cube = set_up_variable_cube(\n data, name=\"orographic_enhancement\", units=\"mm/hr\", spatial_grid=\"equalarea\"\n )\n return orographic_enhancement_cube", "def orthonormation_method(standardized_methods_cleaned):\n method_standardized_ortho = standardized_methods_cleaned.copy(deep=True)\n\n categories = method_standardized_ortho.columns.tolist()\n\n # Normation of the first category\n method_standardized_ortho[categories[0]] = method_standardized_ortho[categories[0]] / \\\n linalg.norm(method_standardized_ortho[categories[0]])\n\n # Normation of every following categories\n j = 0\n while j < len(categories):\n i = 0\n while i < j:\n # Calculates the orthogonal projection of j on each i and substraction of the projection from j\n method_standardized_ortho[categories[j]] = \\\n method_standardized_ortho[categories[j]] - method_standardized_ortho[categories[i]] * (\n 
sum(method_standardized_ortho[categories[i]] * method_standardized_ortho[categories[j]]) /\n sum(method_standardized_ortho[categories[i]] * method_standardized_ortho[categories[i]]))\n if linalg.norm(method_standardized_ortho[categories[j]]) == 0:\n # If after the projection, if the j columns is null it is droped (i.e it is linearly dependant with\n # the other columns) and the inner loop stops\n method_standardized_ortho.drop(method_standardized_ortho.columns[j], inplace=True, axis=1)\n categories.remove(categories[j])\n\n break\n else:\n # If the j column is not null, it is normed and the inner while loop keeps going\n method_standardized_ortho[categories[j]] = method_standardized_ortho[categories[j]] / \\\n (linalg.norm(method_standardized_ortho[categories[j]]))\n i += 1\n j += 1\n\n return method_standardized_ortho", "def _get_ortho_path(self, **kwargs) -> Union[CloudPath, Path]:\n\n if self.product_type in self._proj_prod_type:\n ortho_name = f\"{self.condensed_name}_ortho.tif\"\n\n ortho_path, ortho_exists = self._get_out_path(ortho_name)\n if not ortho_exists:\n LOGGER.info(\n \"Manually orthorectified stack not given by the user. \"\n \"Reprojecting whole stack, this may take a while. \"\n \"(May be inaccurate on steep terrain, depending on the DEM resolution)\"\n )\n\n # Reproject and write on disk data\n dem_path = self._get_dem_path(**kwargs)\n with rasterio.open(str(self._get_tile_path(**kwargs))) as src:\n\n out_arr, meta = self._reproject(\n src.read(), src.meta, src.rpcs, dem_path, **kwargs\n )\n rasters_rio.write(out_arr, meta, ortho_path)\n\n else:\n ortho_path = self._get_tile_path(**kwargs)\n\n return ortho_path", "def _get_viewer_container(self):\n self.viewer = self.traj._tempmol.draw3d(style='licorice')\n return self.viewer", "def spatial(self):", "def perspective(self):\n return self.container['perspective']", "def model_cohort(cohort):\n model_traj = []\n for part in cohort:\n for traj in part.trajectories:\n if traj.filter is True:\n model_traj.append(\n modelling.model(x=traj.data.age.tolist(),\n y=traj.data.AF.tolist(),\n mutation=traj.mutation,\n variant_class=traj.variant_class,\n gene=traj.mutation.split()[0],\n id=part.id,\n p_key=traj.p_key))\n return model_traj", "def get_contenu(self):\n return self.objets", "def __init__(self, por): \n logger.debug(\"Entering in ocentricWKT constructor\") \n super(OcentricWKT, self).__init__(\n por.getElement(OcentricMetadata.GEO_GCS_NAME),\n por.getElement(OcentricMetadata.DATUM_NAME),\n por.getElement(OcentricMetadata.ELLIPSOIDE_NAME),\n por.getElement(OcentricMetadata.RADIUS),\n por.getElement(OcentricMetadata.INVERSE_FLATTENING),\n por.getElement(OcentricMetadata.AUTHORITY_NAME),\n por.getElement(OcentricMetadata.AUTHORITY_CODE)\n ) \n logger.debug(\"Exiting from ocentricWKT constructor\")", "def get_study_by_pmid(cls, pmid):\n\n print \"Found study \", pmid\n study_obj = cls.query.filter(cls.pmid == pmid).first()\n return study_obj", "def Omat(self):\n if self.standard:\n return np.matrix(((0, -1, 0), (0, 0, 1), (-1, 0, 0)))\n else:\n return np.matrix(((0, 0, 1), (0, 1, 0), (-1, 0, 0)))", "def construct_data_LHS(model,num_pts,seed=None):\n num_dims = model.num_dims\n rv_trans = define_random_variable_transformation_hydromad(model)\n pts = latin_hypercube_design( num_pts, num_dims, seed )\n # returns points on [0,1] but need pts on [-1,1]\n pts = 2*pts-1.\n pts = rv_trans.map_from_canonical_distributions( pts )\n vals = model.evaluate_set( pts )\n numpy.savetxt( 'pts.txt', pts, delimiter = ',' )\n numpy.savetxt( 
'vals.txt', vals, delimiter = ',' )", "def __init__(self, settings, conflict):\n #generates the initial setting\n self.start_setting = random.choice(settings)\n self.current_setting = self.start_setting\n\n #gets a random character from possible setting characters as protagonist\n self.protag = random.choice(self.start_setting.characters)\n\n #selects as antagonist a random character that either the protagonist knows or is related\n #to the setting\n antaglist = list(set(self.protag.characters).union(self.start_setting.characters))\n self.antag = random.choice(antaglist)\n\n self.antag.set_protagonist(self.protag)\n self.protag.set_antagonist(self.antag)\n\n #lays out the basic story relations\n self.subsections = []\n\n for relation_outline in conflict.conflict_outline: \n subsection = Subsection() \n subsection.init_from_outline(relation_outline, self.protag, self.current_setting)\n self.subsections.append(subsection)\n\n #connects the rest of the story together\n self.details = []", "def __init__(self):\n self.name = \"Kursawe\"\n objectives = [o_ku_1, o_ku_2]\n decisions = [Decision(-5, 5), Decision(-5, 5), Decision(-5, 5)]\n Model.__init__(self, objectives, None, decisions)", "def objects(self):", "def get_representation(self, reference):\n \n return Representation(data=self.get_data(reference), name=reference)", "def clonar(self):\n return Tortuga(self.posicion, self.orientacion, self.pluma.clonar())", "def make_multi_object_scene(self):\n multi1 = Scene3D()\n box = self.objects[0]\n box.set_location(1, 0, 0)\n box.set_size(0.4, 0.4, 0.1)\n multi1.add_object(box)\n\n box = self.objects[1]\n box.set_location(-1, 0, 0)\n multi1.add_object(box)\n\n self.scenes.append(multi1)" ]
[ "0.6370708", "0.5323299", "0.51920646", "0.5153776", "0.5107432", "0.5070821", "0.5055284", "0.49911162", "0.49496976", "0.49446896", "0.49231794", "0.49001023", "0.4827991", "0.48052597", "0.4804953", "0.47864586", "0.4774238", "0.47692737", "0.4762107", "0.4743235", "0.47330493", "0.47329876", "0.47263598", "0.47207245", "0.471975", "0.47196102", "0.4711031", "0.47081006", "0.4705919", "0.4705121", "0.46896866", "0.46767405", "0.467562", "0.467562", "0.467562", "0.467562", "0.467562", "0.4668627", "0.46669814", "0.46645162", "0.46600243", "0.46508408", "0.46411917", "0.46197522", "0.46158427", "0.4600766", "0.4581928", "0.45757353", "0.45709148", "0.4558139", "0.4552361", "0.45523325", "0.45490208", "0.45481914", "0.45446768", "0.4542857", "0.45420247", "0.45420247", "0.4538394", "0.45294303", "0.45262632", "0.4512127", "0.45084658", "0.4507053", "0.45067167", "0.45038936", "0.45034894", "0.45025218", "0.45025107", "0.45025107", "0.45011085", "0.4496832", "0.44852245", "0.44844362", "0.4483251", "0.44815874", "0.4478691", "0.44748443", "0.4474844", "0.44666067", "0.4463685", "0.44575936", "0.44568217", "0.44553483", "0.44490966", "0.4446391", "0.44451025", "0.44448847", "0.44425008", "0.44392443", "0.44297513", "0.44254318", "0.4425175", "0.4418159", "0.44138345", "0.44079903", "0.4402327", "0.44020832", "0.44019854", "0.43975428" ]
0.7259197
0
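The two bare values above are this record's document_score (0.7259197) and document_rank (0); the dump leaves them unlabeled. A minimal sanity-check sketch follows — the field names and the hardcoded record are assumptions for illustration — showing that the stored rank equals the number of mined negatives whose score meets or beats the positive document's score:

# Minimal sketch: recompute document_rank from negative_scores for one
# record. The score list is truncated to the first few values shown
# above; field names are assumed, since the dump leaves them unlabeled.
record = {
    "negative_scores": [0.6370708, 0.5323299, 0.51920646, 0.5153776],  # truncated
    "document_score": 0.7259197,
    "document_rank": 0,
}

computed_rank = sum(
    1 for s in record["negative_scores"] if s >= record["document_score"]
)
# Rank 0 means the positive document outscored every mined negative.
assert computed_rank == record["document_rank"]

With a top negative score of 0.6370708 against a document score of 0.7259197, the recomputed rank is 0, matching the stored value.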
return orthanc objects of studies
def orthanc_studies(self): return [orthanc.study(x.orthanc_id) for x in self.studies]
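The positive document above is a one-line accessor that resolves stored study references into full Orthanc study objects. A self-contained sketch of the same pattern follows — StudyRef, FakeOrthanc, and Patient are stand-ins invented here for illustration, not part of the original codebase:

# Self-contained sketch of the mined document's pattern, under assumed
# types: a client exposing study(id) lookups and records carrying an
# orthanc_id reference.
from dataclasses import dataclass, field
from typing import List

@dataclass
class StudyRef:
    orthanc_id: str

class FakeOrthanc:
    def study(self, orthanc_id: str) -> dict:
        # Stand-in for a real Orthanc study lookup.
        return {"ID": orthanc_id, "Type": "Study"}

orthanc = FakeOrthanc()

@dataclass
class Patient:
    studies: List[StudyRef] = field(default_factory=list)

    def orthanc_studies(self):
        # Same comprehension as the mined document: resolve each stored
        # reference into a full study object.
        return [orthanc.study(x.orthanc_id) for x in self.studies]

p = Patient(studies=[StudyRef("abc"), StudyRef("def")])
assert [s["ID"] for s in p.orthanc_studies()] == ["abc", "def"]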
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n return orthanc.study(self.orthanc_id)", "def studies(self):\n return self._study_queryset", "def DumpStudies():\n for name in myStudyManager.GetOpenStudies():\n s=myStudyManager.GetStudyByName(name)\n print \"study:\",name, s._get_StudyId()\n DumpStudy(s)", "def search_research_studies_with_observations():\n return ResearchStudy.where(struct={}).include('focus', Observation, reverse=True)", "def objects_rst(self):\n return [_.as_rst for _ in self.objects]", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def visible_objects_and_duplis():\r\n \r\n for obj in context.visible_objects:\r\n if obj.type == 'MESH':\r\n yield (obj, obj.matrix_world.copy())\r\n \r\n if obj.dupli_type != 'NONE':\r\n obj.dupli_list_create(scene)\r\n for dob in obj.dupli_list:\r\n obj_dupli = dob.object\r\n if obj_dupli.type == 'MESH':\r\n yield (obj_dupli, dob.matrix.copy())\r\n \r\n obj.dupli_list_clear()", "def anvil_research_studies_with_observations(anvil_server, search_research_studies_with_observations):\n return search_research_studies_with_observations.perform_resources(anvil_server.server)", "def objects(self):", "def get_administerable_studies_by_name():\n researcher_admin = get_session_researcher()\n if researcher_admin.site_admin:\n studies = Study.get_all_studies_by_name()\n else:\n studies = researcher_admin.get_administered_studies_by_name()\n return studies", "def get_queryset(self):\n return Objective.objects.order_by('perspective')", "def model_cohort(cohort):\n model_traj = []\n for part in cohort:\n for traj in part.trajectories:\n if traj.filter is True:\n model_traj.append(\n modelling.model(x=traj.data.age.tolist(),\n y=traj.data.AF.tolist(),\n mutation=traj.mutation,\n variant_class=traj.variant_class,\n gene=traj.mutation.split()[0],\n id=part.id,\n p_key=traj.p_key))\n return model_traj", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n yield (obj, obj.matrix_world.copy())\n\n if obj.instance_type != 'NONE':\n obj.dupli_list_create(scene)\n for dob in obj.dupli_list:\n obj_dupli = dob.object\n if obj_dupli.type == 'MESH':\n yield (obj_dupli, dob.matrix.copy())\n\n obj.dupli_list_clear()", "def get_objectives(self):\n return copy.deepcopy(self.objectives), self.gates_names", "def objets_uniques(self):\n objets = []\n for membre in self.membres:\n for objet in membre.equipe:\n if objet.unique:\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n if membre.tenu and membre.tenu.unique:\n objet = membre.tenu\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n\n return objets", "def all_research_studies_with_observations(aggregated_server, search_research_studies_with_observations):\n return search_research_studies_with_observations.perform_resources(aggregated_server.server)", "def composeWorkplaceOntology():\n\n import ossPyFuncs \n import pandas as pd\n \n #mysql query to extract full table from government organizations\n #certian table columns feature capital letters which cases uproblems\n postgreSql_selectQuery=\"SELECT * FROM us_gov_manual.us_govman_2019 ;\"\n #pass querry and obtain table\n govTable=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n\n #mysql query to obtain 
academic instutions\n postgreSql_selectQuery=\"SELECT institution FROM hipolabs.universities ;\"\n #pass querry and obtain table\n univTable=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2018_us1000;\"\n businesses1=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2019_us1000;\"\n businesses2=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n \n postgreSql_selectQuery=\"SELECT company FROM forbes.fortune2020_global2000;\"\n businesses3=ossPyFuncs.queryToPDTable(postgreSql_selectQuery)\n\n #combine theinsitutions into a vector\n combinedSeries=[govTable['AgencyName'],univTable['institution'],businesses1['company'],businesses2['company'],businesses3['company']]\n #turn the multi item vector into a single series\n fullWordbank=pd.concat(combinedSeries)\n #turn that series into a pd dataframe\n wordbankTable=pd.DataFrame(fullWordbank.unique())\n\n return wordbankTable", "def phonology(request):\n\n perspective_cid = request.params.get('perspective_client_id')\n perspective_oid = request.params.get('perspective_object_id')\n\n # Checking if we have limits on number of computed results.\n\n limit = (None if 'limit' not in request.params else\n int(request.params.get('limit')))\n\n limit_exception = (None if 'limit_exception' not in request.params else\n int(request.params.get('limit_exception')))\n\n limit_no_vowel = (None if 'limit_no_vowel' not in request.params else\n int(request.params.get('limit_no_vowel')))\n\n limit_result = (None if 'limit_result' not in request.params else\n int(request.params.get('limit_result')))\n\n # TODO: get perspective's translation and language it belongs to.\n\n # We get lexical entries of this perspective with markup'ed sounds.\n\n Sound = aliased(Entity, name = \"Sound\")\n PublishingSound = aliased(PublishingEntity, name = \"PublishingSound\")\n\n query = DBSession.query(LexicalEntry, Entity, Sound, PublishingEntity, PublishingSound).filter(and_(\n LexicalEntry.parent_client_id == perspective_cid,\n LexicalEntry.parent_object_id == perspective_oid,\n LexicalEntry.marked_for_deletion == False,\n Entity.parent_client_id == LexicalEntry.client_id,\n Entity.parent_object_id == LexicalEntry.object_id,\n Entity.marked_for_deletion == False,\n Entity.additional_metadata.contains({\"data_type\": \"praat markup\"}),\n PublishingEntity.client_id == Entity.client_id,\n PublishingEntity.object_id == Entity.object_id,\n PublishingEntity.published == True,\n PublishingEntity.accepted == True,\n Sound.client_id == Entity.self_client_id,\n Sound.object_id == Entity.self_object_id,\n Sound.marked_for_deletion == False,\n PublishingSound.client_id == Sound.client_id,\n PublishingSound.object_id == Sound.object_id,\n PublishingSound.published == True,\n PublishingSound.accepted == True))\n\n # We process these lexical entries in batches. 
Just in case, it seems that perspectives rarely have more\n # then several hundred such lexical entries.\n\n exception_counter = 0\n no_vowel_counter = 0\n result_list = list()\n\n for index, row in enumerate(query.yield_per(100)):\n\n markup_url = row.Entity.content\n sound_url = row.Sound.content\n\n cache_key = 'phonology:{0}:{1}:{2}:{3}'.format(\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id)\n\n # Checking if we have cached result for this pair of sound/markup.\n\n cache_result = CACHE.get(cache_key)\n\n if cache_result == 'no_vowel':\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]: no vowels\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we have cached exception, we do the same as with absence of vowels, show its info and\n # continue.\n\n elif isinstance(cache_result, tuple) and cache_result[0] == 'exception':\n exception, traceback_string = cache_result[1:3]\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n '[CACHE {7}]: exception\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url))\n\n log.debug(traceback_string)\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # If we actually have the result, we use it and continue.\n\n elif cache_result:\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in cache_result)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}) '\n '[CACHE {7}]:\\n{8}\\n{9}\\n{10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n cache_key, markup_url, sound_url, result_string))\n\n result_list.append(cache_result)\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n continue\n\n try:\n # Getting markup, checking for each tier if it needs to be processed.\n\n markup_bytes = urllib.request.urlopen(urllib.parse.quote(markup_url, safe = '/:')).read()\n\n textgrid = pympi.Praat.TextGrid(xmax = 0)\n textgrid.from_file(\n io.BytesIO(markup_bytes),\n codec = chardet.detect(markup_bytes)['encoding'])\n\n tier_data_list = []\n vowel_flag = False\n\n for tier_number, tier_name in textgrid.get_tier_name_num():\n\n raw_interval_list = textgrid.get_tier(tier_number).get_all_intervals()\n raw_interval_seq_list = [[]]\n\n # Splitting interval sequence on empty intervals.\n\n for raw_index, interval in enumerate(raw_interval_list):\n\n if len(interval[2].strip()) <= 0:\n if len(raw_interval_seq_list[-1]) > 0:\n raw_interval_seq_list.append([])\n\n 
else:\n raw_interval_seq_list[-1].append((raw_index, interval))\n\n if len(raw_interval_seq_list[-1]) <= 0:\n del raw_interval_seq_list[-1]\n\n # Selecting interval sequences for analysis, checking if we have unusual markup.\n \n interval_seq_list = []\n interval_idx_to_raw_idx = dict()\n\n unusual_markup_flag = False\n unusual_markup_list = []\n\n for raw_interval_seq in raw_interval_seq_list:\n\n interval_seq_list.append([])\n interval_idx_to_raw_idx[len(interval_seq_list) - 1] = {}\n\n for partial_raw_index, (raw_index, interval) in enumerate(raw_interval_seq):\n\n interval_text = interval[2].strip()\n\n # Accepting interval if its text contains at least one vowel, and is short enough or\n # is a valid phonetic transcription.\n\n transcription_check = re.fullmatch(transcription_re, interval_text)\n\n if (len(interval_text) > 0 and\n any(character in vowel_set for character in interval_text) and\n (len(interval_text) <= 2 or transcription_check)):\n\n interval_seq_list[-1].append(interval)\n\n sequence_index = len(interval_seq_list) - 1\n interval_index = len(interval_seq_list[-1]) - 1\n\n interval_idx_to_raw_idx[(sequence_index, interval_index)] = raw_index\n interval_idx_to_raw_idx[sequence_index][interval_index] = partial_raw_index\n\n # Noting if the interval contains unusual (i.e. non-transcription) markup.\n\n elif not transcription_check:\n\n unusual_markup_flag = True\n unusual_markup_list.append((raw_index, interval))\n\n transcription_list = [text for begin, end, text in raw_interval_list]\n transcription = ''.join(transcription_list)\n\n selected_list = [text\n for interval_list in interval_seq_list\n for begin, end, text in interval_list]\n\n selected = ''.join(selected_list)\n\n # If we have intervals with unusual markup, we report them.\n\n if unusual_markup_flag:\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' has interval(s) with unusual transcription text: '\n '{9} / {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription, dict(unusual_markup_list)))\n\n # If the markup does not have any vowels, we note it and also report it.\n\n if all(character not in vowel_set for character in transcription):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' doesn\\'t have any vowel markup: {9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name, transcription_list))\n\n # It is also possible that while full transcription has vowels, intervals selected for\n # analysis do not. 
In that case we also note it and report it.\n\n elif not any(character in vowel_set for character in selected):\n\n tier_data_list.append((tier_number, tier_name, 'no_vowel_selected'))\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'tier {7} \\'{8}\\' intervals to be processed don\\'t have any vowel markup: '\n 'markup {9}, selected {10}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n tier_number, tier_name,\n transcription_list, selected_list))\n\n # Otherwise we store tier data to be used during processing of the sound file.\n\n else:\n tier_data_list.append((tier_number, tier_name,\n (raw_interval_list, raw_interval_seq_list, interval_seq_list,\n interval_idx_to_raw_idx, transcription)))\n\n vowel_flag = True\n\n # If there are no tiers with vowel markup, we skip this sound-markup file altogether.\n\n if not vowel_flag:\n\n CACHE.set(cache_key, 'no_vowel')\n no_vowel_counter += 1\n\n if (limit_no_vowel and no_vowel_counter >= limit_no_vowel or\n limit and index + 1 >= limit):\n break\n\n continue\n\n # Otherwise we retrieve the sound file and analyse each vowel-containing markup.\n # Partially inspired by source code at scripts/convert_five_tiers.py:307.\n\n sound = None\n with tempfile.NamedTemporaryFile() as temp_file:\n\n sound_file = urllib.request.urlopen(urllib.parse.quote(sound_url, safe = '/:'))\n temp_file.write(sound_file.read())\n temp_file.flush()\n\n sound = AudioPraatLike(pydub.AudioSegment.from_wav(temp_file.name))\n\n tier_result_list = []\n\n for tier_number, tier_name, tier_data in tier_data_list:\n\n if tier_data == 'no_vowel' or tier_data == 'no_vowel_selected':\n tier_result_list.append((tier_number, tier_name, tier_data))\n continue\n\n # Analyzing vowel sounds of each interval sequence.\n\n (raw_interval_list, raw_interval_seq_list, interval_seq_list, interval_idx_to_raw_idx,\n transcription) = tier_data\n\n tier_result_list.append((tier_number, tier_name, []))\n\n for seq_index, (raw_interval_list, interval_list) in enumerate(zip(\n raw_interval_seq_list, interval_seq_list)):\n\n if len(interval_list) <= 0:\n continue\n\n (max_intensity_index, max_intensity, max_length_index, max_length) = \\\n find_max_interval_praat(sound, interval_list)\n\n max_intensity_interval = interval_list[max_intensity_index]\n max_length_interval = interval_list[max_length_index]\n\n max_intensity_f1_f2 = sound.get_interval_formants(*max_intensity_interval[:2])\n max_length_f1_f2 = sound.get_interval_formants(*max_length_interval[:2])\n\n # Compiling results.\n\n max_length_str = '{0} {1:.3f} [{2}]'.format(\n max_length_interval[2], max_length,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_length_index]])))\n\n max_intensity_str = '{0} {1:.3f} [{2}]'.format(\n max_intensity_interval[2],\n max_intensity,\n len(''.join(text for index, (begin, end, text) in\n raw_interval_list[:interval_idx_to_raw_idx[seq_index][max_intensity_index]])))\n\n tier_result_list[-1][2].append([\n ''.join(text for index, (begin, end, text) in raw_interval_list),\n max_length_str,\n '{0:.3f}'.format(max_length_f1_f2[0]),\n '{0:.3f}'.format(max_length_f1_f2[1]),\n max_intensity_str,\n '{0:.3f}'.format(max_intensity_f1_f2[0]),\n '{0:.3f}'.format(max_intensity_f1_f2[1]),\n '+' if max_intensity_index == max_length_index else '-'])\n\n # Saving result.\n\n 
result_list.append(tier_result_list)\n CACHE.set(cache_key, tier_result_list)\n\n result_string = '\\n'.join(\n 'tier {0} \\'{1}\\': {2}'.format(tier_number, tier_name,\n \n tier_result_seq_list if not isinstance(tier_result_seq_list, list) else\n tier_result_seq_list[0] if len(tier_result_seq_list) <= 1 else\n ''.join('\\n {0}'.format(tier_result) for tier_result in tier_result_seq_list))\n\n for tier_number, tier_name, tier_result_seq_list in tier_result_list)\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}):'\n '\\n{7}\\n{8}\\n{9}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url, result_string))\n\n # Stopping earlier, if required.\n\n if (limit_result and len(result_list) >= limit_result or\n limit and index + 1 >= limit):\n break\n\n except Exception as exception:\n\n #\n # NOTE\n #\n # Exceptional situations encountered so far:\n #\n # 1. TextGrid file actually contains sound, and wav file actually contains textgrid markup.\n #\n # Perspective 330/4, LexicalEntry 330/7, sound-Entity 330/2328, markup-Entity 330/6934\n #\n # 2. Markup for one of the intervals contains a newline \"\\n\", and pympi fails to parse it.\n # Praat parses such files without problems.\n #\n # Perspective 330/4, LexicalEntry 330/20, sound-Entity 330/6297, markup-Entity 330/6967\n #\n\n log.debug(\n '{0} (LexicalEntry {1}/{2}, sound-Entity {3}/{4}, markup-Entity {5}/{6}): '\n 'exception\\n{7}\\n{8}'.format(\n index,\n row.LexicalEntry.client_id, row.LexicalEntry.object_id,\n row.Sound.client_id, row.Sound.object_id,\n row.Entity.client_id, row.Entity.object_id,\n markup_url, sound_url))\n\n # if we encountered an exception, we show its info and remember not to try offending\n # sound/markup pair again.\n\n traceback_string = ''.join(traceback.format_exception(\n exception, exception, exception.__traceback__))[:-1]\n\n log.debug(traceback_string)\n\n CACHE.set(cache_key, ('exception', exception,\n traceback_string.replace('Traceback', 'CACHEd traceback')))\n\n exception_counter += 1\n\n if (limit_exception and exception_counter >= limit_exception or\n limit and index + 1 >= limit):\n break\n\n log.debug('phonology {0}/{1}: {2} result{3}, {4} no vowels, {5} exceptions'.format(\n perspective_cid, perspective_oid,\n len(result_list), '' if len(result_list) == 1 else 's',\n no_vowel_counter, exception_counter))\n\n # If we have no results, we indicate the situation and also show number of failures and number of\n # markups with no vowels.\n\n if not result_list:\n request.response.status = HTTPPreconditionFailed.code\n\n return {\n \"error\": \"no markups for this query\",\n \"exception_counter\": exception_counter,\n \"no_vowel_counter\": no_vowel_counter}\n\n # Otherwise we create and then serve Excel file.\n\n excel_book = xlwt.Workbook(encoding = \"utf-8\")\n sheet = excel_book.add_sheet(\"Sheet 1\")\n\n sheet.write(0, 0, 'Transcription')\n sheet.write(0, 1, 'Longest (seconds) interval')\n sheet.write(0, 2, 'F1 (Hz)')\n sheet.write(0, 3, 'F2 (Hz)')\n sheet.write(0, 4, 'Highest intensity (dB) interval')\n sheet.write(0, 5, 'F1 (Hz)')\n sheet.write(0, 6, 'F2 (Hz)')\n sheet.write(0, 7, 'Coincidence')\n\n row_counter = 1\n\n for tier_result_list in result_list:\n for tier_number, tier_name, tier_result_seq_list in tier_result_list:\n\n if tier_result_seq_list == 'no_vowel':\n continue\n\n for tier_data in tier_result_seq_list:\n for 
index, tier_data_str in enumerate(tier_data):\n sheet.write(row_counter, index, tier_data_str)\n\n row_counter += 1\n\n # Formatting column widths.\n\n sheet.col(0).width = 24 * 256\n sheet.col(1).width = 24 * 256\n sheet.col(2).width = 12 * 256\n sheet.col(3).width = 12 * 256\n sheet.col(4).width = 24 * 256\n sheet.col(5).width = 12 * 256\n sheet.col(6).width = 12 * 256\n sheet.col(7).width = 12 * 256\n\n excel_stream = io.BytesIO()\n excel_book.save(excel_stream)\n excel_stream.seek(0)\n\n # See http://stackoverflow.com/questions/2937465/what-is-correct-content-type-for-excel-files for Excel\n # content-type.\n\n response = Response(content_type = 'application/vnd.ms-excel')\n\n response.app_iter = FileIter(excel_stream)\n response.headers['Content-Disposition'] = \"attachment; filename=phonology.xls\"\n\n return response", "def getHierarchies():", "def getHierarchies():", "def iterate_studies(self, start, end):\n pass", "def get_queryset(self):\n return Initiative.objects.filter(objective__perspective__description='Financial').order_by('objective')", "def _get_all_oshapes(self):\n an_iname = self.node_list[0]\n an_inode = self.builder.nodes[an_iname]\n an_ishape = an_inode.oshapes['loc']\n \n return {'main' : an_ishape,\n 'loc' : an_ishape,\n 'cov' : an_ishape + [an_ishape[-1]]}", "def findAtypicalTerms(self):\n self.atypicalTermsDict = collections.OrderedDict()\n distanceList = list()\n distance = 0\n for key in self.summaryFilteredDict:\n partitionName = str(key).split(\" :\")[0]\n partition = voc.getPartition(partitionName)\n modNames = partition.getModNames()\n currentModality = str(key).split(\": \")[1]\n indexCurrentModality = modNames.index(currentModality)\n coverCurrentModality = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,partitionName + \" : \" + currentModality) #cover(v',R)\n if coverCurrentModality > 0:\n for modality in partition.getModalities():\n coverModality = self.getCoverFromModalityInDictionnary(self.summaryFilteredDict,partitionName + \" : \" + modality.getName()) # cover(v,R)\n if modality.isTrapeziumModality():\n indexModality = modNames.index(modality.getName())\n distance = abs(indexCurrentModality - indexModality) / (partition.getNbModalities() - 1) #d(v,v')\n elif modality.isEnumModality():\n if (modality.getName() == currentModality):\n distance = 0\n else:\n distance = 1\n distanceList.append(min(distance, 1 - coverCurrentModality, coverModality)) # min(d(v,v'),cover(v,R),1-cover(v',R))\n self.atypicalTermsDict[partitionName + \" : \" + currentModality] = max(distanceList) # D(v',R)\n distanceList = list()", "def all_objects():\n objs = {}\n objs['Section'] = list(h.all_sec())\n objs['Segment'] = []\n for sec in objs['Section']:\n objs['Segment'].extend(list(sec.allseg()))\n objs['PointProcess'] = []\n for seg in objs['Segment']:\n objs['PointProcess'].extend(list(seg.point_processes()))\n \n return objs", "def demo_ortho_slicer():\n pl.clf()\n oslicer = OrthoSlicer(cut_coords=(0, 0, 0))\n from .anat_cache import _AnatCache\n map, affine, _ = _AnatCache.get_anat()\n oslicer.plot_map(map, affine, cmap=pl.cm.gray)\n return oslicer", "def open_iossifov_neuron_cohort():\n logging.info('getting Iossifov et al Neuron 2012 cohort')\n s1 = pandas.read_excel(supp_s1_url, sheet_name='SNV.v4.1-normlized')\n s2 = pandas.read_excel(supp_s2_url, sheet_name='suppLGKTable')\n s3 = pandas.read_excel(supp_s3_url, sheet_name='ID.v4.1-normlized')\n \n fam_ids = list(s1.quadId) + list(s2.quadId) + list(s3.quadId)\n members = list(s1.inChild) + 
list(s2.inChild) + list(s3.inChild)\n \n sex = ['M', 'F']\n affected = ['aut', 'sib']\n possible = list(itertools.product(affected, sex))\n study = ['10.1016/j.neuron.2012.04.009']\n \n persons = set()\n for fam, children in zip(fam_ids, members):\n for affected, sex in possible:\n string = f'{affected}{sex}'\n if string in children:\n status = ['unaffected'] if affected != 'aut' else ['HP:0000717']\n member = 's1' if affected != 'aut' else 'p1'\n sex = 'female' if sex == 'F' else 'male'\n person_id = f'{fam}.{member}|asd_cohorts'\n \n person = Person(person_id, sex, status, study)\n persons.add(person)\n \n return persons", "def make_openalex_dataset(dataset: ObservatoryDataset) -> List[dict]:\n\n result = []\n for paper in dataset.papers:\n entry = {\n \"id\": str(paper.id),\n \"doi\": f\"https://doi.org/{paper.doi}\",\n \"cited_by_count\": len(paper.cited_by),\n \"concepts\": [\n {\"id\": str(fos.id), \"display_name\": fos.name, \"level\": fos.level} for fos in paper.fields_of_study\n ],\n \"authorships\": [\n {\n \"author\": {\n \"id\": str(author.id),\n \"display_name\": author.name,\n },\n \"institutions\": [\n {\n \"id\": str(author.institution.id),\n \"ror\": author.institution.ror_id,\n \"display_name\": author.institution.name,\n \"country_code\": author.institution.country_code,\n \"type\": author.institution.types,\n }\n ],\n }\n for author in paper.authors\n ],\n }\n result.append(entry)\n\n return result", "def salome_study_init(theStudyId=0):\n\n global salome_study_initial\n global myStudyManager, myStudyId, myStudy, myStudyName\n global orb, lcc, naming_service, cm\n\n if salome_study_initial:\n salome_study_initial = 0\n\n orb, lcc, naming_service, cm = salome_kernel.salome_kernel_init()\n\n # get Study Manager reference\n if verbose(): print \"looking for studyManager ...\"\n obj = naming_service.Resolve('myStudyManager')\n myStudyManager = obj._narrow(SALOMEDS.StudyManager)\n if verbose(): print \"studyManager found\"\n\n # get active study Id, ref and name\n myStudyId = getActiveStudy(theStudyId)\n if verbose(): print \"myStudyId\",myStudyId\n myStudy = myStudyManager.GetStudyByID(myStudyId)\n myStudyName = myStudy._get_Name()\n\n return myStudyManager, myStudyId, myStudy, myStudyName", "def dbgap_research_studies_with_observations(dbgap_server, search_research_studies_with_observations):\n return search_research_studies_with_observations.perform_resources(dbgap_server.server)", "def get_soma_objects(self):\n\n msg_store = MessageStoreProxy(database=\"soma2data\", collection=\"soma2\")\n objs = msg_store.query(SOMA2Object._type, message_query={\"map_name\":self.soma_map,\"config\":self.soma_conf})\n print \"queried soma2 objects >> \", objs\n self.soma_objects = ce.get_soma_objects()\n print \"hard coded objects >> \", [self.soma_objects[r].keys() for r in self.soma_objects.keys()]", "def examples(self):\n for obj_ind in range(len(self.objects)):\n yield self.get_object_intent_by_index(obj_ind)", "def orthonormation_method(standardized_methods_cleaned):\n method_standardized_ortho = standardized_methods_cleaned.copy(deep=True)\n\n categories = method_standardized_ortho.columns.tolist()\n\n # Normation of the first category\n method_standardized_ortho[categories[0]] = method_standardized_ortho[categories[0]] / \\\n linalg.norm(method_standardized_ortho[categories[0]])\n\n # Normation of every following categories\n j = 0\n while j < len(categories):\n i = 0\n while i < j:\n # Calculates the orthogonal projection of j on each i and substraction of the projection from j\n 
method_standardized_ortho[categories[j]] = \\\n method_standardized_ortho[categories[j]] - method_standardized_ortho[categories[i]] * (\n sum(method_standardized_ortho[categories[i]] * method_standardized_ortho[categories[j]]) /\n sum(method_standardized_ortho[categories[i]] * method_standardized_ortho[categories[i]]))\n if linalg.norm(method_standardized_ortho[categories[j]]) == 0:\n # If after the projection, if the j columns is null it is droped (i.e it is linearly dependant with\n # the other columns) and the inner loop stops\n method_standardized_ortho.drop(method_standardized_ortho.columns[j], inplace=True, axis=1)\n categories.remove(categories[j])\n\n break\n else:\n # If the j column is not null, it is normed and the inner while loop keeps going\n method_standardized_ortho[categories[j]] = method_standardized_ortho[categories[j]] / \\\n (linalg.norm(method_standardized_ortho[categories[j]]))\n i += 1\n j += 1\n\n return method_standardized_ortho", "def test_ResearchStudy_Observations(all_research_studies_with_observations):\n all_observations = {r.relativePath(): r for r in all_research_studies_with_observations if\n r.__class__.__name__ == 'Observation'}\n all_research_studies = {r.relativePath(): r for r in all_research_studies_with_observations if\n r.__class__.__name__ == 'ResearchStudy'}\n research_studies_missing_observations = dict(**all_research_studies)\n for observation in all_observations.values():\n del research_studies_missing_observations[observation.focus[0].reference]\n assert len(\n research_studies_missing_observations) == 0, f\"ResearchStudies missing Observations {[r.id for r in research_studies_missing_observations.values()]}\"", "def clarify_objects(self): \n dict_cxt = dict(list(zip(list(map(tuple, self)), self.objects)))\n table = list(map(list, list(dict_cxt.keys())))\n objects = list(dict_cxt.values())\n return Context(table, objects, self.attributes)", "def generate_voc(self):\n\n observations = [\"walk\", \"shop\", \"clean\", \"tennis\", \"read\"]\n states = [\"sunny\", \"rainy\", \"snowy\"]\n\n # Sort them alphabetically, just to be on the safe side\n observations.sort()\n states.sort()\n\n return (observations, states)", "def variations():", "def get_all_object_names(self):\n o_objects = []\n for s in [\"Non Model\", \"Solids\", \"Unclassified\", \"Sheets\", \"Lines\"]:\n o_objects += self.design.modeler.get_objects_in_group(s)\n return o_objects", "def get_representation(self, syl):\n \n first = syl[0]\n second = syl[1]\n \n return [self.representation[first], self.representation[second]]", "def _get_all_oshapes(self):\n iseq_name = self.node_list[0]\n iseq = self.builder.nodes[iseq_name]\n iseq_mainshape = iseq.oshapes['main']\n \n return {'main' : iseq_mainshape,\n 'loc' : iseq_mainshape,\n 'invscaled' : iseq_mainshape + [iseq_mainshape[-1]],\n 'invscaleoffd' : iseq_mainshape + [iseq_mainshape[-1]]}", "def _get_objects(\n self, model: Model, old_objects: List[Viewable], doc: Document,\n root: Model, comm: Optional[Comm] = None\n ):\n from ..pane.base import RerenderError, panel\n new_models, old_models = [], []\n for i, pane in enumerate(self.objects):\n pane = panel(pane)\n self.objects[i] = pane\n\n for obj in old_objects:\n if obj not in self.objects:\n obj._cleanup(root)\n\n current_objects = list(self.objects)\n ref = root.ref['id']\n for i, pane in enumerate(self.objects):\n if pane in old_objects and ref in pane._models:\n child, _ = pane._models[root.ref['id']]\n old_models.append(child)\n else:\n try:\n child = pane._get_model(doc, root, 
model, comm)\n except RerenderError as e:\n if e.layout is not None and e.layout is not self:\n raise e\n e.layout = None\n return self._get_objects(model, current_objects[:i], doc, root, comm)\n new_models.append(child)\n return new_models, old_models", "def get_queryset(self):\n return Initiative.objects.filter(objective__perspective__description='Processes').order_by('objective')", "def get_source_studies(self):\n return list(set([trait.source_dataset.source_study_version.study for trait in self.get_all_source_traits()]))", "def get_queryset(self):\n return Objective.objects.filter(perspective__description='Financial').order_by('code')", "def getLandsatCollection():\n ## standardize band names\n bn8 = ['B1', 'B2', 'B3', 'B4', 'B6', 'pixel_qa', 'B5', 'B7']\n bn7 = ['B1', 'B1', 'B2', 'B3', 'B5', 'pixel_qa', 'B4', 'B7']\n bn5 = ['B1', 'B1', 'B2', 'B3', 'B5', 'pixel_qa', 'B4', 'B7']\n bns = ['uBlue', 'Blue', 'Green', 'Red', 'Swir1', 'BQA', 'Nir', 'Swir2']\n\n # create a merged collection from landsat 5, 7, and 8\n ls5 = ee.ImageCollection(\"LANDSAT/LT05/C01/T1_SR\").select(bn5, bns)\n\n ls7 = (ee.ImageCollection(\"LANDSAT/LE07/C01/T1_SR\")\n .filterDate('1999-04-15', '2003-05-30')\n .select(bn7, bns))\n\n ls8 = ee.ImageCollection(\"LANDSAT/LC08/C01/T1_SR\").select(bn8, bns)\n\n merged = ls5.merge(ls7).merge(ls8)\n\n return(merged)", "def get_modeller_objects(self):\n return self._modpt, self.__edat.modpt, self.__libs.modpt", "def sngl_obj_evo(self, lacking):\n prob, algo = self.probinit('jde', 0)\n l = list()\n u = 6+(self.N-3)*4\n for i in range(lacking):\n archi = archipelago(algo,prob,8,16, topology=fully_connected())\n for j in range(u):\n archi.evolve(5)\n stdout.write(\"\\r{0} / {1}\".format(i*u+j+1, lacking*u))\n stdout.flush()\n tmp = [isl for isl in archi]\n tmp.sort(key = lambda x: x.population.champion.f[0]);\n l.append(tmp[0].population.champion)\n stdout.write(\" Done. 
\")\n return l, prob", "def phenotypes(self):\n\t\treturn Phenotype.PhenotypesByPatient(self.id, self.host)", "def get_designs(self):", "def _get_subobjects(self) -> Iterable[SymbolicObject]:\n\n return self._subobjects", "def test_returns_all_studies_with_archived_tagged_traits(self):\n tag = TagFactory.create()\n tagged_traits = []\n for study in self.studies:\n tmp = TaggedTraitFactory.create(trait__source_dataset__source_study_version__study=study,\n archived=True, tag=tag)\n tagged_traits.append(tmp)\n get_data = {'q': ''}\n response = self.client.get(self.get_url(), get_data)\n pks = get_autocomplete_view_ids(response)\n self.assertEqual(sorted([study.pk for study in self.studies]), sorted(pks))", "def _orthogonal_init(self):\n # if is a conv layer, will need to reshape to fan in matrix,\n # which is of dimension\n # num input feature maps * filter height * filter width\n if(len(self.dims) > 2):\n rv_samp = np.random.randn(self.dims[2],\n self.dims[0] * self.dims[1] * self.dims[3])\n out_sigma = np.sqrt(1.0 / rv_samp.shape[1])\n # otherwise will be a densely connected layer\n else:\n rv_samp = np.random.randn(self.dims[0], self.dims[1])\n out_sigma = np.sqrt(1.0 / rv_samp.shape[0])\n # perform SVD\n U, _, V = np.linalg.svd(rv_samp, full_matrices=False) #pylint: disable=invalid-name\n # both U and V are orthoginal matricies, so will choose the one\n # that is the correct dimensions for our layer\n ortho_matrix = U if U.shape == rv_samp.shape else V\n # rescale so it is unit variance for each vector\n # print(\"std(q) = {}\".format(np.std(q)))\n ortho_norm = (ortho_matrix / np.std(ortho_matrix)) * out_sigma\n #print(\"std(qs) = {}\".format(np.std(qs)))\n #print(q.shape)\n return ortho_norm.reshape(self.dims).astype(np.float32)", "def undulations(**kwargs):\n\n\t#---parameters\n\tsn = kwargs['sn']\n\twork = kwargs['workspace']\n\tcalc = kwargs['calc']\n\tupname = 'lipid_abstractor'\n\tgrid_spacing = calc['specs']['grid_spacing']\n\tvecs = datmerge(kwargs,upname,'vecs')\n\tnframes = int(np.sum(datmerge(kwargs,upname,'nframes')))\n\ttrajectory = datmerge(kwargs,upname,'points')\n\tattrs,result = {},{}\n\t#---! hacking through error with monolayer separation\n\ttry: monolayer_indices = kwargs['upstream'][upname+'0']['monolayer_indices']\n\texcept: monolayer_indices = kwargs['upstream'][upname]['monolayer_indices']\n\t#---choose grid dimensions\n\tgrid = np.array([round(i) for i in np.mean(vecs,axis=0)/grid_spacing])[:2]\n\t#---! 
removed timeseries from result for new version of omnicalc\n\t#---parallel\n\tmesh = [[],[]]\n\tfor mn in range(2):\n\t\tstart = time.time()\n\t\tmesh[mn] = Parallel(n_jobs=work.nprocs,verbose=0,require='sharedmem')(\n\t\t\tdelayed(makemesh_regular)(\n\t\t\t\ttrajectory[fr][np.where(monolayer_indices==mn)],vecs[fr],grid)\n\t\t\tfor fr in framelooper(nframes,start=start,text='monolayer %d, frame'%mn))\n\tchecktime()\n\n\t#---pack\n\tresult['mesh'] = np.array(mesh)\n\tresult['grid'] = np.array(grid)\n\tresult['nframes'] = np.array(nframes)\n\tresult['vecs'] = vecs\n\tattrs['grid_spacing'] = grid_spacing\n\treturn result,attrs", "def latents(self):\n self.assert_sampled()\n return self._representations", "def _get_halo(self,i):\n if self._order is False:\n if self._subs is True:\n #this needs to be tested again on a snapshot that is not ordered!\n x = Halo(i, self, self.base, np.where(np.in1d(self.base['iord'], self.ids[self._subhalodat['sub_off'][i]:self._subhalodat['sub_off'][i]+self._subhalodat['sub_len'][i]] )))\n else:\n x = Halo(i, self, self.base, np.where(np.in1d(self.base['iord'], self.ids[self._halodat['group_off'][i]:self._halodat['group_off'][i]+self._halodat['group_len'][i]] )))\n \n else:\n if self._subs is False: #to use groups as halos:\n x = Halo(i, self, self.base, self.ids[self._halodat['group_off'][i]:self._halodat['group_off'][i]+self._halodat['group_len'][i]] ) \n else:\n x=Halo(i, self, self.base, self.ids[self._subhalodat['sub_off'][i]:self._subhalodat['sub_off'][i]+self._subhalodat['sub_len'][i]] )\n \n x._descriptor = \"halo_\"+str(i)\n x.properties.update(self.get_halo_properties(i))\n return x", "def kids_first_research_studies_with_observations(caplog, kids_first_server, search_research_studies_with_observations):\n # caplog.set_level(logging.DEBUG)\n return search_research_studies_with_observations.perform_resources(kids_first_server.server)", "def get_session_researcher_study_ids():\n session_researcher = get_session_researcher()\n if session_researcher.site_admin:\n return Study.objects.exclude(deleted=True).values_list(\"id\", flat=True)\n else:\n return session_researcher.study_relations.filter(study__deleted=False).values_list(\"study__id\", flat=True)", "def make_objects(self):\n pass", "def representations(self):\n self.assert_sampled()\n return self._representations", "def GetObjects(self): \r\n return self.model.GetObjects()", "def get_queryset(self):\n return Objective.objects.filter(perspective__description='Learning and Capacity').order_by('code')", "def samples(self):\n samples = set()\n for trio in self.trios:\n if trio.child is None or trio.mother is None or trio.father is None:\n continue\n samples.add(trio.father)\n samples.add(trio.mother)\n samples.add(trio.child)\n return list(samples)", "def newAnalyzer():\n analyzer = {'crimes': None,\n 'dateIndex': None,\n 'autors': None,\n 'instrumentalness': None,\n 'tempo':None,\n 'liveness':None,\n 'speechiness':None,\n 'danceability':None,\n 'valence':None,\n 'loudness':None,\n 'acousticness':None,\n 'energy':None,\n 'generos':None\n }\n\n analyzer['crimes'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['ids'] = lt.newList('ARRAY_LIST', compareIds)\n analyzer['dateIndex'] = om.newMap(omaptype='RBT',\n comparefunction=compareDates)\n\n analyzer['autors'] = om.newMap(omaptype='RBT',\n comparefunction=compareAUTOR)\n\n analyzer['instrumentalness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['tempo'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n 
analyzer['liveness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['speechiness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['danceability'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt) \n analyzer['valence'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['loudness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['acousticness'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt)\n analyzer['energy'] = om.newMap(omaptype='RBT',\n comparefunction=compareInt) \n\n analyzer['generos']= m.newMap(11,\n maptype='CHAINING',\n loadfactor=4.0)\n \n return analyzer", "def orbit(self):\n return [x for x in TransitiveIdeal(attrcall('simple_reflections'), [self])]", "def structures(self):\n pdb = self.name\n residues = self.__residues__(pdb)\n return Structure(list(residues), pdb=pdb)", "def getCriticStation(analyzer):\n mayIn = model.getRankMay(analyzer,\"in\")\n mayOut=model.getRankMay(analyzer,\"out\")\n less=model.getRankMen(analyzer,\"LessPopular\")\n return (mayIn,mayOut,less)", "def top_sources_male(args: Dict[str, Any]) -> List[object]:\n query = [\n {\n \"$match\": {\n \"body\": {\"$ne\": \"\"},\n \"quotesUpdated\": {\"$exists\": True},\n \"outlet\": {\"$in\": args[\"outlets\"]},\n \"publishedAt\": {\n \"$gte\": args[\"begin_date\"],\n \"$lt\": args[\"end_date\"] + timedelta(days=1),\n },\n }\n },\n {\"$project\": {\"outlet\": 1.0, \"sourcesMale\": 1.0}},\n {\"$unwind\": {\"path\": \"$sourcesMale\", \"preserveNullAndEmptyArrays\": False}},\n {\"$group\": {\"_id\": \"$sourcesMale\", \"count\": {\"$sum\": 1.0}}},\n {\"$sort\": {\"count\": args[\"sort\"]}},\n {\"$limit\": args[\"limit\"]},\n ]\n return query", "def __init__(self):\n self.rho=[]\n self.te=[]\n self.ti=[]\n self.ne=[]\n self.ni=[]\n self.ni1=[]\n self.ni2=[]\n self.ni3=[]\n self.vtor=[]\n self.zeff=[]\n\n self.nion=1\n self.Z=[]\n self.A=[]\n self.coll_mode=[]", "def get_queryset(self):\n return Initiative.objects.order_by('objective')", "def sdc_to_distributions(self, mysdc):\n if \"right\" in mysdc[\"verb\"]:\n D_mat = transpose(self.T_mat_right)\n elif \"left\" in mysdc[\"verb\"]:\n D_mat = transpose(self.T_mat_left)\n else:\n D_mat = transpose(self.T_mat_str)\n \n T_mat = ones([len(D_mat), len(D_mat[0])])*1.0\n \n \n if mysdc[\"sr\"] != None and len(mysdc[\"landmarks\"]) > 0 and self.use_spatial_relations:\n\n sr_i = self.sr_class.engineToIdx(mysdc[\"sr\"])\n SR_mat = self.srel_mat[sr_i,:,:,:]\n L_mat = self.get_prob_landmark_given_sdc_modifiers(mysdc)\n L_mat_entropy = entropy(L_mat)\n print \"using spatial relations\", str(mysdc)\n if L_mat_entropy > 2 and False:\n SR_mat = None\n L_mat = None\n else:\n SR_mat = None\n L_mat = None\n \n if mysdc[\"landmark\"] != None:\n O_mat = self.O_mat[:,self.names_to_index[mysdc[\"landmark\"]]]\n else:\n O_mat = None\n \n return O_mat, T_mat, SR_mat, L_mat, D_mat", "def merge(cls, analyses):\r\n raise NotImplementedError()", "def substantiate():", "def hispaniola_models():\n return [\n ('GFS (6-hr steps, 7 days)', 'gfs'),\n ('WRF-PR (1-hr steps, 2 days)', 'wrfpr'),\n ]", "def _get_orthogonal_states(self, and_state):\n if and_state not in self.and_states:\n states = and_state.get_orthogonal_states()\n states.insert(0, and_state)\n self.and_states[and_state] = states\n return self.and_states[and_state]", "def serotype_escherichia(metadata, analysistype):\n for sample in metadata:\n # Initialise negative results to be overwritten when necessary\n sample[analysistype].best_o_pid = 
'-'\n sample[analysistype].o_genes = ['-']\n sample[analysistype].o_set = ['-']\n sample[analysistype].best_h_pid = '-'\n sample[analysistype].h_genes = ['-']\n sample[analysistype].h_set = ['-']\n if sample.general.bestassemblyfile != 'NA':\n if sample.general.closestrefseqgenus in ['Escherichia', 'Shigella']:\n o = dict()\n h = dict()\n for result, percentid in sample[analysistype].blastresults.items():\n if 'O' in result.split('_')[-1]:\n o.update({result: float(percentid)})\n if 'H' in result.split('_')[-1]:\n h.update({result: float(percentid)})\n # O\n try:\n sorted_o = sorted(o.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_o_pid = str(sorted_o[0][1])\n\n sample[analysistype].o_genes = [gene for gene, pid in o.items()\n if str(pid) == sample[analysistype].best_o_pid]\n sample[analysistype].o_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].o_genes))\n except (KeyError, IndexError):\n pass\n # H\n try:\n sorted_h = sorted(h.items(), key=operator.itemgetter(1), reverse=True)\n sample[analysistype].best_h_pid = str(sorted_h[0][1])\n sample[analysistype].h_genes = [gene for gene, pid in h.items()\n if str(pid) == sample[analysistype].best_h_pid]\n sample[analysistype].h_set = \\\n list(set(gene.split('_')[-1] for gene in sample[analysistype].h_genes))\n except (KeyError, IndexError):\n pass\n return metadata", "def build_from_landscape(self, landscape):\n assert(self.models_loaded)\n\n self.farms=list()\n for p in landscape.premises:\n f=self.farm_models[p.production_type].clone(p.name, p.size)\n self.farms.append(f)\n\n #### airborne\n if len(self.spread_models)>0:\n a_spread_model=next (iter (self.spread_models.values()))\n special_factor=a_spread_model.herd_factor(landscape.premises)\n for v in self.spread_models.values():\n v.set_special_factor(special_factor)\n self.airborne=list()\n for aidx, bidx in itertools.permutations(range(len(self.farms)), 2):\n a=self.farms[aidx]\n b=self.farms[bidx]\n from_type=landscape.premises[aidx].production_type\n to_type=landscape.premises[bidx].production_type\n dx=landscape.distances[aidx, bidx]\n air_model=self.spread_models[(from_type, to_type)].clone(\n a, b, dx)\n self.airborne.append(air_model)\n else:\n self.airborne=list()\n\n ### Indirect and Direct Contact\n self.indirect=list()\n if len(self.contact_models)>0 and self.use_indirect:\n for ind_idx in range(len(self.farms)):\n f=self.farms[ind_idx]\n p=landscape.premises[ind_idx]\n for cm in self.contact_models[p.production_type]:\n im=cm.clone(self.farms, landscape, ind_idx)\n self.indirect.append(im)", "def toggled_objects(self):\n\t\tobjects = []\n\t\tfor i in range(len(self.object_list)):\n\t\t\tif(self.toggles[i]):\n\t\t\t\tnext_object = (self.object_list[i], self.quantities[i])\n\t\t\t\tobjects.append(next_object)\n\t\treturn objects", "def get_queryset(self):\n return Initiative.objects.filter(objective__perspective__description='Customer').order_by('objective')", "def get_common_food(cls):\n objs = cls.objects\n return objs", "def get_objects(si, args):\n # Get datacenter object.\n datacenter_list = si.content.rootFolder.childEntity\n \"\"\"\n if args.datacenter_name:\n datacenter_obj = get_obj_in_list(args.datacenter_name, datacenter_list)\n else:\n \"\"\"\n datacenter_obj = datacenter_list[0]\n\n # Get datastore object.\n datastore_list = datacenter_obj.datastoreFolder.childEntity\n \"\"\"if args.datastore_name:\n datastore_obj = get_obj_in_list(args.datastore_name, datastore_list)\n elif len(datastore_list) > 0:\"\"\"\n 
datastore_obj = datastore_list[0]\n #else:\n # print \"No datastores found in DC (%s).\" % datacenter_obj.name\n\n # Get cluster object.\n cluster_list = datacenter_obj.hostFolder.childEntity\n \"\"\"if args.cluster_name:\n cluster_obj = get_obj_in_list(args.cluster_name, cluster_list)\n elif len(cluster_list) > 0:\"\"\"\n cluster_obj = cluster_list[0]\n #else:\n # print \"No clusters found in DC (%s).\" % datacenter_obj.name\n\n # Generate resource pool.\n resource_pool_obj = cluster_obj.resourcePool\n\n return {\"datacenter\": datacenter_obj,\n \"datastore\": datastore_obj\n ,\"resource pool\": resource_pool_obj}", "def getAncestors():", "def get_objects(self, image=None):\n output_dict = self.run_inference_for_single_image(image)\n return output_dict, self.category_index", "def find_own_objects(cs):\n own_objects = {}\n for con in cs:\n own_objects[con] = []\n for obj in con.extent:\n own_objects[con].append(obj)\n for sub_con in cs:\n if sub_con.extent < con.extent and\\\n obj in sub_con.extent:\n own_objects[con].pop()\n break\n return own_objects", "def get_variations_urls(self, obj):\n\n # Initiate return object\n return_object = {}\n\n # Get the field of the object\n field = obj.field\n\n # A lot of ifs going around, first check if it has the field variations\n if hasattr(field, 'variations'):\n # Get the variations\n variations = field.variations\n # Go through the variations dict\n for key in variations.keys():\n # Just to be sure if the stdimage object has it stored in the obj\n if hasattr(obj, key):\n # get the by stdimage properties\n field_obj = getattr(obj, key, None)\n if field_obj and hasattr(field_obj, 'url'):\n # store it, with the name of the variation type into our return object\n return_object[key] = super(StdImageFieldSerializer, self).to_representation(field_obj)\n\n # Also include the original (if possible)\n try:\n if hasattr(obj, 'url'):\n return_object['original'] = super(StdImageFieldSerializer, self).to_representation(obj)\n except ValueError:\n pass\n\n return return_object", "def writeOrthologSets(outfile, nexus,\n extract_species,\n extract_gene,\n options,\n reference_tree=None,\n method=\"strict\",\n outgroups=None):\n\n ######################################################################\n # build species set to compare\n sets = []\n species = options.column2org\n nspecies = len(species)\n\n if options.enumeration == \"monophyletic\":\n if reference_tree:\n for members, h1, h2 in TreeTools.GetSubsets(reference_tree):\n if len(members) > 1:\n sets.append(members)\n else:\n raise \"please specify a species tree for monophyletic enumeration\"\n\n elif options.enumeration == \"exhaustive\":\n for x in range(2, len(species)):\n sets += list(SetTools.xuniqueCombinations(species, x))\n sets.append(species)\n\n elif options.enumeration == \"pairwise\":\n\n for x in range(len(species) - 1):\n for y in range(x + 1, len(species)):\n sets.append((species[x], species[y]))\n\n elif options.enumeration == \"full\":\n sets.append(species)\n\n elif options.enumeration == \"lineage\":\n for s in species:\n sets.append((s,))\n\n elif options.enumeration == \"explicit\":\n for x in range(2, len(options.species_set)):\n sets += list(SetTools.xuniqueCombinations(options.species_set, x))\n sets.append(options.species_set)\n\n ######################################################################\n # build sets with positional information\n xsets = []\n map_frozenset2set = {}\n for x in range(len(sets)):\n ss = frozenset(map(lambda x: options.org2column[x], sets[x]))\n 
xsets.append(ss)\n map_frozenset2set[ss] = x\n\n ######################################################################\n # collect outgroups\n if outgroups:\n noutgroups = set()\n for x in outgroups:\n noutgroups.add(options.org2column[x])\n else:\n noutgroups = None\n\n ######################################################################\n # loop over each tree and set\n # I did not see a way to loop a tree once for all sets without doing\n # complicated counting. The problem is that counting has to be stopped\n # at different tree heights for different sets.\n ninput, noutput, nempty, nskipped = 0, 0, 0, 0\n\n counts = [0] * len(sets)\n\n options.stdout.write(\n \"nspecies\\tname\\tid\\tcluster\\tpattern\\t%s\\tnode_id\\tmembers\\n\" % \"\\t\".join(species))\n\n cluster_id = 0\n nerrors = 0\n\n for tree in nexus.trees:\n\n ninput += 1\n ntotal_tree = 0\n\n if options.loglevel >= 3:\n options.stdlog.write(\"# processing tree %s\\n\" % tree.name)\n\n if options.reroot:\n rerootTree(tree, extract_species, options)\n\n for c in range(len(xsets)):\n # numbered species set: 0,1,...\n sn = xsets[c]\n # literal species set: species1, species2, ...\n sl = sets[c]\n\n ortholog_nodes = getOrthologNodes(tree, sn, options, selector=method,\n outgroups=noutgroups)\n ntotal_tree += len(ortholog_nodes)\n\n n = 0\n\n pattern = buildPattern(nspecies, sn)\n\n # check for inconsistent partitions (the same gene in different\n # ortholog clusters) within the current tree\n found_genes = set()\n ortho_sets = set()\n\n # reverse ortholog_node - work in top-down manner.\n ortholog_nodes.reverse()\n\n for node_id, members in ortholog_nodes:\n n += 1\n cluster_id += 1\n\n otus = filter(\n lambda x: extract_species(x) in sl, tree.get_taxa(node_id))\n genes = set(map(extract_gene, otus))\n\n if found_genes.intersection(genes):\n\n # only take largest cluster for lineage specific\n # duplications\n if method == \"lineage\":\n continue\n\n if frozenset(genes) in ortho_sets:\n nskipped += 1\n if options.loglevel >= 1:\n options.stdlog.write(\"# %s: cluster %i: redundant node: %i - skipped because already present: %s\\n\" %\n (tree.name, n, node_id, str(found_genes.intersection(genes))))\n else:\n nerrors += 1\n if options.loglevel >= 1:\n options.stdlog.write(\"# %s: cluster %i: inconsistent node: %i - the same gene in different clusters: %s\\n\" %\n (tree.name, n, node_id, str(found_genes.intersection(genes))))\n\n found_genes = found_genes.union(genes)\n ortho_sets.add(frozenset(genes))\n\n xpattern = buildPattern(nspecies, sn, members)\n\n options.stdout.write(\"%i\\t%s\\t%i\\t%i\\t%s\\t%s\\t%i\\t%s\\n\" % (len(sl),\n tree.name,\n n,\n cluster_id,\n \"\".join(\n pattern),\n \"\\t\".join(\n xpattern),\n node_id,\n \";\".join(otus)))\n\n counts[c] += n\n\n if ntotal_tree == 0:\n nempty += 1\n else:\n noutput += 1\n\n if options.loglevel >= 1:\n options.stdout.write(\"# ninput=%i, nempty=%i, noutput=%i, nskipped=%i, nerrors=%i\\n\" % (\n ninput, nempty, noutput, nskipped, nerrors))\n\n # write summary information\n\n if options.filename_summary:\n outfile = open(options.filename_summary, \"w\")\n else:\n outfile = options.stdout\n outfile.write(\"//\\n\")\n\n outfile.write(\"cluster\\tpattern\\tcounts\\t%s\\n\" % (\"\\t\".join(species)))\n\n for c in range(len(xsets)):\n pattern = buildPattern(nspecies, xsets[c])\n outfile.write(\"%i\\t%s\\t%i\\t%s\\n\" % (c,\n \"\".join(pattern),\n counts[c],\n \"\\t\".join(pattern)))\n\n if outfile != options.stdout:\n outfile.close()", "def detect_objects(snap):\n client = 
vision.ImageAnnotatorClient()\n print(snap)\n\n with open(snap, 'rb') as im_file:\n content = im_file.read()\n image = vision.Image(content=content)\n\n objects = client.object_localization(image=image).localized_object_annotations\n\n print(f\"Found {len(objects)} objects\")\n [print(f\"{objet.name} : {round(objet.score*100,2)}\") for objet in objects]\n \n return objects", "def create_objects(self, user, instrument):\n created_ids = set() # IDs for created objects.\n\n # ONLY applicable to Likert Questions.\n qtypes = {t.lower() for t, q in instrument.questions}\n if 'likertquestion' not in qtypes and len(qtypes) != 1:\n raise ValueError(\"A SurveyResult is only valid for LikertQuestions\")\n\n questions = [q for qt, q in instrument.questions]\n if len(questions) % 2 != 0:\n # Maybe survey is incomplete?\n raise ValueError(\"Instruments must have even number of questions\")\n\n # TODO: There's probably a nicer way to do this :-/\n # Split the questions into two subscales; they should be ordered\n # correctly by default.\n middle = int(len(questions) - len(questions) / 2)\n a, b = questions[:middle], questions[middle:]\n\n try:\n for q1, q2 in zip(a, b):\n # Q1 - Q2; discard anything less than zero (keep 0)\n # We need the user's responses to these questions.\n r1 = q1.likertresponse_set.filter(user=user).latest()\n r2 = q2.likertresponse_set.filter(user=user).latest()\n score = max(r1.selected_option - r2.selected_option, 0)\n labels = list(set(q1.labels + q2.labels))\n\n obj = self.create(\n user=user,\n instrument=instrument,\n score=score,\n labels=labels\n )\n created_ids.add(obj.id)\n except ObjectDoesNotExist:\n return self.get_queryset().none()\n\n return self.get_queryset().filter(id__in=created_ids)", "def generate_asteroid_perspectives(self) -> list:\n for asteroid in self.asteroids:\n asteroid.generate_perspective()", "def make_studyforrest_mockup(path):\n public = create(opj(path, 'public'), description=\"umbrella dataset\")\n # the following tries to capture the evolution of the project\n phase1 = public.create('phase1',\n description='old-style, no connection to RAW')\n structural = public.create('structural', description='anatomy')\n tnt = public.create('tnt', description='image templates')\n tnt.clone(source=phase1.path, path=opj('src', 'phase1'), reckless='auto')\n tnt.clone(source=structural.path, path=opj('src', 'structural'), reckless='auto')\n aligned = public.create('aligned', description='aligned image data')\n aligned.clone(source=phase1.path, path=opj('src', 'phase1'), reckless='auto')\n aligned.clone(source=tnt.path, path=opj('src', 'tnt'), reckless='auto')\n # new acquisition\n labet = create(opj(path, 'private', 'labet'), description=\"raw data ET\")\n phase2_dicoms = create(opj(path, 'private', 'p2dicoms'), description=\"raw data P2MRI\")\n phase2 = public.create('phase2',\n description='new-style, RAW connection')\n phase2.clone(source=labet.path, path=opj('src', 'labet'), reckless='auto')\n phase2.clone(source=phase2_dicoms.path, path=opj('src', 'dicoms'), reckless='auto')\n # add to derivatives\n tnt.clone(source=phase2.path, path=opj('src', 'phase2'), reckless='auto')\n aligned.clone(source=phase2.path, path=opj('src', 'phase2'), reckless='auto')\n # never to be published media files\n media = create(opj(path, 'private', 'media'), description=\"raw data ET\")\n # assuming all annotations are in one dataset (in reality this is also\n # a superdatasets with about 10 subdatasets\n annot = public.create('annotations', description='stimulus annotation')\n 
annot.clone(source=media.path, path=opj('src', 'media'), reckless='auto')\n # a few typical analysis datasets\n # (just doing 3, actual status quo is just shy of 10)\n # and also the real goal -> meta analysis\n metaanalysis = public.create('metaanalysis', description=\"analysis of analyses\")\n for i in range(1, 3):\n ana = public.create('analysis{}'.format(i),\n description='analysis{}'.format(i))\n ana.clone(source=annot.path, path=opj('src', 'annot'), reckless='auto')\n ana.clone(source=aligned.path, path=opj('src', 'aligned'), reckless='auto')\n ana.clone(source=tnt.path, path=opj('src', 'tnt'), reckless='auto')\n # link to metaanalysis\n metaanalysis.clone(source=ana.path, path=opj('src', 'ana{}'.format(i)),\n reckless='auto')\n # simulate change in an input (but not raw) dataset\n create_tree(\n aligned.path,\n {'modification{}.txt'.format(i): 'unique{}'.format(i)})\n aligned.save()\n # finally aggregate data\n aggregate = public.create('aggregate', description='aggregate data')\n aggregate.clone(source=aligned.path, path=opj('src', 'aligned'), reckless='auto')\n # the toplevel dataset is intentionally left dirty, to reflect the\n # most likely condition for the joint dataset to be in at any given\n # point in time", "def get_activations_from_studies(cls, studies):\n\n activations = cls.query.filter(\n cls.pmid.in_(studies), cls.location_id < 81925).all()\n\n return activations", "def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()", "def view_all_tracks_as_pairs(self):\n self.command(\"viewaspairs\")", "def observations(store, loqusdb, case_obj, variant_obj):\n composite_id = (\"{this[chromosome]}_{this[position]}_{this[reference]}_\"\n \"{this[alternative]}\".format(this=variant_obj))\n obs_data = loqusdb.get_variant({'_id': composite_id}) or {}\n obs_data['total'] = loqusdb.case_count()\n\n obs_data['cases'] = []\n institute_id = variant_obj['institute']\n for case_id in obs_data.get('families', []):\n if case_id != variant_obj['case_id'] and case_id.startswith(institute_id):\n other_variant = store.variant(variant_obj['variant_id'], case_id=case_id)\n other_case = store.case(case_id)\n obs_data['cases'].append(dict(case=other_case, variant=other_variant))\n\n return obs_data", "def get_stone_tests():\n stone_tests = []\n for rotation in stones_and_potions.possible_rotations():\n for sm in stones_and_potions.possible_stone_maps():\n stone_tests.append(\n ([(stones_and_potions.unalign(sm.apply_inverse(l), rotation), l)\n for l in stones_and_potions.possible_latent_stones()],\n unity_python_conversion.to_stone_unity_properties,\n functools.partial(from_stone_unity_properties, rotation=rotation),\n lambda x: x, _make_tuple))\n return stone_tests", "def rtsobjects():\n pass", "def skill_pedagogic_ressources(request, type, slug):\n\n if type == 'skill':\n base = get_object_or_404(Skill, code=slug)\n elif type == 'section':\n base = get_object_or_404(Section, id=slug)\n else:\n # type == 'coder'\n base = get_object_or_404(CodeR, id=slug)\n\n personal_resource = base.resource.filter(section=\"personal_resource\")\n lesson_resource = base.resource.filter(section=\"lesson_resource\")\n exercice_resource = base.resource.filter(section=\"exercice_resource\")\n other_resource = base.resource.filter(section=\"other_resource\")\n\n exercice_resource_sesamath = list()\n lesson_resource_sesamath = list()\n lesson_resource_khanacademy = list()\n\n sori_skills_lesson_resources = list()\n sori_skills_exercice_resources = list()\n sori_skills_other_resources = 
list()\n\n sori_skills_lesson_resource_sesamath = list()\n sori_skills_lesson_resource_khanacademy = list()\n sori_skills_exercice_resource_sesamath = list()\n\n sori_coder_lesson_resources = list()\n sori_coder_exercice_resources = list()\n sori_coder_other_resources = list()\n\n sori_coder_lesson_resource_sesamath = list()\n sori_coder_lesson_resource_khanacademy = list()\n sori_coder_exercice_resource_sesamath = list()\n\n # Sorting the different type of resources by category (personal, lesson, exercice or other)\n # and by type (khanacademy, sesamath or other)\n\n for exo in lesson_resource:\n if exo.content.get('from') and exo.content['from'] == \"skills_sesamathskill\":\n resource = get_object_or_404(Sesamath, pk=exo.content['referenced'])\n lesson_resource_sesamath.append([exo.pk, resource])\n lesson_resource = lesson_resource.exclude(pk=exo.pk)\n\n elif exo.content.get('from') and exo.content['from'] == \"skills_khanacademyvideoskill\":\n resource = get_object_or_404(KhanAcademy, pk=exo.content['referenced'])\n lesson_resource_khanacademy.append([exo.pk, resource])\n lesson_resource = lesson_resource.exclude(pk=exo.pk)\n\n for exo in exercice_resource:\n if exo.content.get('from') and exo.content['from'] == \"skills_sesamathskill\":\n resource = get_object_or_404(Sesamath, pk=exo.content['referenced'])\n exercice_resource_sesamath.append([exo.pk, resource])\n exercice_resource = exercice_resource.exclude(pk=exo.pk)\n\n # Do the same operation for similar or identical resources :\n # Some Skill or CodeR can be similar or identical to other Skill or CodeR\n # We have to display the resources of those as well\n\n # We achieve this only if we have either a Skill or a CodeR.\n # If it is a Section, we don't display the similar/identical resources\n # First step : is base a Skill or a CodeR ?\n if isinstance(base, Skill):\n # Begin with the Skills, same step is done for CodeR a bit further\n\n # Retrieve all the similar and identical Relations for Skills\n skills_from = Relations.objects.filter(Q(relation_type__in=['similar_to', 'identic_to'], from_skill=base.id))\n skills_to = Relations.objects.filter(Q(relation_type__in=['similar_to', 'identic_to'], to_skill=base.id))\n\n # This list will gather all the similar or 'identic' Skills\n # (similar_or_identic = sori)\n sori_skills = list()\n for skill_from in skills_from:\n sori_skills.append(skill_from.to_skill)\n for skill_to in skills_to:\n sori_skills.append(skill_to.from_skill)\n\n # Get the associated resources for these Skills\n # We need to keep them separated by section to ease their use in the template\n\n for skill in sori_skills:\n lesson = skill.resource.filter(section=\"lesson_resource\")\n if lesson:\n sori_skills_lesson_resources.append([skill, lesson])\n exercice = skill.resource.filter(section=\"exercice_resource\")\n if exercice:\n sori_skills_exercice_resources.append([skill, exercice])\n other = skill.resource.filter(section=\"other_resource\")\n if other:\n sori_skills_other_resources.append([skill, other])\n\n # sori_skills_lesson_resources has resources of different types, we need to distinguish them\n for skill in sori_skills_lesson_resources:\n # Lists to regroup resources together\n sesamath_list = list()\n khan_list = list()\n\n for res in skill[1]:\n if res.content.get('from') and res.content['from'] == \"skills_sesamathskill\":\n resource = get_object_or_404(Sesamath, pk=res.content['referenced'])\n sesamath_list.append([res.pk, resource])\n skill[1] = skill[1].exclude(pk=res.pk)\n\n elif 
res.content.get('from') and res.content['from'] == \"skills_khanacademyvideoskill\":\n resource = get_object_or_404(KhanAcademy, pk=res.content['referenced'])\n khan_list.append([res.pk, resource])\n skill[1] = skill[1].exclude(pk=res.pk)\n\n # If we have emptied the ressources in the Skill, we remove it from the list\n if not skill[1]:\n sori_skills_lesson_resources.remove(skill)\n\n # Once resources are sorted, we can add them with the associated Skill\n if sesamath_list:\n sori_skills_lesson_resource_sesamath.append([skill[0], sesamath_list])\n if khan_list:\n sori_skills_lesson_resource_khanacademy.append([skill[0], khan_list])\n\n # sori_skills_exercice_resources has resources of different types, we need to distinguish them\n for skill in sori_skills_exercice_resources:\n # List to regroup resources together\n sesamath_list = list()\n\n for res in skill[1]:\n if res.content.get('from') and res.content['from'] == \"skills_sesamathskill\":\n resource = get_object_or_404(Sesamath, pk=res.content['referenced'])\n sesamath_list.append([res.pk, resource])\n skill[1] = skill[1].exclude(pk=res.pk)\n\n # If we have emptied the ressources in the Skill, we remove it from the list\n if not skill[1]:\n sori_skills_exercice_resources.remove(skill)\n\n # Once resources are sorted, we can add them with the associated Skill\n if sesamath_list:\n sori_skills_exercice_resource_sesamath.append([skill[0], sesamath_list])\n\n if isinstance(base, CodeR) or isinstance(base, Skill):\n # Repeat this operation for the CodeR\n # TODO : Group these steps as a function for Skill and CodeR\n # Retrieve all the CodeR related to the current Skill or to the current CodeR\n\n if isinstance(base, Skill):\n related_coder = CodeR.objects.filter(skill=base.id)\n else:\n coder_from = CodeR_relations.objects.filter(\n Q(relation_type__in=['similar_to', 'identic_to'], from_coder=base.id))\n coder_to = CodeR_relations.objects.filter(\n Q(relation_type__in=['similar_to', 'identic_to'], to_coder=base.id))\n\n # This list will gather all the similar or 'identic' CodeR\n related_coder = list()\n for coder_from in coder_from:\n related_coder.append(coder_from.to_coder)\n for coder_to in coder_to:\n related_coder.append(coder_to.from_coder)\n # Case where we look for CodeR similar or identic to a CodeR\n\n # Get the associated resources for these CodeR\n\n for coder in related_coder:\n lesson = coder.resource.filter(section=\"lesson_resource\")\n if lesson:\n sori_coder_lesson_resources.append([coder, lesson])\n exercice = coder.resource.filter(section=\"exercice_resource\")\n if exercice:\n sori_coder_exercice_resources.append([coder, exercice])\n other = coder.resource.filter(section=\"other_resource\")\n if other:\n sori_coder_other_resources.append([coder, other])\n\n # sori_skills_lesson_resources has resources of different types, we need to distinguish them\n for coder in sori_coder_lesson_resources:\n # Lists to regroup resources together\n sesamath_list = list()\n khan_list = list()\n\n for res in coder[1]:\n if res.content.get('from') and res.content['from'] == \"skills_sesamathskill\":\n resource = get_object_or_404(Sesamath, pk=res.content['referenced'])\n sesamath_list.append([res.pk, resource])\n coder[1] = coder[1].exclude(pk=res.pk)\n\n elif res.content.get('from') and res.content['from'] == \"skills_khanacademyvideoskill\":\n resource = get_object_or_404(KhanAcademy, pk=res.content['referenced'])\n khan_list.append([res.pk, resource])\n coder[1] = coder[1].exclude(pk=res.pk)\n\n # If we have emptied the ressources in 
the Skill, we remove it from the list\n if not coder[1]:\n sori_coder_lesson_resources.remove(coder)\n\n # Once resources are sorted, we can add them with the associated Skill\n if sesamath_list:\n sori_coder_lesson_resource_sesamath.append([coder[0], sesamath_list])\n if khan_list:\n sori_coder_lesson_resource_khanacademy.append([coder[0], khan_list])\n\n # sori_coder_exercice_resources has resources of different types, we need to distinguish them\n for coder in sori_coder_exercice_resources:\n # List to regroup resources together\n sesamath_list = list()\n\n for res in coder[1]:\n if res.content.get('from') and res.content['from'] == \"skills_sesamathskill\":\n resource = get_object_or_404(Sesamath, pk=res.content['referenced'])\n sesamath_list.append([res.pk, resource])\n coder[1] = coder[1].exclude(pk=res.pk)\n\n # If we have emptied the ressources in the CodeR, we remove it from the list\n if not coder[1]:\n sori_coder_exercice_resources.remove(coder)\n\n # Once resources are sorted, we can add them with the associated CodeR\n if sesamath_list:\n sori_coder_exercice_resource_sesamath.append([coder[0], sesamath_list])\n\n\n sesamath_references_manuals = Sesamath.objects.filter(ressource_kind__iexact=\"Manuel\")\n sesamath_references_cahiers = Sesamath.objects.filter(ressource_kind__iexact=\"Cahier\")\n\n return render(request, \"professor/skill/update_pedagogical_resources.haml\", {\n \"sesamath_references_manuals\": sesamath_references_manuals,\n \"sesamath_references_cahier\": sesamath_references_cahiers,\n \"base\": base,\n \"personal_resources\": personal_resource,\n \"other_resources\": other_resource,\n \"exercice_resources\": exercice_resource,\n \"exercice_resource_sesamath\": exercice_resource_sesamath,\n \"lesson_resources\": lesson_resource,\n \"lesson_resource_sesamath\": lesson_resource_sesamath,\n \"lesson_resource_khanacademy\": lesson_resource_khanacademy,\n \"type\": type,\n \"sori_skills_exercice_resources\": sori_skills_exercice_resources,\n \"sori_skills_lesson_resources\": sori_skills_lesson_resources,\n \"sori_skills_other_resources\": sori_skills_other_resources,\n \"sori_skills_lesson_resource_sesamath\": sori_skills_lesson_resource_sesamath,\n \"sori_skills_lesson_resource_khanacademy\": sori_skills_lesson_resource_khanacademy,\n \"sori_skills_exercice_resource_sesamath\": sori_skills_exercice_resource_sesamath,\n \"sori_coder_exercice_resources\": sori_coder_exercice_resources,\n \"sori_coder_lesson_resources\": sori_coder_lesson_resources,\n \"sori_coder_other_resources\": sori_coder_other_resources,\n \"sori_coder_lesson_resource_sesamath\": sori_coder_lesson_resource_sesamath,\n \"sori_coder_lesson_resource_khanacademy\": sori_coder_lesson_resource_khanacademy,\n \"sori_coder_exercice_resource_sesamath\": sori_coder_exercice_resource_sesamath,\n })", "def triples():", "def get_contenu(self):\n return self.objets", "def analogies(self, queries):\n pass" ]
[ "0.6256025", "0.5748354", "0.5600851", "0.5587314", "0.5289076", "0.52684903", "0.52684903", "0.5218291", "0.50912505", "0.509042", "0.50746655", "0.50711817", "0.50640666", "0.5047204", "0.49905527", "0.49653897", "0.49444157", "0.49329725", "0.49328998", "0.4929676", "0.4929676", "0.49144045", "0.48689863", "0.48640102", "0.48568565", "0.48278075", "0.48118252", "0.47862917", "0.4778137", "0.4765602", "0.47376904", "0.47371423", "0.4712948", "0.47079098", "0.4690761", "0.46817285", "0.46692193", "0.46649206", "0.46633402", "0.46618327", "0.46478578", "0.46388152", "0.4627976", "0.46255767", "0.46221846", "0.46071997", "0.4599627", "0.4599293", "0.45980915", "0.45943493", "0.45882478", "0.45799354", "0.45698762", "0.45679364", "0.45601118", "0.45581642", "0.455512", "0.45536482", "0.45489037", "0.45432234", "0.45428252", "0.45411226", "0.4537223", "0.45338848", "0.4532763", "0.45249236", "0.45103616", "0.45084417", "0.45019335", "0.44992322", "0.44932643", "0.44907832", "0.44900984", "0.44881555", "0.4486375", "0.44840714", "0.44582695", "0.4452424", "0.4450995", "0.44497752", "0.44487044", "0.44430786", "0.4440595", "0.4435284", "0.44301262", "0.4421716", "0.44188485", "0.4417894", "0.44126633", "0.44124982", "0.44105592", "0.44101116", "0.4404261", "0.44027844", "0.4400198", "0.4399984", "0.4397268", "0.43954667", "0.4394178", "0.43939963" ]
0.7746891
0
Returns all numbers below N that are multiples of M.
def findMultiples(M, N):\n    numbers = []\n    for i in range(N):\n        if(i + 1 == N):\n            break\n        if(((i + 1) % M) == 0):\n            numbers.append(i+1)\n    return numbers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sixn(m):\n if m <= 2:\n return ()\n if m > 2:\n yield 2\n if m > 3:\n yield 3\n for n in count(1):\n x = 6 * n - 1\n y = x + 2\n if x < m:\n yield x\n else:\n break\n if y < m:\n yield y\n else:\n break", "def islice(n, m):\n npiece = int(math.ceil(1.0*n/m))\n for i in range(npiece):\n if (i+1)*m > n:\n yield i, i*m, n\n else:\n yield i, i*m, (i+1)*m", "def choose(n, m):\n assert n >= m, \"Cannot choose {0} elements from {1}\".format(m, n)\n result = 1\n for n_i, m_i in zip(range(n, m, -1), range(1, m+1)):\n result *= n_i\n result /= m_i\n return result", "def evenly_select(N, M):\n if N == M:\n return np.ones(N, dtype=int)\n assert N > M\n if M > N/2:\n cut = np.ones(N, dtype=int)\n q, r = divmod(N, N-M)\n indices = [q*i + min(i, r) for i in range(N-M)]\n cut[indices] = False\n else:\n cut = np.zeros(N, dtype=int)\n q, r = divmod(N, M)\n indices = [q*i + min(i, r) for i in range(M)]\n cut[indices] = True\n\n return cut", "def cashflow_times(n, m):\n return [i for i in range(m * n+1) if i != 0 ]", "def find_multiple(self, num):\n result = dict()\n for n in range(1, num+1):\n temp = self.find_prime_divisors(n)\n result.update({k:v for k,v in temp.items() if k not in result or result[k] < v})\n return reduce(operator.mul, (pow(k, v) for k,v in result.items()))", "def mult(n,m):\n result = 0\n\n if m == 0 or n == 0:\n result = 0\n\n elif n > 0:\n for x in range(n):\n result = result + m\n else:\n for x in range(-n):\n result = result - m\n return result", "def get_multiples(ratio, n):\n ls = [ratio ** i for i in range(n)]\n return ls", "def hart(N):\n m = 2\n i = 1\n while not is_square(m):\n s = isqrt(N * i) + 1\n m = pow(s, 2, N)\n i += 1\n t = isqrt(m)\n g = gcd(s - t, N)\n return g, N // g", "def partition(n, m, discard= False):\n steps = range(0, 1 + n, m)\n yield from zip(steps, steps[1:])\n if n % m and not discard:\n yield n - (n % m), n", "def slice(n, m):\n chunks = []\n for piece in islice(n, m):\n chunks.append(piece)\n return chunks", "def find_powers(n):\n # find_powers(6) --> [1, 2, 3, 4]\n return list(takewhile(lambda x: len(str(n**x)) == x, count(1)))", "def wrap(x, m, M):\n diff = M - m\n while x > M:\n x = x - diff\n while x < m:\n x = x + diff\n return x", "def algorithm_h(n, m):\n partition = [1]*m\n partition[0] = n - m + 1\n\n while True:\n yield partition[:]\n if partition[1] < partition[0] - 1:\n partition[0] -= 1\n partition[1] += 1\n else:\n j = 2\n s = partition[0] + partition[1] - 1\n while j < m and partition[j] >= partition[0] - 1:\n s += partition[j]\n j += 1\n if j >= m:\n return\n replacement = partition[j] + 1\n partition[j] = replacement\n j -= 1\n while j > 0:\n partition[j] = replacement\n s -= replacement\n j -= 1\n partition[0] = s", "def evenly_select(n, m):\n if n == m:\n return np.ones(n, dtype=int)\n assert n > m\n if m > n/2:\n cut = np.ones(n, dtype=int)\n q, r = divmod(n, n - m)\n indices = [q * i + min(i, r) for i in range(n - m)]\n cut[indices] = False\n else:\n cut = np.zeros(n, dtype=int)\n q, r = divmod(n, m)\n indices = [q * i + min(i, r) for i in range(m)]\n cut[indices] = True\n\n return cut", "def is_multiple(n,m):\n return n % m == 0", "def subdim(number):\n res = []\n for i in [2, 3, 4, 5, 6, 7, 8, 9, 10]:\n res.append(number % i)\n if number % i == 0:\n n = i\n m = number // i\n return n, m\n if not 0 in res:\n return subdim(number + 1)", "def ideal_fanin(k, m):\n fanin = 0\n needed = k\n for select in range(3, m + 1, 2):\n combinations = list(itertools.combinations(range(m), select))\n if len(combinations) <= needed:\n fanin 
+= int(math.ceil(float(len(combinations) * select) / m))\n needed -= len(combinations)\n else:\n fanin += int(math.ceil(float(needed * select) / m))\n needed = 0\n if not needed:\n break\n return fanin", "def bruteForce_MC(N,M):\n hewlist = np.zeros(M)\n for i in range(M):\n x = createDist(N)\n x = np.abs(x-np.mean(x))\n x.sort()\n hewlist[i] = np.median(x)*2.\n return np.mean(hewlist), np.std(hewlist)", "def pick_chosen_points(m, n):\r\n return [i * n // m + n // (2 * m) for i in range(m)]", "def get_k(self, n, m):\n k = m/n * log(2)\n return int(k)", "def solution1(n):\n res = []\n while n > 0:\n m = int(math.floor(math.sqrt(n))**2)\n res.append(m)\n n -= m\n return res", "def maiores(lista, n):\n numeros = [lista for lista in lista if lista > n]\n return numeros", "def uniform_sample(n, m):\n interval = m / n\n indices = [0]\n index = 0.0\n while True:\n index += interval\n if index >= m - 1:\n indices.append(int(m - 1))\n break\n else:\n indices.append(int(index))\n\n return np.array(indices)", "def non_mcnugget():\n nugget = [0, 6, 9, 20]\n mcnugget = set([6, 9, 20])\n\n while True:\n mcnugget = set([m+n for m in mcnugget for n in nugget])\n\n for m in mcnugget:\n found = all([m+j in mcnugget for j in range(6)])\n if found:\n return [k for k in range(1, m) if k not in mcnugget]", "def countm(m):\n nfound=0\n\n for i in range(1,m+1):\n for jpk in range(2,(2*i)+1):\n d1=i*i+(jpk)*(jpk) \n if(checkpsq(d1)): \n if(jpk<=i):\n factor=jpk/2 \n else:\n factor=((2*i-jpk)+2)/2 \n nfound=nfound+factor\n\n return nfound", "def sum_n_m(n, m):\n total = 0\n for i in range(n, m+1):\n total += i\n return total", "def power(x, m, n):\n a = 1\n while m > 0:\n if m % 2 == 1:\n a=(a*x)%n\n x=(x*x)%n\n m//=2\n return a", "def tri(N, M=None, k=0, dtype=float):\r\n if M is None: M = N\r\n m = greater_equal(subtract.outer(arange(N), arange(M)),-k)\r\n return m.astype(dtype)", "def bartlett(M):\n if M < 1:\n return array([])\n if M == 1:\n return ones(1, float)\n n = arange(0,M)\n return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))", "def solve(n=10):\n return sum(M_N_S(n, d)[2] for d in range(10))", "def workersNeeded(k, m):\n # formula: k/m\n from math import ceil\n return ceil(float(k)/float(m))", "def liste_N_nb_premier(N):\n liste = []\n i = 0\n while len(liste) < N:\n if is_prime(i):\n liste.append(i)\n i += 1\n return liste", "def getDivisors(n):", "def solve(total):\n\n if not isnatural(total):\n raise ValueError(\"El numero tiene que ser natural\")\n\n suplimit = total//2 + 2\n for m in range(1, suplimit):\n for n in range(m, suplimit):\n suma = (n-m+1)*(m+n) // 2\n if suma < total:\n continue\n elif suma == total:\n yield (m, n)\n break", "def findKthNumber(self, m: int, n: int, k: int) -> int:\n l, r = 1, m * n\n while l < r:\n mid = l + ((r - l) >> 1)\n\n # Check if there are k or more values that are less than mid.\n # For each row, its elements look like 1*i, 2*i, ... n*i, so the\n # largest number that is less than x will be x // i. 
But if x is\n # too large for the current row, the total count for this row\n # will be n instead.\n if sum(min(mid // r, n) for r in range(1, m + 1)) >= k:\n # mid is our candidate.\n r = mid\n else:\n l = mid + 1\n\n return l", "def main(n=20):\n return functools.reduce(lcm, range(1, 20))", "def get_set(dim, maximum):\n\n i = 0\n numbers = []\n while i**2 <= maximum:\n n = i**2\n counter = 0\n while n <= maximum and counter < dim:\n numbers += [i**2]\n n += i**2\n counter += 1\n i += 1\n return numbers", "def splitArray(self, nums: List[int], m: int) -> int:\n l = max(nums)\n r = sum(nums)\n ans = r\n\n while l <= r:\n mid = (l + r) // 2\n range_sum = 0\n range_sum_count = 1\n for i in range(len(nums)):\n if (range_sum + nums[i] > mid):\n range_sum = nums[i]\n range_sum_count += 1\n else:\n range_sum += nums[i]\n if range_sum_count <= m:\n ans = min(ans, mid)\n r = mid - 1\n else:\n l = mid + 1\n return ans", "def get_divisors(n, includeN=True):\n lower_divisors, upper_divisors = [], []\n i = 1\n while i * i <= n:\n if n % i == 0:\n lower_divisors.append(i)\n if i != n // i:\n upper_divisors.append(n//i)\n i += 1\n upper_divisors = upper_divisors[::-1]\n if not includeN:\n upper_divisors.pop()\n return lower_divisors + upper_divisors", "def gen_num(lim=10000):\n n = 1\n yield 2\n yield 3\n while 6 * n + 1 <= lim:\n yield 6 * n - 1\n yield 6 * n + 1\n n += 1", "def combination(num_m: int, num_n: int) -> int:\n if num_m < num_n:\n return 0\n\n tmp_m = 1\n tmp_n = 1\n tmp_cnt = 0\n for i in range(num_n, 0, -1):\n tmp_n *= i\n tmp_m *= num_m - tmp_cnt\n tmp_cnt += 1\n\n return tmp_m / tmp_n", "def smallest_multiple(n):\n return reduce(lowest_common_multiple, range(1, n+1))", "def primfact(e):\n for n in range(2, e):\n for x in range(2, n):\n if n % x == 0:\n break\n else:\n print n,", "def solution3(n):\n res = []\n while n > 0:\n m = int(math.sqrt(n))**2\n res.append(m)\n n -= m\n return res", "def m(self):\n\t\tn = 0\n\t\ti = self.k0\n\t\twhile 1:\n\t\t\tif i > self.j:\n\t\t\t\treturn n\n\t\t\tif not self.cons(i):\n\t\t\t\tbreak\n\t\t\ti = i + 1\n\t\ti = i + 1\n\t\twhile 1:\n\t\t\twhile 1:\n\t\t\t\tif i > self.j:\n\t\t\t\t\treturn n\n\t\t\t\tif self.cons(i):\n\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t\ti = i + 1\n\t\t\tn = n + 1\n\t\t\twhile 1:\n\t\t\t\tif i > self.j:\n\t\t\t\t\treturn n\n\t\t\t\tif not self.cons(i):\n\t\t\t\t\tbreak\n\t\t\t\ti = i + 1\n\t\t\ti = i + 1", "def listes(Mi, Ma, N):\n if N == 0:\n yield []\n elif Ma - Mi < N:\n yield None\n else:\n # Avec Mi + 1\n for reste in listes(Mi + 1, Ma, N - 1):\n if reste != None:\n yield [Mi + 1] + reste\n \n # Sans Mi + 1\n for reste in listes(Mi + 1, Ma, N):\n if reste != None:\n yield reste", "def get_integers(n: int) -> int:\n l = [int(sqrt(n))]\n val = l[0] * l[0]\n index = 0\n while val != n:\n val = sum([x*x for x in l])\n if val > n:\n l[index] -= 1\n elif val < n:\n index += 1\n l.append(l[index - 1])\n\n return len(l)", "def _P(m):\n P = np.zeros((m**2,m**2), dtype=np.int64)\n for i in range(1, m**2 + 1):\n j = 1 + m*((i - 1) % m) + (i - 1)//m\n P[i-1, j-1] = 1\n return P", "def dice(n, m):\n return Lea.interval(1, m).cprodTimes(n)", "def divisors(n):\r\n numbers = []\r\n for i in xrange(1, n+1):\r\n if n % i == 0:\r\n numbers.append(i)\r\n return numbers", "def between_m_and_n_ver(seq, m, n):\r\n num_AB = 0\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AB):\r\n num_AB += 1\r\n return Quantifier.T if (m <= num_AB and num_AB <= n) else Quantifier.F", "def proper_divisors(n):\r\n numbers = []\r\n for i in xrange(1, 
n):\r\n if n % i == 0:\r\n numbers.append(i)\r\n \r\n return numbers", "def get_3_5_sum(num):\n nums = range(num)\n mults = [num for num in nums if (num % 3 == 0 or num % 5 == 0)]\n return sum(mults)", "def getNPrime(num):\n prime_numbers = []\n for i in range(num):\n if isPrime(i + 1):\n prime_numbers.append(i)\n return prime_numbers", "def factor(N):\n\n factors = []\n sqrtN = math.sqrt(N)\n for x in range(2, int(sqrtN)+1):\n (d, r) = divmod(N, x)\n if r == 0:\n factors.append(x)\n if x != d: factors.append(d)\n return [1, N] + factors", "def _get_l1_mlist(n):\n global _l1_bucket_max\n if n == 1:\n mlist = [1]\n elif n <= _l1_bucket_max:\n mlist = [n, 2*n, n*n]\n else:\n mlist = [n, 2*n] #just to try\n return mlist", "def how_many_5(numbers):\r\n # Modify example to take argument that specifies threshold\r\n return sum( 1 for number in numbers if number > 5 )", "def primes_below(n):\n L, M = [2], [x for x in range(3, int(n), 2)]\n if n <= 2:\n print('There are no primes below 2')\n return None\n for i in range(3, int(n), 2):\n if M[i // 2 - 1] != 0 and is_prime(i):\n L.append(i)\n for j in range(i, int(n), 2 * i):\n M[j // 2 - 1] = 0\n return L", "def sum_divisibles(limit):\n res = [x for x in range(limit) if x % 3 == 0 or x % 5 == 0]\n return sum(res)", "def tourney_prob(k, N, m):\n\n if N < m:\n print \"The second argument cannot be smaller than the third one.\"\n sys.exit()\n\n if m < 1 or k <= 0:\n return 0.0\n elif m == 1:\n return 1.0 / N\n else:\n return float(N - k) * m / (N * (m - 1)) * tourney_prob(k, N - 1, m - 1)", "def splitArray(self, nums: List[int], m: int) -> int:\n low, high, res = max(nums), sum(nums), -1\n while low <= high:\n pivot=(low+high)//2\n if self.isValid(nums,m,pivot):\n res, high = pivot, pivot - 1\n else:\n low = pivot + 1\n return res", "def MM(x,N,n,l,t=0):\n Mat = np.zeros([2**l,2**l])\n for iii in range(N):\n Mat[iii,(x**n * iii)%N] = 1\n return Mat", "def choices(N, M):\n #yield from itertools.combinations(range(2, M+1), N)\n # Use stars and bars method\n allbars = itertools.combinations( range(1,N+M-1), M-2 )\n for bars in allbars:\n bars = [0] + list(bars) + [N+M-1]\n yi = [ y-x for x, y in zip(bars, bars[1:]) ]\n yield [ y-1 for y in yi ]", "def bruteForcePopulation(N):\n return list(itertools.permutations(range(N), N))", "def primes(m):\n if m <= 2:\n return ()\n sieve = [True] * m\n for i in sixn(m):\n if sieve[i]:\n yield i\n for mult in range(i * i, m, i):\n sieve[mult] = False", "def choose_m_n(li,min,max):\n n_items = random.randrange(min,max+1)\n if n_items == 0:\n return [ ]\n sample=random.sample(li,n_items) # Should it be sorted?\n return sample", "def Samples(n=6, m=1000):\n t = [Sample(n) for i in range(m)]\n return t", "def _two_of_m(m):\n return (m * (m - 1) / 2)", "def multiply(m, n):\n if n == 1:\n return m\n else:\n return m + multiply(m, n - 1)", "def count_partitions(n, m):\n if n == 0:\n return 1\n elif n < 0:\n return 0\n elif m == 0:\n return 0\n else:\n with_m = count_partitions(n - m, m)\n without_m = count_partitions(n, m - 1)\n return with_m + without_m", "def solve(limit):\n upper_limit = ceil(sqrt(limit - 2**4 - 2**3))\n p_list = PrimeList(upper_limit)\n\n num_set = set()\n for x in p_list:\n val = limit - 2**4 - x**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for y in takewhile(lambda i: i<lim, p_list):\n val = limit - min(x,y)**4 - max(x,y)**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for z in takewhile(lambda i: i<lim, p_list):\n\n for a,b,c in permutations([x,y,z]):\n ans = a**2 + b**3 + c**4\n if ans > 
limit: continue\n num_set.add(ans)\n if a ==b and b == c: break\n\n return len(num_set)", "def count_partitions(n, m):\n # print(n, m)\n if n == 0:\n return 1\n elif n < 0:\n return 0\n elif m == 0:\n return 0\n else:\n return count_partitions(n-m, m) + count_partitions(n, m//2)", "def make_evaluation_grids(W, M, N):\n nu = (np.arange(W * M, dtype=float) + 0.5) / (2 * M)\n x = np.arange(N + 1, dtype=float) / (2 * N)\n return nu, x", "def batches(l, n):\n for i in range(0, l, n):\n yield range(i,min(l,i+n))", "def ip(M, N):\n\n return M ^ N", "def gp(M, N):\n\n return M*N", "def _power(self, a, n, m):\n res = 1\n while n != 0:\n if n % 2 != 0:\n res *= a\n res %= m\n n -= 1\n else:\n a *= a\n a %= m\n n //= 2\n return res", "def numbers():\n for number in range(1, 76):\n yield number", "def get_k(M):\n k = np.arange(1,M+1)*np.pi/(M+1) # vector of all possible quasimomenta\n return k", "def sum_mult(number, limit):\n limit = limit - 1\n limit = (limit - (limit%number))/number\n return (number * limit * (limit+1))/2", "def primes_lt3(N):\n\t# test every number less than N/2\n\tprimes = [ i for i in xrange(2,N)\n\t\t\t\t if not any( ( i % p == 0 for p in xrange(2,int(sqrt(i))+1) ) )]\n\n\treturn primes", "def primes_lt3(N):\n\t# test every number less than N/2\n\tprimes = [ i for i in xrange(2,N)\n\t\t\t\t if not any( ( i % p == 0 for p in xrange(2,int(sqrt(i))+1) ) )]\n\n\treturn primes", "def prbs(m, n):\n return np.array(np.random.rand(m, n) > 0.5, dtype=np.int) - 0.5", "def MM_n(N, data):\n out = np.zeros(len(data))\n\n for j in range(N):\n out[j] = np.average(data[:j+1])\n for (j,d) in enumerate(data[N-1:]):\n out[j+N-1] = np.average(data[j:j+N])\n\n return out", "def divisior(n: int) -> list:\n j = [n]\n for d in range(n+1): #loop bis n\n d > 0", "def amicable_numbers(n):\n amicables = []\n sumDivisors = {}\n for i in range(1, n):\n divisors = proper_divisors(i)\n sumDivisors[i] = sum(divisors)\n for i in range(1, n):\n sumDivisorsOfi = sumDivisors[i]\n if sumDivisorsOfi < n:\n compare = sumDivisors[sumDivisorsOfi]\n if compare == i and sumDivisorsOfi != i:\n amicables.append(i)\n return amicables", "def generate_numbers():\n\n nums = []\n\n for i in range(0, 12):\n for j in range(1, 12):\n nums.append(i / j)\n\n return list(set(nums))", "def cycles(n, m):\n if n <= 2:\n return 1\n k = 2\n tk = 3\n pk = 2\n while k < n - 1:\n tk1 = (2 * pk * tk * tk) % m\n pk1 = (2 * pk * pk * tk) % m\n tk = tk1\n pk = pk1\n k += 1\n return (pk * pk * pk) % m", "def sum_amicable(limit):\n\n def find_amicable_pair(n):\n check_n= 0\n potential_half = 0\n for i in range(1,n):\n if n % i == 0:\n potential_half += i\n for i in range(1, potential_half):\n if potential_half % i == 0:\n check_n += i\n if check_n == n and n != potential_half: # exclude self amicable\n result.append(n)\n result.append(potential_half)\n\n result = []\n for num in range(1, limit):\n if num not in result:\n find_amicable_pair(num)\n return sum(result)", "def skip_mul(n):\n if n <= 0:\n return 1\n else:\n return n * skip_mul(n - 2)", "def ModExp(n, k, m):\n a = list(bin(k))[2:]\n a.reverse()\n s = 1\n for i in a:\n if i == '1':\n s = (s * n) % m\n n = (n * n) % m\n return s", "def problem3(m, n):\n # TODO: Implement and test this function.\n # We supplied some tests in test_problem3 above.", "def divisors(n):\n return [x for x in range(1, n) if n % x == 0]", "def f(n):\n\tfor i in range(101, n):\n\t\tif (i % 21 == 0):\n\t\t\treturn i", "def fast_power(a, n, m): # (a ^ n) % m\n result = 1\n value = a\n power = n\n while power > 0:\n if 
power % 2 == 1:\n result = result * value\n result %= m\n value = value * value\n value %= m\n power = power//2\n return result", "def eratosthenes_npo(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit//2, dtype=np.bool)\n for i in range(3, int(limit**0.5)+1, 2):\n if mask[i//2]:\n mask[i*i//2::i] = False\n return np.r_[2, 2*np.nonzero(mask)[0][1::]+1]", "def perfect_power(n: int):\n k = 2\n max_k = round(n ** (1/2))\n while True:\n m = round(n ** (1/k))\n if m ** k == n:\n return [m, k]\n elif k > max_k:\n return None\n else:\n k += 1", "def prime_numbers(limit):\n primes = [2, 3, 5]\n for p in primes:\n yield p\n n = 5\n count = 3\n last_idx = -1\n sqrd_prime = 0\n while count <= limit:\n n += 2\n if n > sqrd_prime:\n last_idx += 1\n sqrd_prime = primes[last_idx] ** 2\n is_prime = True\n for i in range(1, last_idx + 1):\n p = primes[i]\n if n % p == 0:\n is_prime = False\n break\n if is_prime:\n count += 1\n primes.append(n)\n yield n", "def prime_factorization(n):\r\n result = []\r\n for i in xrange(2, n+1):\r\n s = 0;\r\n while n / float(i) == floor(n/float(i)):\r\n n = n / float(i)\r\n s += 1\r\n if s > 0:\r\n for k in range(s):\r\n result.append(i)\r\n if n == 1:\r\n return result" ]
[ "0.67994684", "0.67914283", "0.6521023", "0.64013743", "0.632049", "0.62614083", "0.6100065", "0.6055581", "0.6036513", "0.6017747", "0.60122454", "0.60112625", "0.59835833", "0.59610224", "0.59551555", "0.59433913", "0.5900685", "0.587324", "0.5860821", "0.5820013", "0.58147126", "0.58118325", "0.58117676", "0.58067805", "0.5791375", "0.5788349", "0.5780253", "0.5773223", "0.5770476", "0.5767918", "0.5759007", "0.5741875", "0.57391894", "0.57294613", "0.57218325", "0.5710505", "0.5699357", "0.56893206", "0.56803834", "0.5676526", "0.56713736", "0.56521595", "0.5642624", "0.5642587", "0.5642252", "0.56401104", "0.56388783", "0.5626682", "0.56264246", "0.56111056", "0.56066567", "0.55986536", "0.5594367", "0.55916905", "0.55900174", "0.55831313", "0.55812496", "0.55482745", "0.5545712", "0.55340123", "0.5531779", "0.55310553", "0.5528484", "0.55244267", "0.5520194", "0.551607", "0.5513164", "0.5496783", "0.5495809", "0.54915017", "0.5488247", "0.5476103", "0.5455536", "0.54501975", "0.5437969", "0.5436581", "0.5434888", "0.54267466", "0.54249406", "0.542101", "0.54186136", "0.5398261", "0.5398261", "0.5397383", "0.5392915", "0.5377719", "0.53676444", "0.5362202", "0.53610384", "0.5353305", "0.5349506", "0.5343859", "0.53376096", "0.53353226", "0.53339803", "0.5333947", "0.5330843", "0.53298694", "0.5325", "0.53225803" ]
0.7943605
0
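Several negatives in the row above implement modular exponentiation by repeated squaring (e.g. fast_power and ModExp). A minimal standalone sketch of that square-and-multiply technique follows; the name mod_pow and the sample values are illustrative, not taken from the dataset:

def mod_pow(base, exp, mod):
    # Compute (base ** exp) % mod by binary (square-and-multiply) exponentiation.
    result = 1
    base %= mod
    while exp > 0:
        if exp & 1:                  # low bit set: fold the current power in
            result = result * base % mod
        base = base * base % mod     # square for the next bit of exp
        exp >>= 1
    return result

assert mod_pow(3, 200, 50) == pow(3, 200, 50)  # agrees with Python's builtin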
Check if an object is exactly an instance of a specified class
def is_same_class(obj, a_class): return type(obj) == a_class
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False", "def is_same_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_same_class(obj, a_class):\n if type(obj) is a_class:\n return True\n else:\n return False", "def is_same_class(obj, a_class):\n return type(obj) is a_class", "def is_same_class(obj, a_class):\n return type(obj) is a_class", "def is_same_class(obj, a_class):\n return(type(obj) is a_class)", "def is_same_class(obj, a_class):\n\n if type(obj) is a_class:\n return True\n return False", "def is_same_class(obj, a_class):\n return (type(obj) is a_class)", "def is_same_class(obj, a_class):\n if type(obj) == a_class:\n return True\n else:\n return False", "def match(self, cls):\n return isinstance(self, cls)", "def is_same_class(obj, a_class):\n return(type(obj) == a_class)", "def is_kind_of_class(obj, a_class):\n return(isinstance(obj, a_class))", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n return False", "def is_same_class(obj, a_class):\n return (type(obj) == a_class)", "def is_kind_of_class(obj, a_class):\n return (isinstance(obj, a_class))", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class) is True:\n return True\n else:\n return False", "def isclassinstance(object):\n if not hasattr(object, \"__class__\"):\n return False\n if isbuiltin(object.__class__):\n return False\n return True", "def is_kind_of_class(obj, a_class):\n\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n\n return (isinstance(obj, a_class))", "def check_class(instance, type):\n\tif not issubclass(instance, type):\n\t\traise TypeError('Subclass expected type {0}, but got: {1}', type(type), type(instance))", "def is_kind_of_class(obj, a_class):\n\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def class_is(cls: Class) -> bool:\n pass", "def isclass(object):\r\n return isinstance(object, (type, types.ClassType))", "def is_instance(instance, expected_types):\n for expected_type in expected_types:\n if isinstance(instance, expected_type):\n return True\n\n return False", "def assert_is_instance(self, obj, cls, msg=\"\"):\r\n assert isinstance(obj, cls)", "def test_is_instance(self):\n self.assertIsInstance(self.obj, Rectangle, \"created obj is not an \" +\n \"instance of Rectangle class.\")", "def is_instance(self, thing: Any) -> bool:\n return isinstance(thing, self.underlying)", "def is_dataclass_instance(obj: Any) -> bool:\n return dataclasses.is_dataclass(obj) and not isinstance(obj, type)", "def inherits_from(obj, a_class):\n if issubclass(type(obj), a_class) and not type(obj) == a_class:\n return True\n else:\n return False", "def verify_is_instance(self, obj, cls, msg=\"\"):\r\n try:\r\n self.assert_is_instance(obj, cls, msg)\r\n except AssertionError, 
e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def test_isclassinstance():\n class MockClass:\n pass\n\n # Since Python3, everything is a class, so this means nothing (?)\n assert isclassinstance(0)\n assert isclassinstance(1.0)\n assert isclassinstance(complex(2.0))\n assert isclassinstance('foo')\n assert isclassinstance([])\n assert isclassinstance(())\n assert isclassinstance(range(6))\n assert isclassinstance(bytes(7))\n assert isclassinstance(bytearray())\n assert isclassinstance(memoryview(b'nine'))\n assert isclassinstance(set())\n assert isclassinstance(frozenset())\n assert isclassinstance({})\n assert isclassinstance(None)\n assert isclassinstance(MockClass())", "def inherits_from(obj, a_class):\n if issubclass(type(obj), a_class):\n if type(obj) is not a_class:\n return True\n return False", "def inherits_from(obj, a_class):\n if isinstance(type(obj), a_class) and type(obj) != a_class:\n return True\n return False", "def inherits_from(obj, a_class):\n\n if issubclass(type(obj), a_class) and type(obj) != a_class:\n return True\n return False", "def obj_is_in_class(obj: unrealsdk.UObject, in_class: str) -> bool:\n return bool(obj.Class == unrealsdk.FindClass(in_class))", "def inherits_from(obj, a_class):\n\n if isinstance(obj, a_class) and type(obj) is not a_class:\n return True\n\n return False", "def issubclass(obj, cls):\r\n if isinstance(obj, Assert):\r\n obj = obj.obj\r\n return assert_(issubclass(obj, cls),\r\n 'not issubclass(%s, %s)' % (_repr(obj), _repr(cls)))", "def __instancecheck__(cls, instance):\r\n # Inline the cache checking when it's simple.\r\n subclass = getattr(instance, '__class__', None)\r\n if subclass in cls._abc_cache:\r\n return True\r\n subtype = type(instance)\r\n if subtype is subclass or subclass is None:\r\n if (cls._abc_negative_cache_version ==\r\n ABCMeta._abc_invalidation_counter and\r\n subtype in cls._abc_negative_cache):\r\n return False\r\n # Fall back to the subclass check.\r\n return cls.__subclasscheck__(subtype)\r\n return (cls.__subclasscheck__(subclass) or\r\n cls.__subclasscheck__(subtype))", "def _isinstance(self, instance, raise_error=True):\n\n if isinstance(instance, self.__model__):\n return True\n elif raise_error:\n raise ValueError('{} is not of type {}.'.format(\n instance, self.__model__,\n ))\n else:\n return False", "def assertIsInstance(self, obj, cls, msg=None):\r\n if not _is_instance(obj, cls):\r\n standardMsg = '%s is not an instance of %r' % (safe_repr(obj), cls)\r\n self.fail(self._formatMessage(msg, standardMsg))", "def instance_of(cls):\n def check(value):\n return (\n isinstance(value, cls),\n u\"{value!r} is instance of {actual!s}, required {required!s}\".format(\n value=value,\n actual=fullyQualifiedName(type(value)),\n required=fullyQualifiedName(cls),\n ),\n )\n return check", "def is_instance_of_type(object_a, type_a):\n\n return is_type_subclass_of_type(type(object_a), type_a)", "def inherits_from(obj, a_class):\n if type(obj) is not a_class:\n return(issubclass(type(obj), a_class))\n else:\n return False", "def has_exactly_type(obj, tpe):\r\n return type(obj) == tpe", "def inherits_from(obj, a_class):\n if type(obj) == a_class:\n return False\n return issubclass(type(obj), a_class)", "def inherits_from(obj, a_class):\n if type(obj) is not a_class and issubclass(type(obj), a_class):\n return True\n else:\n return False", "def inherits_from(obj, a_class):\n return issubclass(type(obj), a_class) and type(obj) != a_class", "def 
__is_type_instance( self, instance_type ):\n for index, instance in enumerate(INSTANCE_TYPES):\n if instance == instance_type:\n return True\n return False", "def inherits_from(obj, a_class):\n if a_class == type(obj):\n return False\n return isinstance(obj, a_class)", "def inherits_from(obj, a_class):\n return(issubclass(type(obj), a_class) and type(obj) != a_class)", "def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) is not a_class", "def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) is not a_class", "def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) != a_class", "def inherits_from(obj, a_class):\n return isinstance(obj, a_class) and type(obj) != a_class", "def inherits_from(obj, a_class):\n\n return isinstance(obj, a_class) and type(obj) is not a_class", "def test_issubclass(self):\n self.assertTrue(issubclass(self.rev.__class__, BaseModel), True)", "def inherits_from(obj, a_class):\n return (isinstance(obj, a_class) and type(obj) != a_class)", "def _is_dataclass_instance(obj):\n return hasattr(type(obj), '__dataclass_fields__')", "def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.conformsToProtocol(self))\n else:\n return False", "def __instancecheck__(self, obj: object) -> NoReturn:\n raise TypeError(\"isinstance() argument 2 cannot be a \"\n \"parameterized generic\")", "def assert_is_not_instance(self, obj, cls, msg=\"\"):\r\n assert not isinstance(obj, cls)", "def inherits_from(obj, a_class):\n return ((issubclass(type(obj), a_class)) and type(obj) != a_class)", "def _check(self, class_):\r\n\r\n if isinstance(class_, (types.FunctionType, types.LambdaType,\r\n types.ClassType, types.InstanceType)):\r\n return False\r\n if not hasattr(class_, '__dict__'):\r\n if not hasattr(class_, '__slots__'):\r\n return False\r\n return True", "def isclass(object):\n if not inspect.isclass(object):\n return False\n if isbuiltin(object):\n return False\n return type not in inspect.getmro(object)", "def exactly(base_cls):\n\n @meta\n def check(cls):\n return cls is base_cls\n\n return check", "def verify_type(self, obj):\n return isinstance(obj, self.type_)", "def needs_unique_instance(type_):\n return type_ in unique_instance_types", "def isNestedInstance(obj, cl):\n tree = []\n for k in cl.__subclasses__():\n tree += k.__subclasses__()\n tree += cl.__subclasses__() + [cl]\n return issubclass(obj.__class__, tuple(tree))", "def check_instance(self):\n self.assertIsInstance(self.amenity_1, amenity)\n self.assertIsInstance(self.amenity_2, amenity)", "def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' 
in typstr))", "def _isinstance(self, obj, raise_error=True):\n rv = isinstance(obj, self.__model__)\n if not rv and raise_error:\n raise ValueError('%s is not of type %s' % (obj, self.__model__))\n return rv", "def check_type(instance, type):\n\tif not isinstance(instance, type):\n\t\traise TypeError('Instance expected type {0}, but got: {1}', type(type), type(instance))", "def is_object(value, class_name):\n\n return isinstance(value, getattr(schema, class_name))", "def test_is_instance(self):\n self.assertTrue(isinstance(self.new_review, Review))", "def isinstancemethod(cls, obj):\n return _isinstancemethod(cls, obj)", "def is_icecube_class(obj: Any) -> bool:\n classname = str(type(obj))\n return \"icecube.\" in classname", "def is_type(obj):\n return type(obj) is type or type(obj) is types.ClassType", "def issubclass_(arg1, arg2):\n try:\n return issubclass(arg1, arg2)\n except TypeError:\n return False", "def assert_isinstance(object, class_or_tuple):\n assert isinstance(\n object, class_or_tuple\n ), \"unexpected instance type, expected=%s, actual=%s\" % (\n class_or_tuple,\n type(object),\n )\n return object", "def safe_isinstance(obj, class_path_str):\n # this function is copy-paste from the code of the SHAP Python library\n # Copyright (c) 2018 Scott Lundberg\n if isinstance(class_path_str, str):\n class_path_strs = [class_path_str]\n elif isinstance(class_path_str, list) or isinstance(class_path_str, tuple):\n class_path_strs = class_path_str\n else:\n class_path_strs = ['']\n\n # try each module path in order\n for class_path_str in class_path_strs:\n if \".\" not in class_path_str:\n raise ValueError(\"class_path_str must be a string or list of strings specifying a full \\\n module path to a class. Eg, 'sklearn.ensemble.RandomForestRegressor'\")\n\n # Splits on last occurence of \".\"\n module_name, class_name = class_path_str.rsplit(\".\", 1)\n\n # here we don't check further if the model is not imported, since we shouldn't have\n # an object of that types passed to us if the model the type is from has never been\n # imported. 
(and we don't want to import lots of new modules for no reason)\n if module_name not in sys.modules:\n continue\n\n module = sys.modules[module_name]\n\n #Get class\n _class = getattr(module, class_name, None)\n\n if _class is None:\n continue\n\n if isinstance(obj, _class):\n return True\n\n return False", "def issubclass_safe(value, type_):\n try:\n return issubclass(value, type_)\n except (TypeError, AttributeError):\n # Cannot perform issubclass on some types\n return False", "def issubclass(cls1, cls2):\n\n if cls1 == cls2:\n return 1\n\n for base in cls1.bases:\n for base_cls in lobj(base, strict=1):\n if base_cls == cls2:\n return 1\n else:\n return issubclass(base_cls, cls2)\n return 0", "def test_instance(self):\n b = Review()\n self.assertIsInstance(b, Review)\n self.assertTrue(issubclass(type(b), BaseModel))", "def match(cls, kind: 'dsl.Any') -> bool:\n return isinstance(kind, cls)", "def is_child_class(obj, classinfo):\n try:\n return issubclass(obj, classinfo)\n except TypeError:\n return None", "def _class(self, class_):\r\n\r\n if class_:\r\n if hasattr(class_, '__mro__'):\r\n #this is a class\r\n return class_\r\n else:\r\n #this is an instance\r\n return type(class_)", "def test_issubclass(self):\n self.assertTrue(issubclass(User()), BaseModel)", "def is_ctypes_instance(obj):\n return issubclass(type(obj), ctypes.Structure) or issubclass(type(obj), ctypes.Union)", "def is_instance_of(self, mu, env, obj_idx, class_idx):\n obj = self.get_reference(obj_idx)\n if not isinstance(obj, jobject):\n raise ValueError('Expected a jobject.')\n\n clazz = self.get_reference(class_idx)\n if not isinstance(clazz, jclass):\n raise ValueError('Expected a jclass.')\n\n # TODO: Casting check (?)\n\n return JNI_TRUE if obj.value.jvm_id == clazz.value.jvm_id else JNI_FALSE", "def _is_this_color(cls, obj: Any) -> bool:\n\n return type(obj) is cls", "def is_same_type_as_other(cls, other):\r\n return isinstance(other, cls)" ]
[ "0.7938733", "0.7921352", "0.7915792", "0.79037", "0.79037", "0.7898302", "0.7889888", "0.7864915", "0.7833402", "0.78156614", "0.778558", "0.77762145", "0.77750516", "0.77750516", "0.77750516", "0.77750516", "0.77750516", "0.77750516", "0.777345", "0.7769582", "0.7744331", "0.7743631", "0.7743631", "0.7735183", "0.77251005", "0.7699322", "0.7695979", "0.76958835", "0.7689502", "0.76553655", "0.76337165", "0.7629859", "0.7572446", "0.7460568", "0.74496037", "0.7399509", "0.7320281", "0.73165", "0.73128164", "0.7312458", "0.7310445", "0.72889775", "0.72826624", "0.72494704", "0.72482854", "0.72039306", "0.7198594", "0.7180114", "0.71746033", "0.716997", "0.71595746", "0.71568716", "0.7151595", "0.71453476", "0.7141164", "0.71212023", "0.7120686", "0.7114234", "0.7099861", "0.7099861", "0.70642793", "0.70642793", "0.7054249", "0.7054011", "0.7025262", "0.7024464", "0.7017139", "0.7015372", "0.6998749", "0.6988215", "0.6987929", "0.6980995", "0.6976936", "0.6918344", "0.6915231", "0.69132537", "0.6889717", "0.6880734", "0.68571705", "0.68527997", "0.6840351", "0.6806334", "0.678912", "0.6768781", "0.6719238", "0.6696865", "0.66788745", "0.6653535", "0.6651278", "0.66483116", "0.6645451", "0.6640547", "0.6624231", "0.66150194", "0.6597071", "0.65900713", "0.6588198", "0.6563817", "0.6559196" ]
0.7816911
10
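The positive document for this row checks exact type identity with type(obj) == a_class, in contrast to the many isinstance-based negatives, which also accept subclasses. A short runnable illustration of that difference; Base and Child are invented demo classes:

class Base:
    pass

class Child(Base):
    pass

def is_same_class(obj, a_class):
    # True only when obj's concrete type is exactly a_class.
    return type(obj) == a_class

assert is_same_class(Base(), Base)
assert not is_same_class(Child(), Base)   # the exact check rejects subclasses
assert isinstance(Child(), Base)          # isinstance accepts them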
Set up any state tied to the execution of the given method in a class. setup_method is invoked for every test method of a class.
def setup_method(self, objectCreation):
    pass  # body not included in the source row; a no-op keeps the stub valid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_method(self, method):\n pass", "def setup_method(self, method):\n pass", "def _decorate_setup(self, setup_method):\n @wraps(setup_method)\n def setup_method_wrapper(*args, **kwargs):\n \"\"\"setup method wrapper.\n\n * Locks the required resources for the test.\n * Executes the original setUp method.\n * Upon exception, finalizes the resources.\n \"\"\"\n if self.is_main:\n if isinstance(self.result, Result):\n skip_reason = self.result.shouldSkip(self)\n\n if skip_reason is not None:\n self.skip_sub_components(skip_reason)\n self.skipTest(skip_reason)\n\n else:\n if self.mode in (MODE_CRITICAL, MODE_OPTIONAL):\n siblings = list(self.parent)\n for component in siblings:\n if component is self:\n break\n\n if component.is_failing():\n self.skip_sub_components(\n self.PREVIOUS_FAILED_MESSAGE)\n self.skipTest(self.PREVIOUS_FAILED_MESSAGE)\n\n try:\n self.request_resources(self.get_resource_requests(),\n use_previous=True)\n\n self.override_resource_loggers()\n\n except Exception as err:\n if isinstance(err, ServerError):\n self.skip_sub_components(self.NO_RESOURCES_MESSAGE)\n\n raise\n\n try:\n if not self.IS_COMPLEX:\n self._set_parameters(override_previous=False,\n **{input_name: value.default\n for (input_name, value) in\n iteritems(self.get_inputs())\n if value.is_optional()})\n\n for pipe_name, pipe in iteritems(self._pipes):\n if pipe_name in self.get_inputs():\n setattr(self, pipe_name, pipe.get_value(self))\n\n if not self.is_main:\n # Validate all required inputs were passed\n self.validate_inputs()\n\n setup_method(*args, **kwargs)\n if isinstance(self.result, Result):\n self.result.setupFinished(self)\n\n except Exception:\n self.release_resources(dirty=True)\n raise\n\n return setup_method_wrapper", "def setup_method(self, method):\n self.hass = get_test_home_assistant()", "def setup_method(self, test_method):\n self.wo_obj = TestCases()\n self.global_config, self.test_args = self.wo_obj.get_config_data(test_method=test_method.__name__)", "def setup_method(self, method):\n save_settings()\n set_known_good_settings()", "def setup_method(cls):\n seed()", "def setup_method(self, method):\n self.hass = get_test_home_assistant()\n self.calls = []\n\n @callback\n def record_call(service):\n \"\"\"Track function calls..\"\"\"\n self.calls.append(service)\n\n self.hass.services.register('test', 'automation', record_call)", "def setup_method(self, method):\n self.hass = get_test_home_assistant()\n self.port = mock.MagicMock()\n self.switch = mfi.MfiSwitch(self.port)", "def setupClass(cls):\n print(\"In setUpClass()...\")", "def setup_method(self, method):\r\n self.cf = self.config = saunter.ConfigWrapper.ConfigWrapper()\r\n\r\n self.current_method_name = method.__name__\r\n\r\n browser = self.cf[\"browsers\"][self.cf[\"saunter\"][\"default_browser\"]]\r\n if browser[\"type\"][0] == \"*\":\r\n browser = browser[\"type\"] = browser[\"type\"][1:]\r\n\r\n profile = None\r\n if browser[\"type\"] == 'firefox':\r\n if browser[\"profiles\"][sys.platform]:\r\n profile_path = os.path.join(self.cf[\"saunter\"][\"base\"], 'support', 'profiles', browser[\"profiles\"][sys.platform])\r\n elif browser[\"profiles\"][\"profile\"]:\r\n profile_path = os.path.join(self.cf[\"saunter\"][\"base\"], 'support', 'profiles', browser[\"profiles\"][\"profile\"])\r\n else:\r\n profile_path = None\r\n\r\n if profile_path:\r\n if os.path.isdir(profile_path):\r\n profile = FirefoxProfile(profile_path)\r\n else:\r\n raise ProfileNotFound(\"Profile not found at %s\" % profile_path)\r\n\r\n if \"saucelabs\" in 
browser and browser[\"saucelabs\"][\"ondemand\"]:\r\n desired_capabilities = {\r\n \"platform\": self.cf[\"sauceLabs\"][\"os\"],\r\n \"browserName\": self.cf[\"sauceLabs\"][\"browser\"],\r\n \"version\": self.cf.get(\"SauceLabs\", \"browser_version\"),\r\n \"name\": method.__name__\r\n }\r\n if desired_capabilities[\"browserName\"][0] == \"*\":\r\n desired_capabilities[\"browserName\"] = desired_capabilities[\"browserName\"][1:]\r\n if desired_capabilities[\"platform\"] in os_map:\r\n desired_capabilities[\"platform\"] = os_map[desired_capabilities[\"platform\"]]\r\n\r\n if self.cf.has_option(\"SauceLabs\", \"selenium_version\"):\r\n desired_capabilities['selenium-version'] = self.cf.get('SauceLabs', 'selenium_version')\r\n\r\n command_executor = \"http://%s:%s@ondemand.saucelabs.com:80/wd/hub\" % (self.cf.get(\"SauceLabs\", \"username\"), self.cf.get(\"SauceLabs\", \"key\"))\r\n else:\r\n desired_capabilities = capabilities_map[browser[\"type\"]]\r\n\r\n if browser[\"proxy\"][\"type\"] and browser[\"proxy\"][\"type\"].lower() == \"browsermob\":\r\n from browsermobproxy import Client\r\n self.client = Client(self.cf.get(\"Proxy\", \"proxy_url\"))\r\n self.client.add_to_webdriver_capabilities(desired_capabilities)\r\n\r\n if \"is grid\" in self.cf[\"selenium\"] and self.cf[\"selenium\"][\"executor\"][\"is grid\"]:\r\n if browser[\"grid filters\"][\"platform\"]:\r\n desired_capabilities[\"platform\"] = browser[\"grid filters\"][\"platform\"].upper()\r\n if browser[\"grid filters\"][\"version\"]:\r\n desired_capabilities[\"platform\"] = str(browser[\"grid filters\"][\"version\"])\r\n\r\n command_executor = \"http://%s:%s/wd/hub\" % (self.cf[\"selenium\"][\"executor\"][\"host\"], self.cf[\"selenium\"][\"executor\"][\"port\"])\r\n\r\n self.driver = WebDriver(desired_capabilities=desired_capabilities, command_executor=command_executor, browser_profile=profile)\r\n\r\n self.verificationErrors = []\r\n self.matchers = Matchers(self.driver, self.verificationErrors)\r\n\r\n if \"saucelabs\" in self.cf[\"browsers\"][self.cf[\"saunter\"][\"default_browser\"]] and self.cf[\"browsers\"][self.cf[\"saunter\"][\"default_browser\"]][\"saucelabs\"][\"ondemand\"]:\r\n self.sauce_session = self.driver.session_id\r\n\r\n self._screenshot_number = 1", "def setUpClass(cls):\n cls.state_f = inspect.getmembers(State, inspect.isfunction)", "def setup_method_wrapper(*args, **kwargs):\n if self.is_main:\n if isinstance(self.result, Result):\n skip_reason = self.result.shouldSkip(self)\n\n if skip_reason is not None:\n self.skip_sub_components(skip_reason)\n self.skipTest(skip_reason)\n\n else:\n if self.mode in (MODE_CRITICAL, MODE_OPTIONAL):\n siblings = list(self.parent)\n for component in siblings:\n if component is self:\n break\n\n if component.is_failing():\n self.skip_sub_components(\n self.PREVIOUS_FAILED_MESSAGE)\n self.skipTest(self.PREVIOUS_FAILED_MESSAGE)\n\n try:\n self.request_resources(self.get_resource_requests(),\n use_previous=True)\n\n self.override_resource_loggers()\n\n except Exception as err:\n if isinstance(err, ServerError):\n self.skip_sub_components(self.NO_RESOURCES_MESSAGE)\n\n raise\n\n try:\n if not self.IS_COMPLEX:\n self._set_parameters(override_previous=False,\n **{input_name: value.default\n for (input_name, value) in\n iteritems(self.get_inputs())\n if value.is_optional()})\n\n for pipe_name, pipe in iteritems(self._pipes):\n if pipe_name in self.get_inputs():\n setattr(self, pipe_name, pipe.get_value(self))\n\n if not self.is_main:\n # Validate all required inputs were passed\n 
self.validate_inputs()\n\n setup_method(*args, **kwargs)\n if isinstance(self.result, Result):\n self.result.setupFinished(self)\n\n except Exception:\n self.release_resources(dirty=True)\n raise", "def setup_method(self, method):\n super().setup_method(method)\n\n self.CONFIG = FakeEnv.generate_data()\n\n class MyEnv(Env):\n ENVIRON = self.CONFIG\n\n self.env = MyEnv()", "def __run_test_methods(self):\n for test_method in self.runnable_test_methods():\n\n result = TestResult(test_method)\n test_method.im_self.test_result = result\n\n try:\n self._method_level = True # Flag that we're currently running method-level stuff (rather than class-level)\n\n # run \"on-run\" callbacks. eg/ print out the test method name\n for callback in self.__callbacks[self.EVENT_ON_RUN_TEST_METHOD]:\n callback(result.to_dict())\n result.start()\n\n if self.__class_level_failure:\n result.end_in_failure(self.__class_level_failure)\n elif self.__class_level_error:\n result.end_in_error(self.__class_level_error)\n else:\n # first, run setup fixtures\n self._stage = self.STAGE_SETUP\n def _setup_block():\n for fixture_method in self.setup_fixtures + [ self.setUp ]:\n fixture_method()\n self.__execute_block_recording_exceptions(_setup_block, result)\n\n def _run_test_block():\n # then run the test method itself, assuming setup was successful\n self._stage = self.STAGE_TEST_METHOD\n if not result.complete:\n self.__execute_block_recording_exceptions(test_method, result)\n\n def _setup_teardown_block():\n self.__enter_context_managers(self.setup_teardown_fixtures, _run_test_block)\n\n # then run any setup_teardown fixtures, assuming setup was successful.\n if not result.complete:\n self.__execute_block_recording_exceptions(_setup_teardown_block, result)\n\n # finally, run the teardown phase\n self._stage = self.STAGE_TEARDOWN\n def _teardown_block():\n for fixture_method in [ self.tearDown ] + self.teardown_fixtures:\n fixture_method()\n self.__execute_block_recording_exceptions(_teardown_block, result)\n\n # if nothing's gone wrong, it's not about to start\n if not result.complete:\n result.end_in_success()\n except (KeyboardInterrupt, SystemExit):\n result.end_in_interruption(sys.exc_info())\n raise\n finally:\n for callback in self.__callbacks[self.EVENT_ON_COMPLETE_TEST_METHOD]:\n callback(result.to_dict())\n\n self._method_level = False\n\n if not result.success:\n self.failure_count += 1\n if self.failure_limit and self.failure_count >= self.failure_limit:\n return", "def setup_method(self, method):\n self.cmds = []", "def setup_method(self) -> None:\n self.client = Mock()", "def setUpClass(cls):\n cls.setup_log()\n cls.setup_conn()\n cls.setup_cache()\n cls.setup_params()", "def setup_method(self):\n self.hass = get_test_home_assistant()", "def setUp(self):\n # use self.attribute to keep anything which needs to be accessed later\n print('setUp method\\n')", "def setup_method(self):\n self.ae = None", "def setup_method(self) -> None:\n super(TestProfiles, self).setup_method()\n if os.getenv('SETUP_METHOD') is None:\n self.custom.setup_method(self)", "def setup_method(self, method):\n self.old_environ = os.environ\n os.environ = Env.ENVIRON = FakeEnv.generate_data()\n self.env = Env()", "def __run_class_setup_fixtures(self):\n self.__run_class_fixtures(\n self.STAGE_CLASS_SETUP,\n self.class_setup_fixtures + [ self.classSetUp ],\n self.EVENT_ON_RUN_CLASS_SETUP_METHOD,\n self.EVENT_ON_COMPLETE_CLASS_SETUP_METHOD,\n )", "def setup_method(self, method):\n self.user1 = UserFactory()\n self.user2 = UserFactory()\n 
self.budget1 = BudgetFactory(name=\"test1\", creator=self.user1)\n self.budget2 = BudgetFactory(name=\"budget0\", creator=self.user1)\n self.budget3 = BudgetFactory(name=\"third\", creator=self.user2)\n self.budget4 = BudgetFactory(name=\"fourth\", creator=self.user2, users=[self.user1])\n TransactionFactory(budget=self.budget1, amount=Decimal('30'), user=self.user1, category='test')\n TransactionFactory(budget=self.budget1, amount=Decimal('12.20'), user=self.user1)\n TransactionFactory(budget=self.budget1, amount=Decimal('-34.85'), user=self.user1)\n TransactionFactory(budget=self.budget4, user=self.user1)\n TransactionFactory(budget=self.budget4, user=self.user2)\n TransactionFactory(budget=self.budget4, user=self.user2)", "def setup_method(self, method):\n if _debug: TestAnnexJCodec._debug(\"setup_method %r\", method)\n\n # minature trapped stack\n self.client = TrappedClient()\n self.codec = AnnexJCodec()\n self.server = TrappedServer()\n bind(self.client, self.codec, self.server)", "def setup_method(self):\n self.hass = get_test_home_assistant()\n self.mock_mqtt = mock_mqtt_component(self.hass)", "def setup_method(self, method):\n self.opp = get_test_open_peer_power()", "def setup_method(self, method):\n self.opp = get_test_open_peer_power()", "def test_class_started(self, cls):", "def setUpClass(cls) -> None:\n super().setUpClass()\n # add any additional setup needed here\n return", "def setup_method(self, _method, filename=__file__):\n super().setup_method(_method=_method, filename=filename)", "def setUpClass(cls):\r\n print(\"Setting up class for tests!\")\r\n print(\"==========================\")", "def setup_class(cls):\n # ns.assert_true(False, \"setup_class run\")\n print('setup_class\\n')", "def setup_method(self):\n self.ti_helper = TIHelper(self.indicator_type, self.indicator_field_arg)\n self.ti = self.ti_helper.ti\n self.tcex = self.ti_helper.tcex", "def setUpClass(cls):\n # Establish the session for each test method\n cls.session = CutlassTestConfig.get_session()\n\n cls.util = CutlassTestUtil()", "def setUpClass(cls):\n # Establish the session for each test method\n cls.session = CutlassTestConfig.get_session()\n cls.util = CutlassTestUtil()", "def setUpClass(cls):\n # Establish the session for each test method\n cls.session = CutlassTestConfig.get_session()\n cls.util = CutlassTestUtil()", "def setupClass(cls):\n cls._tmp_dir = tempfile.mkdtemp()\n cls.test_filepath = os.path.join( cls._tmp_dir, \"test_data.h5\" )\n cls._generate_testdata_h5(cls.test_filepath)\n cls.server_proc, cls.shutdown_event = cls._start_mockserver( cls.test_filepath, same_process=True )\n cls.client_connection = httplib.HTTPConnection( \"localhost:8000\" )", "def setUpClass(cls) -> None:\n helper_funcs.setup_mock_files() # setup mock files\n cls.module, cls.ffi = helper_funcs.load(cls._filenames, cls._function_names,\n header_includes=cls.helper_include,\n compiled_file_end=\"helper_func\")", "def setUpClass(self):\n\n self.test_a_summarize()\n self.test_b_store()\n self.test_c_get_existing_records()\n self.test_d_remove_database()", "def setUpClass(cls):\n log.debug('setUpClass of {0} called...'.format(cls))\n # Check to see if the environment that this class is initialised for\n # matches the project path (if specified). 
If the environment was\n # initialised by the ChipTools internal loaded then skip these steps.\n if cls._environment_type != 'chiptools':\n if cls._loaded_path != cls.project:\n project = Project()\n project.load_project(cls.project)\n # Using external test runner (such as Nosetests) to execute this\n # test. The test environment therefore needs to be loaded from\n # the Project instance:\n simulator, root, libs = cls.get_environment(project)\n cls._loaded_path = cls.project\n cls._simulator = simulator\n cls._simulation_root = root\n cls._simulation_libraries = libs\n cls._environment_type = 'external'\n # Compile the design if required (simulators with caching will\n # perform this step once).\n cls._simulator.compile_project(\n includes=cls._simulation_libraries\n )\n log.debug(\n '...finished initialising environment for {0}'.format(cls)\n )\n if cls._environment_type is None:\n raise EnvironmentError(\n 'The simulation environment for this TestCase is not ' +\n 'initialised so the test cases cannot be executed. ' +\n 'If you are running this test directly ensure that ' +\n 'the TestCase class has a \"project\" attribute which ' +\n 'holds a path to a valid ChipTools project file.'\n )", "def setUp(self):\n GlusterBaseClass.setUp.im_func(self)\n self.test_method_complete = False", "def setUpClass(cls):\n # Call the setUpClass method(s) of the parent class(es)", "def setUpClass(cls):\n pass", "def setUpClass(cls):\n pass", "def setUpClass(cls):\n pass", "def setUpClass(cls):\n pass", "def setup_stage(method):\n def decorator(self):\n name = method.func_name\n if should_run_stage(name):\n say(\"Setup.%s\" % name)\n method(self)\n set_stage(name)\n decorator.__doc__ = method.__doc__\n return decorator", "def setUpClass(cls):\r\n pass", "def setUp(self):\n self._class = State()\n self._class2 = State()\n self._name = \"State\"", "def setUpClass(cls) -> None:\n stack_name = TestStateMachine.get_and_verify_stack_name()\n\n client = boto3.client(\"cloudformation\")\n response = client.list_stack_resources(StackName=stack_name)\n resources = response[\"StackResourceSummaries\"]\n state_machine_resources = [\n resource for resource in resources if resource[\"LogicalResourceId\"] == \"TransactionProcessorStateMachine\"\n ]\n transaction_table_purchase_resources = [\n resource for resource in resources if resource[\"LogicalResourceId\"] == \"PurchaseTable\"\n ]\n transaction_table_refund_resources = [\n resource for resource in resources if resource[\"LogicalResourceId\"] == \"RefundTable\"\n ]\n transaction_table_error_resources = [\n resource for resource in resources if resource[\"LogicalResourceId\"] == \"ErrorTable\"\n ]\n lambda_function_purchase_resources = [\n resource for resource in resources if resource[\"LogicalResourceId\"] == \"PurchaseFunction\"\n ]\n lambda_function_validator_resources = [\n resource for resource in resources if resource[\"LogicalResourceId\"] == \"ValidatorFunction\"\n ]\n lambda_function_refund_resources = [\n resource for resource in resources if resource[\"LogicalResourceId\"] == \"RefundFunction\"\n ]\n lambda_function_error_resources = [\n resource for resource in resources if resource[\"LogicalResourceId\"] == \"ErrorFunction\"\n ]\n\n if not state_machine_resources:\n raise Exception(\"Cannot find TransactionProcessorStateMachine\")\n if not transaction_table_purchase_resources:\n raise Exception(\"Cannot find DDBPurchaseTable\")\n if not transaction_table_refund_resources:\n raise Exception(\"Cannot find DDBRefundTable\")\n if not 
transaction_table_error_resources:\n raise Exception(\"Cannot find DDBErrorTable\")\n if not lambda_function_error_resources:\n raise Exception(\"Cannot find ErrorFunction\")\n if not lambda_function_purchase_resources:\n raise Exception(\"Cannot find PurchaseFunction\")\n if not lambda_function_refund_resources:\n raise Exception(\"Cannot find RefundFunction\")\n if not lambda_function_validator_resources:\n raise Exception(\"Cannot find ValidatorFunction\")\n\n cls.state_machine_arn = state_machine_resources[0][\"PhysicalResourceId\"]\n cls.transaction_table_purchase = transaction_table_purchase_resources[0][\"PhysicalResourceId\"]\n cls.transaction_table_refund = transaction_table_refund_resources[0][\"PhysicalResourceId\"]\n cls.transaction_table_error = transaction_table_error_resources[0][\"PhysicalResourceId\"]\n cls.lambda_function_validator = lambda_function_validator_resources[0][\"PhysicalResourceId\"]\n cls.lambda_function_purchase = lambda_function_purchase_resources[0][\"PhysicalResourceId\"]\n cls.lambda_function_refund = lambda_function_refund_resources[0][\"PhysicalResourceId\"]\n cls.lambda_function_error = lambda_function_error_resources[0][\"PhysicalResourceId\"]", "def run(self):\n\n self.__run_class_setup_fixtures()\n self.__enter_context_managers(self.class_setup_teardown_fixtures, self.__run_test_methods)\n self.__run_class_teardown_fixtures()", "def setup_class(klass):", "def setup_class(klass):", "def setUp(self):\n print('Calling \\'setUp\\'')", "def test_set_up(self):\n # set_up has been called for the test suite\n # (see runtests) so there is not much that\n # can be tested\n with patch('react.utils.shortcuts.set_up', autospec=True) as s:\n utils.set_up()\n s.assert_called_once_with()", "def __init_fixture_methods(self):\n # init our self.(class_setup|setup|teardown|class_teardown)_fixtures lists\n for fixture_type in fixture_types:\n setattr(self, \"%s_fixtures\" % fixture_type, [])\n\n # for setup methods, we want oldest class first. 
for teardowns, we want newest class first\n hierarchy = list(reversed(type(self).mro()))\n for cls in hierarchy[1:]:\n # mixins on TestCase instances that derive from, say, object, won't be set up properly\n if hasattr(cls, '_fixture_methods'):\n # the metaclass stored the class's fixtures in a _fixture_methods instance variable\n for fixture_type, fixture_methods in cls._fixture_methods.iteritems():\n bound_fixture_methods = [instancemethod(func, self, self.__class__) for func in fixture_methods]\n if fixture_type.endswith('setup'):\n # for setup methods, we want methods defined further back in the\n # class hierarchy to execute first\n getattr(self, \"%s_fixtures\" % fixture_type).extend(bound_fixture_methods)\n else:\n # for teardown methods though, we want the opposite\n setattr(self, \"%s_fixtures\" % fixture_type, bound_fixture_methods + getattr(self, \"%s_fixtures\" % fixture_type))", "def setup_class(self):\n # Initialize instance variable(s)\n self.n2vc = None\n\n # Track internal state for each test run\n self.state = {}\n\n # Parse the test's descriptors\n self.nsd = get_descriptor(self.NSD_YAML)\n self.vnfd = get_descriptor(self.VNFD_YAML)\n\n self.ns_name = self.nsd['name']\n self.vnf_name = self.vnfd['name']\n\n self.charms = {}\n self.parse_vnf_descriptor()\n assert self.charms is not {}\n\n # Track artifacts, like compiled charms, that will need to be removed\n self.artifacts = {}\n\n # Build the charm(s) needed for this test\n for charm in self.get_charm_names():\n # debug(\"Building charm {}\".format(charm))\n self.get_charm(charm)\n\n # A bit of a hack, in order to allow the N2VC callback to run parallel\n # to pytest. Test(s) should wait for this flag to change to False\n # before returning.\n self._running = True\n self._stopping = False", "def setup_method(self, method):\n super().setup_method(method)\n\n Env.ENVIRON = {}\n self.env.read_env(\n Path(__file__, is_file=True)('test_env.txt'),\n PATH_VAR=Path(__file__, is_file=True).__root__\n )", "def setUpClass(self):", "def setUpClass(cls):\n cls.student_f = inspect.getmembers(Student, inspect.isfunction)", "def _set_up():\n repl._setUp = self.setUp", "def setup_class(cls):", "def setup_class(cls):", "def setup():\n\n self.zorp_mock = Mock()\n\n for name, func in six.iteritems(self._get_zorp_mock_methods()):\n self.zorp_mock.server.registry.put(name, func)\n\n self.zorp_mock.start()", "def __call__(self, result=None):\n self._pre_setup()\n super(TestCase, self).__call__(result)\n self._post_tearDown()", "def teardown_method(self, method) -> None:", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def __init__(self):\n self.setup_called = False", "def setUp(self):\n self.p_state = p_state", "def setUp(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError(\"Need to be implemented in subclasses\")", "def setUp(self):\n log.debug('TestModule.setUp')\n if self.module is None:\n self.module = _import(self.moduleName, [self.path], self.conf)\n log.debug('Imported %s from %s on %s', self.module,\n self.moduleName, self.path)\n if hasattr(self.module, '__path__'):\n names = ['setupPackage', 'setUpPackage', 'setup_package']\n else:\n names = ['setupModule', 'setUpModule', 'setup_module']\n names += ['setUp', 'setup']\n try_run(self.module, names)", "def setup_class(self):\n pass", "def setUpClass(cls):\n super(CharmOperationTest, 
cls).setUpClass()", "def setUpClass(cls):\n super(BaseTest, cls).setUpClass()\n try:\n cls.configure()\n cls.execute()\n except:\n cls.annihilate()\n raise", "def setUpClass(cls, *_):\n super(PagingTest, cls).setUpClass()\n warnings.simplefilter('ignore')\n cls.service_manager = create_service_manager([], ['classifier'])\n cls._tbl_num = cls.service_manager.get_table_num(Classifier.APP_NAME)\n\n testing_controller_reference = Future()\n classifier_reference = Future()\n test_setup = TestSetup(\n apps=[\n PipelinedController.Classifier,\n PipelinedController.Testing,\n PipelinedController.StartupFlows,\n ],\n references={\n PipelinedController.Classifier:\n classifier_reference,\n PipelinedController.Testing:\n testing_controller_reference,\n PipelinedController.StartupFlows:\n Future(),\n },\n config={\n 'bridge_name': cls.BRIDGE,\n 'bridge_ip_address': cls.BRIDGE_IP,\n 'internal_ip_subnet': '192.168.0.0/16',\n 'ovs_gtp_port_number': 32768,\n 'ovs_mtr_port_number': 15577,\n 'ovs_internal_sampling_port_number': 15578,\n 'ovs_internal_sampling_fwd_tbl_number': 201,\n 'ovs_internal_conntrack_port_number': 15579,\n 'ovs_internal_conntrack_fwd_tbl_number': 202,\n 'clean_restart': True,\n 'paging_timeout': 30,\n 'classifier_controller_id': 5,\n 'enable_nat': True,\n 'ovs_uplink_port_name': \"patch-up\",\n },\n mconfig=PipelineD(),\n loop=None,\n service_manager=cls.service_manager,\n integ_test=False,\n rpc_stubs={'sessiond_setinterface': MagicMock()},\n )\n\n BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)\n cls.thread = start_ryu_app_thread(test_setup)\n cls.classifier_controller = classifier_reference.result()\n cls.testing_controller = testing_controller_reference.result()", "def setUp(self):\n self._control = test_control.PauseFailControl()\n self._digest_pool = logging_pool.pool(test_constants.POOL_SIZE)\n self._digest = _digest.digest(\n _stock_service.STOCK_TEST_SERVICE, self._control, self._digest_pool)\n\n generic_stub, dynamic_stubs, self._memo = self.implementation.instantiate(\n self._digest.methods, self._digest.event_method_implementations, None)\n self._invoker = self.invoker_constructor.construct_invoker(\n generic_stub, dynamic_stubs, self._digest.methods)", "def setup_method(self):\n self.project_dir = os.path.join(DIR, 'test-project')\n self.e2e = E2EEnv(self.project_dir)", "def setup_method(self):\n self.ti_helper = TIHelper(self.group_type, required_fields=self.required_fields)\n self.ti = self.ti_helper.ti\n self.tcex = self.ti_helper.tcex", "def make_set_up(set_up=None):\n def _do_set_up(obj):\n if set_up:\n return set_up(obj)\n return obj.setUp()\n return _do_set_up", "def setUp(self, up):\n\t\tself.up = up", "def setUpClass(cls):\n cls._tmpdir = tempfile.mkdtemp()\n if (not os.path.isdir(cls._tmpdir)):\n raise Exception(\"Failed to create tempdir to use for dump tests\")\n try:\n cls.extraSetUpClass()\n except Exception:\n pass", "def before_run_tests(cls):\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setUpClass(cls):\r\n # Clear out any existing modulestores,\r\n # which will cause them to be re-created\r\n # the next time they are accessed.\r\n clear_existing_modulestores()\r\n TestCase.setUpClass()", "def setup_method(self,method):\n from importlib import reload\n reload(bt_temp)\n self.transport_obj = bt_temp.ns.modelList[0].levelModelList[0]\n self.bdm2_obj = self.transport_obj.velocityPostProcessor.vpp_algorithms[0]\n self._setRelativePath()", "def test_setup(self):\n with 
pytest.raises(NotImplementedError):\n self.behaviour.setup()", "def setup_class(cls):\n cls.mock_get_patcher = patch('project.services.requests.get')\n cls.mock_get = cls.mock_get_patcher.start()", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def setup(self):\n raise NotImplementedError", "def __call__(self, result=None):\n try:\n self._pre_setup()\n super(TestCase, self).__call__(result)\n finally:\n self._post_teardown()", "def setup(self) -> None:", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_RMT_Util:\", self._testMethodName)" ]
[ "0.75784695", "0.75784695", "0.74024475", "0.7322949", "0.72970635", "0.72218513", "0.70287365", "0.6968257", "0.6819937", "0.67959106", "0.6794511", "0.6696449", "0.66864085", "0.6648191", "0.6600183", "0.659615", "0.6559962", "0.65412855", "0.65281886", "0.6489964", "0.6485201", "0.6484623", "0.64716375", "0.6440813", "0.6377603", "0.63452506", "0.6340291", "0.63207495", "0.63207495", "0.63141537", "0.63056284", "0.63035023", "0.629433", "0.6284603", "0.62840784", "0.6275843", "0.6251558", "0.6251558", "0.6229501", "0.6195655", "0.619454", "0.617918", "0.6170356", "0.6157203", "0.6136336", "0.6136336", "0.6136336", "0.6136336", "0.61154723", "0.6093295", "0.60331804", "0.6031173", "0.6027659", "0.5994838", "0.5994838", "0.59663004", "0.5955467", "0.59321225", "0.5924233", "0.59034735", "0.59006816", "0.58982927", "0.5886498", "0.58788615", "0.58788615", "0.58711207", "0.58684015", "0.58506244", "0.5845662", "0.5845662", "0.5843906", "0.5839412", "0.5837896", "0.58251226", "0.5807423", "0.5805195", "0.58020914", "0.5796929", "0.57835203", "0.57796985", "0.57793236", "0.57732785", "0.57702404", "0.57690096", "0.57603365", "0.57466257", "0.5733444", "0.5733444", "0.5733444", "0.5718833", "0.5715744", "0.57118237", "0.57117844", "0.5701201", "0.5701201", "0.5701201", "0.5701201", "0.57008207", "0.569702", "0.56968194" ]
0.61271554
48
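The query for this row quotes pytest's xunit-style per-method fixture hook. A minimal sketch of how such a hook is typically written under pytest; the class, attribute, and test names are invented:

class TestExample:
    def setup_method(self, method):
        # Runs before every test method in the class; 'method' is the
        # test function pytest is about to invoke.
        self.records = []
        self.current = method.__name__

    def test_starts_empty(self):
        assert self.records == []
        assert self.current == "test_starts_empty"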
Tear down any state that was previously set up with a setup_method call.
def teardown_method(self, objectCreation):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def teardown_method(self, method) -> None:", "def teardown_method(self, method):\n pass", "def teardown_method(self, method):\n pass", "def teardown_method(self, test_method):\n self.wo_obj = None\n self.config_data = None", "def teardown_method(self):", "def teardown_method(self, method):\n restore_settings()", "def _teardown(self):\n # No-op base implementation", "def teardown(self,**kwargs):\n pass", "def teardown(self):\n raise NotImplementedError", "def teardown(self):\n raise NotImplementedError", "def teardown(self) -> None:\n pass", "def teardown(self) -> None:\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self) -> None:", "def teardown(self) -> None:", "def teardown(self) -> None:", "def teardown(self):\n pass # pylint: disable=unnecessary-pass", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def teardown_method(self):\n world.clear_paths()\n print(\"\\nEnd of tests in: %s\\n-------------------\\n\" % __name__)\n self.bigml = {}", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self):\n if self.ae:\n self.ae.shutdown()", "def teardown_method(self, method):\n self.opp.stop()", "def teardown_method(self, method):\n self.opp.stop()", "def teardown_method(self, method):\n self.hass.stop()", "def teardown_method(self, method):\n self.hass.stop()", "def teardown_method(self, method):\n self.hass.stop()", "def teardown_method(self, method):\n self.hass.stop()", "def teardown_method(self, method):\n self.hass.stop()", "def teardown_method(self, method):\n self.hass.stop()", "def teardown(self):\n del self.testInst, self.dname\n\n return", "def teardown(self):\n self.tcex.log.trace('teardown')", "def teardown_class(self):\n self._tester = None\n self._sut = None", "def teardown(self):\n del self.testInst, self.dname, self.test_vals, self.test_fracs\n\n return", "def teardown(self, event):\n pass", "def teardown_method(self) -> None:\n if os.getenv('TEARDOWN_METHOD') is None:\n self.custom.teardown_method(self)\n super(TestProfiles, self).teardown_method()", "def teardown_class(self):\n pass", "def teardown(self):\n del self.testInst, self.dname, self.test_val_length\n\n return", "def teardown(self):\n del self.testInst, self.dname, self.test_val_length\n\n return", "def teardown(self):\n\n del self.testC, self.insts, self.testInst, self.dname, self.test_vals\n\n return", "def _teardown(self) -> None:\n try:\n if (\n self._config\n and \"restore_state\" in self._config[\"core\"]\n and self._config[\"core\"][\"restore_state\"]\n ):\n self._save_state()\n except Exception as e:\n logger.warning(\"Unexpected error while saving state: %s\", str(e))", "def teardown_method(self):\n self.hass.stop()", "def teardown_method(self):\n self.hass.stop()", "def teardown_method(self):\n self.hass.stop()", "def teardown_method(self, method):\n assert self.old_environ is not None\n os.environ = self.old_environ", "def teardown_method(self, method):\n os.chdir(self.old_pwd)\n shutil.rmtree(self.test_workspace, True)", "def teardown():\n\n self.zorp_mock.stop()", "def teardown(self):\n del self.testC, self.testI, self.bounds\n\n return", "def teardown(self):\n\n del self.testInst, self.test_bins, self.test_label, 
self.test_data\n del self.out_keys, self.out_data\n\n return", "def teardown_provider(self):\n pass", "def teardown(self):\n # self.in_kwargs, self.ref_time,\n del self.warn_msgs, self.war\n return", "def tearDown(self):\n test_env_teardown()", "def tearDown(self):\n self.teardown_beets()", "def teardown_method(self, method):\n # Remove user file and user\n if self.user is not None:\n try:\n path = self.user._User__filename()\n os.remove(path)\n except OSError:\n pass\n del self.user\n\n # Restore original user\n self.request.cookies = self.saved_cookie\n self.request.user = self.saved_user\n\n # Remove user name to id cache, or next test will fail\n caching.CacheEntry(self.request, 'user', 'name2id', scope='wiki').remove()\n try:\n del self.request.cfg.cache.name2id\n except:\n pass", "def teardown(self):\n del self.testC, self.bounds, self.one_d_vars, self.unequal_one_d_vars\n del self.testI\n\n return", "def _tear_down():\n repl._tearDown = self.tearDown", "def teardown(self):\n del self.testInst, self.bounds1, self.bounds2\n\n return", "def test_teardown(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.teardown()", "def teardown_class(klass):", "def teardown_class(klass):", "def teardown_method(self, method):\n\n db = _get_db()\n db.drop_collection('user_stat')\n db.drop_collection('user')\n disconnect(alias='test')", "def teardown_class(cls):", "def teardown_class(cls):", "def teardown_class(cls):", "def teardown(cls):\n del cls.user\n del cls.console", "def teardown(self, recursive=None):\n log_method_call(self, self.name, status=self.status,\n controllable=self.controllable)\n # we don't really care about the return value of _preTeardown here.\n # see comment just above mddeactivate call\n self._preTeardown(recursive=recursive)\n\n # Since BIOS RAID sets (containers in mdraid terminology) never change\n # there is no need to stop them and later restart them. Not stopping\n # (and thus also not starting) them also works around bug 523334\n if self.type == \"mdcontainer\" or self.type == \"mdbiosraidarray\":\n return\n\n if self.isDisk:\n # treat arrays whose members are disks as partitionable disks\n return\n\n # We don't really care what the array's state is. If the device\n # file exists, we want to deactivate it. mdraid has too many\n # states.\n if self.exists and os.path.exists(self.path):\n mdraid.mddeactivate(self.path)\n\n self._postTeardown(recursive=recursive)", "def teardown(self, rc):\n pass", "def teardown(self) -> BaseStep:\n if self.session is not None:\n self.session.close()\n self.is_initialized = False\n\n return self", "def teardown(self, log, info):\n raise NotImplementedError", "def teardown(self, exception):", "def tearDown(self):\n\n self._tear_down()", "def teardown_class(cls):\n pass", "def teardown_class(cls):\n pass", "def tearDown(self):\n pass\n # teardown called after each test\n # e.g. 
maybe write test results to some text file", "def teardown(self) -> None:\n self._unregister_service()\n self._unregister_agent()", "def tear_down(self):\n verdict, msg = TestStepEngine.tear_down(self)\n self._device.inject_device_log(\"i\", \"ACS_TESTCASE\", \"TEARDOWN: %s\" % self._name)\n return verdict, msg", "async def _teardown(self, commit: bool = None):", "def teardown(self):\n for cls in self.implements:\n getattr(self, cls.name).teardown()", "def teardown(self):\n if self.ae:\n self.ae.shutdown()", "def teardown(self):\n for mr in self.mrs:\n mr.restore_pretest(pretest=mr.pretest_info)", "def teardown(self):\n\n\t\tself.shutdown = True\n\t\tself.terminate_process()", "def teardown(self):\n del self.testInst, self.bounds1, self.bounds2, self.long_bins\n del self.mlt_bins\n\n return", "def teardown_test(self):\n self.log.info('Tearing down the test case')\n self.iperf_server.stop()\n self.access_point.bridge.teardown(self.brconfigs)\n self.access_point.close()\n wputils.reset_host_interface(self.pkt_sender.interface)\n self.mon.usb('on')", "def teardown(self):\n\n self.dummy.set_current()\n self.endpoints.lock()", "def tearDown(self):\n reset()", "def teardown_method(self, method):\r\n if hasattr(self, \"config\"):\r\n if \"saucelabs\" in self.cf[\"browsers\"][self.cf[\"saunter\"][\"default_browser\"]] and not self.cf[\"browsers\"][self.cf[\"saunter\"][\"default_browser\"]][\"saucelabs\"][\"ondemand\"]:\r\n self.take_named_screenshot(\"final\")\r\n\r\n if hasattr(self, \"driver\"):\r\n self.driver.quit()", "def teardown(cls):\n del cls.my_object", "def tearDown(self):\n super(TestCase, self).tearDown()\n self._context.check_done()", "def teardown_class(cls):\n cls._unpatch_logger()\n cls.multiplexer.disconnect()\n cls.decision_maker.stop()", "def test_teardown(self):\n with pytest.raises(NotImplementedError):\n self.handler.teardown()", "def teardown_application(self):\n pass", "def tearDown(self):\n self.testbed.deactivate()", "def tearDown(self):\n self.testbed.deactivate()" ]
[ "0.79968005", "0.78846675", "0.78846675", "0.7871643", "0.7792453", "0.7720631", "0.7659778", "0.7625319", "0.757913", "0.757913", "0.75738186", "0.75738186", "0.7567817", "0.7567817", "0.7567817", "0.7565642", "0.7565642", "0.7565642", "0.7450825", "0.7437996", "0.7437996", "0.7437996", "0.7431355", "0.7428631", "0.7428631", "0.7428631", "0.7428631", "0.7428631", "0.7428631", "0.7366032", "0.7366032", "0.7304177", "0.7304177", "0.7304177", "0.7304177", "0.7304177", "0.7304177", "0.72788274", "0.7175022", "0.71385556", "0.71075046", "0.7091692", "0.7056069", "0.70300037", "0.7023737", "0.7023737", "0.69832927", "0.6955219", "0.69526285", "0.69526285", "0.69526285", "0.6932532", "0.6914876", "0.69015354", "0.6883374", "0.6854549", "0.679873", "0.67654216", "0.67536587", "0.6738352", "0.6716742", "0.67105925", "0.67047703", "0.6701911", "0.668103", "0.6680603", "0.6680603", "0.6670707", "0.66693103", "0.66693103", "0.66693103", "0.66567045", "0.66522944", "0.663639", "0.6627201", "0.6607731", "0.6575961", "0.65752053", "0.65742815", "0.65742815", "0.6570281", "0.65686333", "0.6557653", "0.6547724", "0.6524568", "0.6519039", "0.64895844", "0.6482584", "0.64744186", "0.64699376", "0.64681727", "0.6467692", "0.6464617", "0.6460315", "0.6458118", "0.64537394", "0.6439651", "0.6438113", "0.64334685", "0.64334685" ]
0.71103287
40
Stop the simulator thread.
def stop(self):
    self._stop_event.set()
    super().stop()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stop(self):\n self._Thread__stop()", "def stop(self):\n\n self.stop_thread = True", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def stop(self):\n self._state.transit(sitcpy.THREAD_STOPPING)", "def stop(self) -> None:\n # set the stop event, then cancel the timer\n self._stop_event.set()\n\n # wait for the runthread to finish\n try:\n self.join()\n except RuntimeError:\n pass", "def stop_thread(self):\n t, e = self.workers[0]\n e = e.set() # put event to set True for stop thread\n del self.workers[0]", "def stop(self):\n logging.info(\"Shutting down thread...\")\n self.disconnect()\n self.running = False", "def stop(self):\n self.running = False\n self.join()", "def stop(self):\n self.thread.join()", "def stop(self):\n self._run = False\n self.IA.stop()", "def stop(self):\n debug(\"CBA4.__worker_thread.stop()\")\n self.__run = False\n #end stop()", "def stop(self):\n self.controller.stop()", "def stop(self) -> None:\n assert self.is_alive(), \"Thread not alive\"\n self.stopped.set()", "def stop(self):\r\n with self._lock:\r\n self._stopped = True\r\n self.join()", "def stop(self):\n\t\tself._run_flag = False\n\t\tself.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self._run_flag = False\n self.wait()", "def stop(self):\n self.__running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\n self.running = False", "def stop(self):\r\n self.running = False", "def stop(self):\r\n self.running = False", "def stop(self):\n self.send_stop()\n self.join()", "def stop(self):\n self._running = False", "def stop(self):\n self._running = False", "def stop_thread(self):\n self.running = False\n if self.run_thread.is_alive():\n self.run_thread.join()", "def stop(self) -> None:\n self._running = False", "def stop(self):\r\n self.stopped = True\r\n time.sleep(1)", "def stop(self):\n with self.stop_lock:\n self._stop_event.set()", "def stop(self):\n if self._server_thread is None:\n return\n self._stopping.set()\n self._server_thread = None\n self._stopped.wait()", "async def stop(self) -> None:\n if self.id:\n await self._client.stop_thread(self.id)\n\n self._is_running = False\n self._id = 0", "def stop(self):\n self._stopped.set()\n if self._timer:\n self._timer.cancel()\n self._timer = None", "def stop(self):\n self.api.stop()", "def stop(self):\n with self.stop_event_lock:\n self.stop_event.set()\n self.clean_up()", "def stop(self):\n self._stop = True\n self.wake_up()", "def stop(self):\n self._run = False", "def stop(self):\n if self.thread_state is True:\n os.system('qq stop')\n self.thread_state = False", "def stop(self):\n\n self.detach()\n self._pi.stop()\n self._stopped = True", "def stop(self) -> None:\n ...", "def _stop(self):\n self._pi.stop()", "def stop(self):\n self.stopped = True", "def stop(self):\n self.stopped = True", "def stop(self):\n self._stop_event.set()", "def stop() -> None:", "def stop(self):\n self._stop_flag = True", "def stop(self):\r\n self.stopped = True", "def stop(self):\r\n self._stop.set()", "def _stop(self) -> None:\n self._stopped.set()", "def _stop(self, terminate_sim: bool = True):\n steppable_registry = 
CompuCellSetup.persistent_globals.steppable_registry\n steppable_registry.on_stop()", "def stop(self):\n self._log.info(\"Stopping\")\n self._running.clear()", "def stop(self):\n self._schedule(0, 0)\n self._started = False", "def slot_stop(self):\n\n self.thread.working = False", "def stop_threading(self):\n if self.algorithm_thread != None:\n self.running = False\n self.algorithm_thread.join()\n self.algorithm_thread = None\n if self.step_event.is_set():\n self.step_event.clear()", "def stop(self) :\n raise NotImplementedError(\"stop not implemented\")", "def stop(self, code=None):\n\n if not self.running:\n return\n\n self._running = False\n\n self.fire(stopped(self))\n\n if self.root._executing_thread is None:\n for _ in range(3):\n self.tick()\n\n if code is not None:\n raise SystemExit(code)", "async def stop(self):\n await self._backend.stop()", "def stop(self):\r\n self.stopped = True\r\n #self.worker.join()\r\n self.FPSThread.join()", "def stop(self):\n self.should_run = False\n if self.is_alive():\n self.join()", "async def stop(self):\n self._stopped.set()", "def stop(self):\n self._stop_signal = True", "def stop(self):\n\n self.__stop_threads = True\n self.__new_bus_Socket.close()\n self.__bus_stations_Socket.close()\n print(f\"stopped {self}\")", "def stop(self):\r\n self.stopped = True\r\n time.sleep(3)", "def stop(self):\n with self._lock:\n if not self.stopped():\n self._started = None\n getattr(self.factory, 'stop_' + self.class_name())(self)", "def stop():", "def stop():", "def stop():", "def stop():", "async def _stop(self) -> None:\n self._stopped.set()", "def stop(self) -> None:\n pass", "def stop(self):\n\n self.keep_running = False", "def stop(self):\n self.stopped = True", "def stop(self):\r\n raise NotImplementedError('method stop() is not implemented')", "def stopController(self):\n self.running = False", "def stop(self):\n if self.running:\n self._unschedule_all()\n self.loop.stop()", "def stop(self):\n self._stop.set()", "def stop (self):\n pass", "def stop (self):\n pass", "def stop(self):\n self.stop_recognising.set()\n self.thread.join()", "def stop(self):\n GameEngine().stop()\n self.on_stop()", "def stop(self):\n if self.debug:\n print(\"%s stop\" % self.name)\n self.force_exit()", "def stop(self):\n self.scion_sh('stop')", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n pass", "def stop(self):\n self.stop_event.set()", "def stop(self):\n self.stop_event.set()" ]
[ "0.81179816", "0.796208", "0.7771914", "0.7771914", "0.76687425", "0.76618785", "0.76455426", "0.76039106", "0.75676537", "0.75364685", "0.7525308", "0.7504455", "0.7481357", "0.7431703", "0.7421729", "0.7421202", "0.7421202", "0.7421202", "0.7421202", "0.7421202", "0.7421202", "0.7415426", "0.740481", "0.740481", "0.740481", "0.740481", "0.740481", "0.738109", "0.738109", "0.73742974", "0.7366818", "0.7366818", "0.7357696", "0.7328492", "0.73261267", "0.73089004", "0.7293952", "0.72899014", "0.7276056", "0.7268756", "0.72519207", "0.7237337", "0.7237328", "0.7234576", "0.72280896", "0.7218432", "0.72137105", "0.7194043", "0.7194043", "0.7191958", "0.7190089", "0.71898365", "0.718394", "0.71806604", "0.7180066", "0.71652025", "0.7162806", "0.71539694", "0.71496737", "0.71447766", "0.7136844", "0.71352494", "0.7124793", "0.71209615", "0.71130204", "0.7110021", "0.7106346", "0.71061146", "0.71025616", "0.7091283", "0.7086847", "0.7086847", "0.7086847", "0.7086847", "0.70839256", "0.70829076", "0.7081245", "0.7078081", "0.70766515", "0.707508", "0.7072506", "0.70709103", "0.7065228", "0.7065228", "0.7058199", "0.70555866", "0.7044494", "0.7044036", "0.7040721", "0.7040721", "0.7040721", "0.7040721", "0.7040721", "0.7040721", "0.7040721", "0.7040721", "0.7040721", "0.7040721", "0.7040721", "0.7040067", "0.7040067" ]
0.0
-1
Returns true if the logged in user is active
def is_active(self):
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_active(self):\n return self.status == ACTIVE_USER", "def is_active(self):\n return self.user.is_active", "def is_active_user(self):\n\n return self.is_active", "def check_active(self, user):\r\n if not self.require_active:\r\n # Ignore & move on.\r\n return True\r\n\r\n return user.is_active", "def is_active():\n return True", "def active(self):\n if self._active is not None:\n return self._active\n # Try to get it from the userprofile\n try:\n self._active = self.userprofile.user.is_active\n except UserProfile.DoesNotExist:\n # User profile does not exist.\n # The default value for active is False.\n self._active = False\n return self._active", "def is_active(self) -> bool:\r\n return self.active", "def is_active(self) -> bool:\n return self.active == \"active\"", "def is_active(self) -> bool:\n return self.__is_active", "def is_active(self) -> bool:", "def activate_user(self, user):\n if not user.active:\n user.active = True\n return True\n return False", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def is_active(self) -> bool:\n return self._is_active", "def logged_in(self):\n return self.user is not None", "def is_active(self):\r\n return True", "def test_func(self):\n return self.request.user.is_active # any active user", "def is_active(self) -> bool:\n if not self.expires_at:\n return False\n return self.expires_at > datetime.datetime.now()", "def IsActive(self):\n return True", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def has_permission(self, request):\n return request.user.is_active", "def is_active(self):\n return self.active", "def is_active(self):\n return self.active", "def is_active(self):\n return self.active", "def is_active(self):\n return self.active", "def is_active(self):\n return self._is_active", "def is_active(self):\n return self._is_active", "def is_active(self):\n return self._is_active", "def is_active(self):\r\n return self.active", "def user_is_activated(self, user_name):\n return not self._simultanious_log_ins and \\\n user_name in self._active_users_names", "def active(self) -> bool:\n return pulumi.get(self, \"active\")", "def logged_in():\n\n if current_user.is_authenticated:\n return True\n\n return False", "def is_logged_in():\n return 'user' in session", "def is_logged_in(self) -> bool:\n return self.id is not None and self.username is not None", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "def has_permission(self, request):\n\t\treturn request.user.is_active", "def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False", "def is_logged_in(self):\n return self.__is_logged_in", "def is_active(self):\n return self._active", "def _checkUserInactive(username,self):\r\n active = False\r\n user = _findUser(username)\r\n \r\n if user is not None:\r\n active = user.getIsActive()\r\n \r\n return active", "def isLoggedIn(self):\n session = self.getSession()\n if session is not None:\n return True\n return False", "def is_authenticated(self):\n return self.user is not None and self.state == AuthenticationOptions.authenticated", "def logged_in(self) -> bool:\n return self._logged_in", "def IsActive(self):\r\n\r\n return self.active", "def is_logged_in(self):\n return self.cookie is not None", "def test_activate_active_user(self):\n activate_user(self.user, self.request)\n self.assertEqual(self.user.is_active, True)", "def 
_can_login(self):\n return all([self.user.is_active, self.status, self.status_detail == \"active\"])", "def is_active(self):\n return self._is_record_status(self.ACTIVE)", "def active(self) -> bool:\n return self.relay(\"active\")", "def check_user_exists(self):\n is_exists = False\n if auth.UserInfo.objects.filter(\n user_id__username=self.username,\n is_active=True).exists():\n is_exists = True\n return is_exists", "def is_logged_in(self):\n return self.router.token is not None", "def is_logged_in(self, username):\n if username in self.users:\n return self.users[username].is_logged\n return False", "def logged_in(request):\n return request.current_user is not None", "def user_in_session():\n return 'user_id' in login_session", "def is_logged_in():\n logged_in = \"uid\" in session\n if logged_in:\n user = api.user.get_user(uid=session[\"uid\"])\n if not user or (user.get(\"disabled\", False) is True):\n logout()\n return False\n return logged_in", "def activate_user(self, user):\n if not user.active:\n user.active = True\n # noinspection PyUnresolvedReferences\n self.save(user)\n return True\n\n return", "def active(self) -> bool:\n return self._active", "def is_active(self):\n return self.type_id in ACTIVE_STATES", "def active(self) -> bool:\n return self.orchestration_status == \"Active\"", "def active(self):\n\n return True", "def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def is_activated(self):\n return self._activated", "def is_active(self):\n with self._lock:\n return self._robot is not None", "def users_active(self):\n return self.users(\"inactive == NO\")", "def is_active(self):\n if self.wallet < 25:\n return True\n else:\n return False", "def logged_in(self):\n return self._token is not None", "def is_authenticated(self):\n return True #self.authenticated", "def is_logged_in(session):\n return 'user' in session", "def is_logged_in(self, params):\n email = self.credentials.get('email', '')\n password = self.credentials.get('password', '')\n if email != '' and password != '':\n return False\n return self.netflix_session.is_logged_in(account=self.credentials)", "def is_active(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n return self._action('is-active').succeeded", "def is_authenticated(self):\n return self.ping() is not None", "def isActive(self):\n return self.data.active", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def is_integrated(user):\n if user.is_authenticated() and user.is_active:\n return NokiaUser.objects.filter(user=user).exists()\n return False", "def is_authenticated(self):\r\n return self.authenticated", "def active(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"active\")", "def active(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"active\")", "def is_active(self):\n current_time = datetime.now().hour\n early = self.active[0]\n late = self.active[-1]\n return early <= current_time < late", "def is_active(self):\n if self.state != 'checkout':\n return True \n if self.state == 'checkout':\n return 
False", "def is_active(self) -> bool:\n return not any((self.is_ancillary, self.is_removed, self.is_system))" ]
[ "0.88505435", "0.8798741", "0.8700004", "0.854557", "0.78350693", "0.7738377", "0.7705481", "0.76957625", "0.76723784", "0.7669489", "0.7668266", "0.76530737", "0.7633366", "0.76033133", "0.75975317", "0.7560072", "0.7550248", "0.7520325", "0.75200886", "0.75200886", "0.75200886", "0.7513665", "0.7513665", "0.7513665", "0.7513665", "0.74734646", "0.74734646", "0.74734646", "0.7452469", "0.74136424", "0.740429", "0.7385657", "0.7370619", "0.7370392", "0.73412925", "0.7323043", "0.73216283", "0.7320729", "0.7293575", "0.7265245", "0.7214026", "0.7208764", "0.7164602", "0.71411747", "0.71212655", "0.71150506", "0.7070638", "0.70642847", "0.70632637", "0.7063135", "0.70619744", "0.7055995", "0.70344687", "0.6987697", "0.69707084", "0.69520235", "0.6949824", "0.6947736", "0.69438994", "0.69418865", "0.6929059", "0.6917835", "0.6913844", "0.690583", "0.6892338", "0.6888636", "0.6883226", "0.68796337", "0.6869483", "0.68645257", "0.6859821", "0.6851972", "0.68455774", "0.6844722", "0.6835057", "0.68334204", "0.68239903", "0.6823747", "0.6813779", "0.6813363", "0.6810635", "0.6804151" ]
0.775682
21
Returns false if the user is not logged in
def is_anonymous(self):
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def isLoggedIn(self):\n session = self.getSession()\n if session is not None:\n return True\n return False", "def is_logged_in(self) -> bool:\n return self.id is not None and self.username is not None", "def logged_in(self):\n return self.user is not None", "def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True", "def is_logged_in():\n return 'user' in session", "def check_user_logged():\n global user\n if 'user' not in session:\n return False\n else:\n user = session.get('user')\n return user['username'] != ''", "def logged_in():\n\n if current_user.is_authenticated:\n return True\n\n return False", "def is_logged_in():\n logged_in = \"uid\" in session\n if logged_in:\n user = api.user.get_user(uid=session[\"uid\"])\n if not user or (user.get(\"disabled\", False) is True):\n logout()\n return False\n return logged_in", "def is_logged_in(self):\n return self.cookie is not None", "def is_logged_in(self):\n return self.__is_logged_in", "def is_logged_in(self):\n return self.router.token is not None", "def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False", "def is_logged_in():\n _has_cookie = util.web.has_cookie('pass')\n if _has_cookie:\n _is_expired = util.web.is_cookie_expired('pass')\n if _is_expired:\n return False\n return True\n return False", "def logged_in(self) -> bool:\n return self._logged_in", "def is_logged_in(session):\n return 'user' in session", "def logged_in(request):\n return request.current_user is not None", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def is_authenticated(self):\n return False", "def logged_in(self):\n return self._token is not None", "def user_in_session():\n return 'user_id' in login_session", "def is_logged_in() -> bool:\n is_dev_login_disabled = SETTINGS.DEV_LOGIN_DISABLED and is_localhost()\n return bool(is_dev_login_disabled or is_logged_in_user())", "def is_valid(self):\n return self.user.is_authenticated", "def is_logged_in(self, username):\n if username in self.users:\n return self.users[username].is_logged\n return False", "def is_regular_user(user):\n return user.is_authenticated()", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_authenticated(self):\n return True", "def is_logged_in(self, params):\n email = self.credentials.get('email', '')\n password = self.credentials.get('password', '')\n if email != '' and password != '':\n return False\n return self.netflix_session.is_logged_in(account=self.credentials)", "def is_authenticated(self):\n return self.user is not None and self.state == AuthenticationOptions.authenticated", "def logged():\n if session.login==1:\n return True\n else:\n return False", "def is_authenticated(self):\n return True #self.authenticated", "def is_authenticated(self, request, **kwargs):\r\n return True", "def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False", "def 
check_user(self):\n try:\n if (self.get_user()[0][0] == self.username) and (self.check_password(self.password)):\n return True\n else:\n return False\n except:\n return False", "def is_authenticated(self):\n return self.ping() is not None", "def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def authenticated(self):\n # We don't support authentication yet\n return False", "def isLoggedOut(self):\n return self.sess is None or self.sess.isNew()", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def is_authenticated(self):\n return bool(get_auth_token())", "def is_authenticated(self):\r\n return self.authenticated", "def is_logged(html):\n soup = BeautifulSoup(html, \"html.parser\")\n\n if soup.find('div', {'id': 'user_information'}) is None:\n return False\n return True", "def is_authenticated(self) -> bool:\n return self.requester.uuid is not None", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def is_authenticated(self):\n result = self.lpass(\"lpass status\")\n\n if \"Logged in as\" in result.output:\n return True\n\n return False", "def check_authentication():\r\n\r\n #TODO: Reservation based authentication\r\n try:\r\n authenticated_user()\r\n except Exception as e:\r\n return e\r\n\r\n return True", "def is_logged_in(self, login_url: str) -> bool:\n self.d('Check login - %s', login_url)\n if not login_url:\n return False\n res = self.get(login_url, allow_redirects=False)\n if res.status_code == 302:\n self.i('Is logged in')\n return True\n self.i('Is not logged in')\n return False", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "def is_user_authenticated(request):\n return request.session.session_key", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authorized(self) -> bool:\n\t\tif \"access_token\" in session:\n\t\t\tif session.get(\"access_token\") is not None:\n\t\t\t\tif \"user\" in session:\n\t\t\t\t\treturn True\n\t\treturn False", "def check_login(request, username=None):\r\n if request.user is None:\r\n return False\r\n\r\n # if we have a username we're told to check against, make sure the\r\n # username matches\r\n if username is not None and username != request.user.username:\r\n return False\r\n\r\n return True", "def is_correct_user(self, login, password):\n pass", "def is_authenticated(self):\n return True", "def check_user_and_login(self) -> Response:\n pass", "def is_authenticated(self):\n return bool(self._authenticated)", 
"def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False", "def is_allowed_to_submit(request):\n return not settings.REQUIRE_LOGIN or request.user.is_authenticated()", "def confirm_login_allowed(self, user):\n # if the user has been disabled due to incorrect\n # password retries or other.\n if not user.is_active:\n return False; \n return True", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)", "def is_active_user(self):\n\n return self.is_active", "def can_log_in_without_cas(self):\n return self.password is not None and self.password != \"\"", "def is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def is_authenticated_CSC_user():\n key = SAML_ATTRIBUTES['CSC_username']\n if executing_travis():\n return False\n return True if 'samlUserdata' in session and len(session.get('samlUserdata', None)) > 0 and key in session.get('samlUserdata', None) else False", "def _is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def _is_user_context(context):\n if not context:\n return False\n if context.is_admin:\n return False\n if not context.user_id or not context.project_id:\n return False\n return True", "def is_user_authenticated(user):\r\n\r\n if DJ_VERSION[0] > 1:\r\n return user.is_authenticated\r\n else:\r\n return user.is_authenticated()", "def has_username(self):\n return self.username is not None", "def authenticated():\n if 'user_id' in session and 'access_token' in session:\n user = db_session.query(User).filter_by(id=session['user_id']).first()\n\n if user:\n return user.access_token == session['access_token']\n return False", "def valid_credentials(self):\n path = '/api/session-user'\n url = '{}{}'.format(self._url_base, path)\n response, content = super(DSBaseService, self)._request(url,\n headers=self._headers(with_content_type=False))\n return int(response['status']) == 200", "def authenticated(self):\n return self.token is not None", "def is_logged_in(self, email):\n wait_for_element(self.browser, self.expand_left_panel)\n expand_panel = self.browser.find_element(*self.expand_left_panel)\n custom_click(self.browser, expand_panel)\n wait_for(lambda: len(self.browser.find_element(*self.logged_user).text) > 0, delay=1, num_sec=5)\n user_logged_in = self.browser.find_element(*self.logged_user).text\n if user_logged_in == email:\n return True\n else:\n raise Exception('Failed to login by user => {}'.format(email))", "def user_auth_inst(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n inst = UserInformation.objects.get(user=user)\n if(inst.user_instructor):\n return True\n return False", "def is_user_context(context):\n if not 
context or not isinstance(context, RequestContext):\n return False\n if context.is_admin:\n return False\n return True", "def is_authenticated(self) -> bool:\n return self._authenticated", "def is_anonymous():\n return False", "def is_authenticated(self):\n if self._token is None:\n self.authenticate()\n\n return self._token is not None", "def check(self):\n self.log.debug('Testing for a valid login session')\n # If our cookie jar is empty, we obviously don't have a valid login session\n if not len(self.cookiejar):\n return False\n\n # Test our login session and make sure it's still active\n return requests.get(self.TEST_URL, cookies=self.cookiejar).status_code == 200", "def is_authenticated(self):\n if not self.token:\n return False\n\n try:\n self.lookup_token()\n return True\n except Forbidden:\n return False\n except InvalidPath:\n return False\n except InvalidRequest:\n return False", "def logged_in(browser: RoboBrowser):\n login_div = browser.find('div', content=\"Login\")\n return True if not login_div else False" ]
[ "0.85514784", "0.8487751", "0.8463887", "0.84311163", "0.83854413", "0.83422846", "0.8227351", "0.81986076", "0.8189232", "0.81029665", "0.80995315", "0.8037724", "0.79774845", "0.7907793", "0.78765494", "0.7856657", "0.7833945", "0.7820103", "0.7789442", "0.77781737", "0.77136534", "0.76728445", "0.7649724", "0.7607182", "0.76053685", "0.75882924", "0.75333154", "0.75333154", "0.75333154", "0.75333154", "0.75333154", "0.75333154", "0.75333154", "0.74919665", "0.74563384", "0.7451827", "0.74273866", "0.7421799", "0.7336918", "0.7330698", "0.7294168", "0.7268295", "0.72512484", "0.7248372", "0.7243039", "0.72335947", "0.7210918", "0.72059184", "0.71585065", "0.7157258", "0.7152786", "0.71364725", "0.71009403", "0.7087075", "0.706841", "0.7049101", "0.7043798", "0.7043798", "0.7043798", "0.7043798", "0.7043798", "0.7043798", "0.7043798", "0.7043798", "0.7043798", "0.7043798", "0.7043798", "0.7043798", "0.7043798", "0.7042321", "0.7037053", "0.7026143", "0.7022888", "0.6991125", "0.6978641", "0.6977515", "0.6972748", "0.69697255", "0.6961067", "0.69610333", "0.69396436", "0.6905875", "0.6905875", "0.6905875", "0.689996", "0.6897613", "0.6897613", "0.68971425", "0.68928164", "0.6891332", "0.6869483", "0.6859305", "0.6853357", "0.683687", "0.67950356", "0.6765828", "0.6761971", "0.6740324", "0.67367065", "0.6732457", "0.67310834" ]
0.0
-1
Returns true if the user is logged in
def is_authenticated(self):
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logged_in():\n\n if current_user.is_authenticated:\n return True\n\n return False", "def is_logged_in():\n return 'user' in session", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def isLoggedIn(self):\n session = self.getSession()\n if session is not None:\n return True\n return False", "def is_logged_in(self) -> bool:\n return self.id is not None and self.username is not None", "def logged_in(self) -> bool:\n return self._logged_in", "def is_logged_in(self):\n return self.__is_logged_in", "def logged_in(self):\n return self.user is not None", "def is_logged_in(session):\n return 'user' in session", "def is_logged_in():\n logged_in = \"uid\" in session\n if logged_in:\n user = api.user.get_user(uid=session[\"uid\"])\n if not user or (user.get(\"disabled\", False) is True):\n logout()\n return False\n return logged_in", "def is_logged_in(self):\n return self.cookie is not None", "def logged_in(request):\n return request.current_user is not None", "def is_logged_in(self):\n return self.router.token is not None", "def check_user_logged():\n global user\n if 'user' not in session:\n return False\n else:\n user = session.get('user')\n return user['username'] != ''", "def is_logged_in():\n _has_cookie = util.web.has_cookie('pass')\n if _has_cookie:\n _is_expired = util.web.is_cookie_expired('pass')\n if _is_expired:\n return False\n return True\n return False", "def is_logged_in(self, username):\n if username in self.users:\n return self.users[username].is_logged\n return False", "def is_logged_in() -> bool:\n is_dev_login_disabled = SETTINGS.DEV_LOGIN_DISABLED and is_localhost()\n return bool(is_dev_login_disabled or is_logged_in_user())", "def is_logged_in(self, params):\n email = self.credentials.get('email', '')\n password = self.credentials.get('password', '')\n if email != '' and password != '':\n return False\n return self.netflix_session.is_logged_in(account=self.credentials)", "def is_authenticated(self):\n return self.user is not None and self.state == AuthenticationOptions.authenticated", "def logged_in(self):\n return self._token is not None", "def user_in_session():\n return 'user_id' in login_session", "def authenticated():\n if 'user_id' in session and 'access_token' in session:\n user = db_session.query(User).filter_by(id=session['user_id']).first()\n\n if user:\n return user.access_token == session['access_token']\n return False", "def is_user_authenticated(user):\r\n\r\n if DJ_VERSION[0] > 1:\r\n return user.is_authenticated\r\n else:\r\n return user.is_authenticated()", "def logged():\n if session.login==1:\n return True\n else:\n return False", "def is_authenticated(self):\n result = self.lpass(\"lpass status\")\n\n if \"Logged in as\" in result.output:\n return True\n\n return False", "def is_authenticated(self):\n return bool(get_auth_token())", "def is_user_authenticated(request):\n return request.session.session_key", "def is_authenticated(self):\n return True #self.authenticated", "def is_authenticated(self, request, **kwargs):\r\n return True", "def is_authenticated(self):\r\n return self.authenticated", "def is_regular_user(user):\n return user.is_authenticated()", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def 
is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_authenticated(self):\n return self.authenticated", "def is_logged_in(self, login_url: str) -> bool:\n self.d('Check login - %s', login_url)\n if not login_url:\n return False\n res = self.get(login_url, allow_redirects=False)\n if res.status_code == 302:\n self.i('Is logged in')\n return True\n self.i('Is not logged in')\n return False", "def is_authenticated(self):\n return bool(self._authenticated)", "def validUser(self):\n if self.state == SessionStates.LOGGED_OUT:\n return False\n\n # if self.user == None:\n # return False\n return True", "def is_authenticated(self) -> bool:\n return self._authenticated", "def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False", "def is_valid(self):\n return self.user.is_authenticated", "def is_authenticated(self):\n return self.ping() is not None", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def is_authenticated_CSC_user():\n key = SAML_ATTRIBUTES['CSC_username']\n if executing_travis():\n return False\n return True if 'samlUserdata' in session and len(session.get('samlUserdata', None)) > 0 and key in session.get('samlUserdata', None) else False", "def is_authenticated(self):\n return True", "def is_authorized(self) -> bool:\n\t\tif \"access_token\" in session:\n\t\t\tif session.get(\"access_token\") is not None:\n\t\t\t\tif \"user\" in session:\n\t\t\t\t\treturn True\n\t\treturn False", "def is_user_authenticated(self, user_id, auth_token):\n\n user = self.user_db[user_id]\n if user['token'] == auth_token:\n return True\n else:\n return False", "def user_auth(request):\n if request.user.is_authenticated:\n user = User.objects.get(email=request.user.email)\n if UserInformation.objects.filter(user=user).exists():\n return True\n return False", "def is_authenticated(self):\n return False", "def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "def is_authenticated(self) -> bool:\n return self.requester.uuid is not None", "def is_logged_in(self, email):\n wait_for_element(self.browser, self.expand_left_panel)\n expand_panel = self.browser.find_element(*self.expand_left_panel)\n custom_click(self.browser, expand_panel)\n wait_for(lambda: len(self.browser.find_element(*self.logged_user).text) > 0, delay=1, num_sec=5)\n user_logged_in = self.browser.find_element(*self.logged_user).text\n if user_logged_in == email:\n return True\n else:\n raise Exception('Failed to login by user => {}'.format(email))", "def is_active_user(self):\n\n return self.is_active", "def check(self):\n\n us = ServiceLocator.resolve(ServiceLocator.USERS)\n\n user_session = self.get()\n user = self.get_user()\n\n return user is not None and us.verify_auth_token(user_session.token, config.SESSION_EXPIRES)", "def check_user(self):\n try:\n if (self.get_user()[0][0] == self.username) and (self.check_password(self.password)):\n return True\n else:\n return False\n 
except:\n return False", "def is_authenticated(self):\n if self._token is None:\n self.authenticate()\n\n return self._token is not None", "def authenticated(self):\n # We don't support authentication yet\n return False", "def is_active(self):\n return self.user.is_active", "def logged_in(self, use_page=None):\n # allow page soup to be passed as argument to make get_soup calling this function faster\n if use_page is None: soup = self.get_soup(\"overview\")\n else: soup = use_page\n\n found = soup.find(\"meta\", {\"name\": \"ogame-player-name\"})\n if found is None: return False\n if str(found[\"content\"]) == self.username: return True", "def is_logged_in_user(consumer=None) -> bool:\n if not consumer:\n consumer = get_consumer()\n return bool(\n not consumer.get(\"X-Anonymous-Consumer\") and consumer.get(\"X-Consumer-Id\")\n )", "def authenticated(self):\n return self.token is not None", "def is_logged_in(session):\n if 'tid' in session:\n return {'success': 1, 'message': '你处于登录状态.', \n 'teamname': session['teamname'], 'is_zju_user': session['is_zju_user']}\n else:\n return {\"success\": 0, \"message\": \"你并未处于登录状态.\"}", "def logged_in(browser: RoboBrowser):\n login_div = browser.find('div', content=\"Login\")\n return True if not login_div else False", "def acctLogin(self):\n loggedIn = False\n if self.acctObj.login():\n loggedIn = True\n else:\n logger.warning(\"{} Authentication failed\".format(self))\n self.acctObj = None\n\n if loggedIn:\n return True\n return False", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def _can_login(self):\n return all([self.user.is_active, self.status, self.status_detail == \"active\"])", "def is_authenticated(self, request, **kwargs):\r\n from django.contrib.auth import get_user_model\r\n User = get_user_model()\r\n\r\n try:\r\n username, api_key = self.extract_credentials(request)\r\n except ValueError:\r\n return self._unauthorized()\r\n\r\n if not username or not api_key:\r\n return self._unauthorized()\r\n\r\n try:\r\n user = User.objects.get_by_natural_key(username)\r\n except (User.DoesNotExist, User.MultipleObjectsReturned):\r\n return self._unauthorized()\r\n\r\n if not self.check_active(user):\r\n return False\r\n\r\n request.user = user\r\n return self.get_key(user, api_key)", "def user_logged_in():\n if not session.get('user_id'):\n return \"nope\", 401\n else:\n return \"yep\", 200", "def is_authenticated(self, request, **kwargs):\r\n if not request.META.get('HTTP_AUTHORIZATION'):\r\n return self._unauthorized()\r\n\r\n try:\r\n (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()\r\n if auth_type.lower() != 'basic':\r\n return self._unauthorized()\r\n user_pass = base64.b64decode(data)\r\n except:\r\n return self._unauthorized()\r\n\r\n bits = user_pass.split(':', 1)\r\n\r\n if len(bits) != 2:\r\n return self._unauthorized()\r\n\r\n if self.backend:\r\n user = self.backend.authenticate(\r\n username=bits[0], password=bits[1])\r\n else:\r\n user = authenticate(username=bits[0], password=bits[1])\r\n\r\n if user is None:\r\n return self._unauthorized()\r\n\r\n if not self.check_active(user):\r\n return False\r\n\r\n request.user = user\r\n return True", "def is_authenticated(connection, window_info, kwargs):\n return window_info and window_info.is_authenticated", "def check_user_and_login(self) -> Response:\n pass", "def is_authenticated(self):\n if not self.token:\n return False\n\n try:\n self.lookup_token()\n return True\n except Forbidden:\n return False\n except InvalidPath:\n return False\n 
except InvalidRequest:\n return False", "def log_in(self, password: str) -> bool:\n return self._password == password", "def check_login(request, username=None):\r\n if request.user is None:\r\n return False\r\n\r\n # if we have a username we're told to check against, make sure the\r\n # username matches\r\n if username is not None and username != request.user.username:\r\n return False\r\n\r\n return True", "def is_active(self):\n return self.status == ACTIVE_USER", "def has_user(self):\n\t\treturn len( self.a_token ) > 0 and len( self.a_secret ) > 0", "def loginState(self, user_data):\n\t\tif self.db.request(\"getOne\", user_data):\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def is_logged_in(soup):\n logged_in = False\n login_link = soup.findAll(\"a\",{\"accesskey\":\"x\"})\n if len(login_link) > 0: \n link = login_link[0].attrs['href'] \n if link.find(\"logout\") > 0:\n logged_in = True\n return logged_in", "def check_user(self,username, password):\n safe_input = (username, password)\n vals = self.cur.execute(\"SELECT Username, Password FROM Users WHERE Username=? AND Password=?\",safe_input).fetchone()\n if vals:\n logging.info('%s was authenticated', username)\n return True\n else:\n logging.info('Failed login for %s', username)\n return False", "def check_authentication():\r\n\r\n #TODO: Reservation based authentication\r\n try:\r\n authenticated_user()\r\n except Exception as e:\r\n return e\r\n\r\n return True", "def is_correct_user(self, login, password):\n pass", "def is_logged_in (self, account):\n is_logged_in = False\n # load cookies\n account_hash = self._generate_account_hash(account=account)\n if self._load_cookies(filename=self.cookie_path + '_' + account_hash) == False:\n return False\n if self._load_data(filename=self.data_path + '_' + account_hash) == False:\n # load the profiles page (to verify the user)\n response = self._session_get(component='profiles')\n\n # parse out the needed inline information\n only_script_tags = SoupStrainer('script')\n page_soup = BeautifulSoup(response.text, 'html.parser', parse_only=only_script_tags)\n page_data = self._parse_page_contents(page_soup=page_soup)\n\n # check if the cookie is still valid\n for item in page_data:\n if 'profilesList' in dict(item).keys():\n if item['profilesList']['summary']['length'] >= 1:\n is_logged_in = True\n return is_logged_in\n return True", "def has_object_permission(self, request, view, account):\n if request.user.is_authenticated():\n if request.user.is_staff:\n return True\n return account.username == request.user.username\n return False", "def is_authenticated(self, request, **kwargs):\n logging.debug(request.META)\n if not request.META.get('HTTP_AUTHORIZATION'):\n logging.error('HTTP_AUTHORIZATION not found')\n return self._unauthorized()\n\n try:\n logging.debug('start split')\n (auth_type, data) = request.META['HTTP_AUTHORIZATION'].split()\n if auth_type.lower() != 'basic':\n logging.error('not basic')\n return self._unauthorized()\n user_pass = base64.b64decode(data).decode('utf-8')\n except:\n logging.error('plit issue')\n return self._unauthorized()\n\n bits = user_pass.split(':', 1)\n logging.debug(bits)\n if len(bits) != 2:\n logging.error('len bits issue')\n return self._unauthorized()\n\n if self.backend:\n user = self.backend.authenticate(username=bits[0], password=bits[1])\n else:\n user = authenticate(username=bits[0], password=bits[1])\n\n if user is None:\n logging.debug('username password is not correct.')\n return self._unauthorized()\n\n if not self.check_active(user):\n 
logging.debug('user is not active')\n return False\n request.user = user\n return True" ]
[ "0.88950294", "0.8790608", "0.8753903", "0.8581064", "0.8567044", "0.84687513", "0.84203804", "0.8413966", "0.8383255", "0.8209496", "0.82089865", "0.8092678", "0.80382335", "0.8021618", "0.79841244", "0.7928396", "0.7890733", "0.78188694", "0.77794355", "0.7776155", "0.77729654", "0.77462757", "0.7742831", "0.76829624", "0.7680131", "0.76035655", "0.7601533", "0.7595474", "0.757267", "0.75625646", "0.74810857", "0.7467262", "0.7467262", "0.7467262", "0.7467262", "0.7467262", "0.7467262", "0.7467262", "0.7467262", "0.7467262", "0.7467262", "0.7467262", "0.7467262", "0.7467262", "0.7443127", "0.73699677", "0.73590964", "0.73545504", "0.7349122", "0.7270624", "0.72696877", "0.7243561", "0.7232548", "0.7187476", "0.7183464", "0.7171943", "0.71561635", "0.7140426", "0.71366656", "0.7119026", "0.7116422", "0.70415527", "0.70003116", "0.6989533", "0.6988987", "0.6959796", "0.691174", "0.69069386", "0.68759996", "0.6860933", "0.6851553", "0.6811386", "0.6789085", "0.67846954", "0.6782115", "0.6781466", "0.6765767", "0.6749032", "0.67484796", "0.6746666", "0.67227983", "0.6682899", "0.66697824", "0.66676223", "0.6652874", "0.66503", "0.66355574", "0.6625005", "0.65868074", "0.6575347", "0.6555238", "0.65547025", "0.6550267", "0.6548364" ]
0.7648227
30
Returns true if the logged in user is an admin
def is_admin(self):
    if self.type == 1:
        return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_admin(user):\n return user.is_authenticated and user.id == app.config.get('ADMIN')", "def is_admin_user(self):\n if \"is_admin\" in self._properties and self.is_admin == 'YES':\n return True\n return False", "def isAdmin():\n\tif 'username' in session and session['username'] == 'admin':\n\t\treturn True\n\telse:\n\t\treturn False", "def is_admin(self):\n if not self.current_user:\n return False\n else:\n return self.current_user in [\"1\"]", "def user_is_admin(user):\n return user in admins", "def isAdmin(self, user):\r\n if user.id in self.admins:\r\n return True\r\n return False", "def is_admin(self, user):\n return user.name in self.admins", "def is_admin(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n if self.user.is_admin:\n return True\n return False", "def check_is_admin(current_user):\n return current_user['isAdmin'] == True", "def is_current_user_admin():\n return (os.environ.get('USER_IS_ADMIN', '0')) == \"1\"", "def is_admin(self) -> bool:\n return self._is_admin", "def isAdmin(user):\n return isUserType(user, Admin)", "def is_admin(self):\r\n return self.admin", "def is_admin(self):\n return self.admin", "def is_admin(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.admin_group_name).count() > 0:\n return True\n else:\n return False", "def admin_flag(user_id):\n user = User.query.filter_by(id=user_id).first()\n if user.is_admin:\n return True\n return False", "def is_admin(ctx) -> bool:\n return db.user(ctx.author).is_admin", "def is_admin():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n return 'Yes, you are admin'\n else:\n return \"No, you don't admin\"\n else:\n return \"You not logged in\"", "def is_admin(self, user) -> bool:\n return (\n user.is_superuser\n or user.groups.filter(pk=self.admins_group.pk).exists()\n )", "def check_admin() -> bool:\n return ctypes.windll.shell32.IsUserAnAdmin() == 1", "def is_user_admin(request):\n return request.user.is_superuser", "def is_admin(self):\n return self._is_admin", "def is_admin(self):\n return self._is_admin", "def is_admin(self) -> bool:\n\n return current_app.config[\"AUTH_ROLE_ADMIN\"] in [\n role.name for role in self.get_user_roles()\n ]", "def is_user_admin(self, user):\n return user == self.created_by", "def is_admin(username: str) -> bool:\n db = get_db()\n return int(db.get_user_by_name(username)[\"is_admin\"]) == 1", "def is_admin(self):\n if self.is_main_admin:\n return True\n if self.user is not None and self.barcamp is not None:\n if unicode(self.user._id) in self.barcamp.admins:\n return True\n return False", "def get_is_admin(self, obj):\n try:\n user = self.context.get('request').user\n except Exception:\n # raise serializers.ValidationError('Could not access request.user')\n return False\n if user == obj.admin:\n return True\n else:\n return False", "def is_main_admin(self):\n if self.user is None:\n return False\n return self.user.has_permission(\"admin\")", "def is_admin():\n # TODO(felipemonteiro): Make this more robust via a context is admin\n # lookup.\n return CONF.patrole.rbac_test_role == CONF.identity.admin_role", "def admin(self):\n if self.is_admin:\n return True\n return False", "def is_superuser(self):\n sesh = self.get_session()\n return sesh.curr_role == 'admin'", "def user_is_admin(userobj):\n from .node import Node\n from .subject import Subject\n from .period import Period\n from .assignment import Assignment\n return 
user_is_basenodeadmin(userobj, Node, Subject, Period, Assignment)", "def is_admin(user):\n return user.groups.filter(name='Profesores').exists()", "def is_admin(self):\n return Role.query.get(2) in self.roles", "def is_admin(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM my_users WHERE username=%(username)s\",\\\n {\"username\":username})\n res = cur.fetchone()\n if res[5].lower() == 'admin':\n return True\n return False", "def check_admin(self, user: TelegramController.User = None, id: str = None):\n\n if id == None:\n id = user.id\n\n return md5((str(id) + \"admin\").encode()).hexdigest() in self.__admins", "def is_admin():\n if platform_is(WINDOWS):\n return windll.shell32.IsUserAnAdmin()\n return os.getuid() == 0", "def check_admin_session(self):\n for session in vms.get_vm_sessions(vm_name=self.vm_name):\n if (\n session.get_console_user()\n and\n session.get_user().get_user_name().startswith(\"admin\")\n ):\n return True\n return False", "def is_admin(user):\n return get_organisations_as_admin(user).count() > 0", "def is_not_admin(user):\n return not user.is_superuser", "def is_admin(self):\n return False", "def admin_user_exists(self):\n try:\n User.objects.get(username='admin')\n except User.DoesNotExist:\n return False\n\n return True", "def isAdmin(self, nick):\n\t\tif nick in self.config[\"admins\"]:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def is_admin(author):\n if str(author).lower() in config[\"admins\"]:\n return True\n return False", "def is_superuser(self):\n return self.is_admin", "def is_user_cloud_admin(self):\n user = users.get_current_user()\n if not user:\n return False\n try:\n user_info = self.get_by_id(UserInfo, user.email())\n if user_info:\n return user_info.is_user_cloud_admin\n else:\n return False\n except Exception as err:\n logging.exception(err)\n return False", "def is_admin(self, username): #WORKS\n done = self.cur.execute(\"SELECT username FROM admins WHERE username=\\\"{}\\\"\".format(username))\n if done == 0: # If query is unsuccessful, username is not an administrator.\n return False\n else:\n return True", "def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'", "def is_accessible(self):\n return current_user.is_authenticated and current_user.role == 'admin'", "def is_admin(context):\n request = context[\"request\"]\n url = resolve(request.path)\n context['is_admin'] = False\n return url.app_name == 'admin'", "def _check_admin_only(self, request):\r\n api_key = request.params.get(self.api_field, None)\r\n\r\n if request.user is None:\r\n user = self.user_fetcher(api_key=api_key)\r\n else:\r\n user = request.user\r\n\r\n if user is not None and user.is_admin:\r\n request.user = user\r\n return True", "def is_administrator(self):\n return self.can(Permission.ADMIN)", "def is_administrator(self):\n return self.can(Permission.ADMIN)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\n if not current_user.is_admin:\n abort(403)", "def check_admin():\r\n if not current_user.is_admin:\r\n abort(403)", "def is_administrator(self):\n return self.rol == ProfileRoles.ADMINISTRATOR or self.user.is_staff", "def is_admin():\n # type: () -> bool\n current_os_name = os.name\n\n # Works with XP SP2 +\n if current_os_name == \"nt\":\n try:\n return IsUserAnAdmin()\n except Exception:\n raise EnvironmentError(\"Cannot check admin privileges\")\n elif current_os_name == \"posix\":\n # Check for root on Posix\n # os.getuid only exists on 
postix OSes\n # pylint: disable=E1101 (no-member)\n return os.getuid() == 0\n else:\n raise EnvironmentError(\n \"OS does not seem to be supported for admin check. OS: {}\".format(\n current_os_name\n )\n )", "async def is_admin(ctx):\n member = ctx.message.author\n aRole = discord.utils.get(member.guild.roles, name=ROLE_AD)\n if aRole in member.roles or member.id == 715048392408956950: return True", "def is_administrator(self):\n return False", "def check_admin():\n\tif not current_user.is_admin:\n\t\tabort(403)", "def is_staff(self):\r\n return self.is_admin", "def is_admin(self, user):\n return (acl.action_allowed(self.request, 'OperatorDashboard', '*') or\n acl.action_allowed(self.request, 'Feed', 'Curate'))", "def check_is_admin(cookie):\n return ';admin=true;' in cookie", "def IsCorpUserOrAdmin(self):\n user_email = auth_util.GetUserEmail()\n return ((user_email and user_email.endswith('@google.com')) or\n auth_util.IsCurrentUserAdmin())", "def user_is_nodeadmin(userobj):\n from .node import Node\n return user_is_basenodeadmin(userobj, Node)", "def test_is_admin_user(self):\n admin = User.objects.get(email='testadminuser@test.com')\n self.assertEqual(admin.is_staff, True)", "def user_is_admin_or_superadmin(userobj):\n if userobj.is_superuser:\n return True\n else:\n return user_is_admin(userobj)", "def is_staff(self):\n return self.is_admin", "def is_staff(self):\n return self.is_admin", "def check_is_admin(self, wormhole: str, user: int):\n query = \"SELECT 1 FROM wormhole_admin WHERE name = ? AND admin = ?\"\n query_res = self.bot.db_query(query, (wormhole, user))\n return len(query_res) > 0", "def is_accessible(self):\n if login.current_user.is_authenticated:\n return login.current_user.is_admin()\n return False", "def is_staff(self) -> bool:\n return self.is_admin", "def check_is_admin(context):\n\n init()\n # the target is user-self\n target = default_target(context)\n return _ENFORCER.authorize('context_is_admin', target, context)", "def login_as_admin():\n users.loginAsUser(\n config.VDC_ADMIN_USER, config.VDC_ADMIN_DOMAIN,\n config.VDC_PASSWORD, filter=False\n )\n return True", "def verify_user(self):\n verified = False\n if self.user.role.role_name == \"Admin\":\n verified = True\n\n return verified", "def check_is_admin(context):\n init()\n credentials = context.to_policy_values()\n target = credentials\n return _ENFORCER.authorize('admin_required', target, credentials)", "def is_staff(self):\n\t\treturn self.is_admin", "def admin(ctx):\n return ctx.message.author.permissions_in(ctx.channel).administrator", "def user_is_assignmentadmin(userobj):\n from .assignment import Assignment\n return user_is_basenodeadmin(userobj, Assignment)", "def is_admin(member: Union[discord.Member, discord.User]) -> bool:\n if not isinstance(member, discord.Member):\n return False\n return any([role.id in config.BOT_ADMIN_ROLES for role in member.roles])", "def check_is_admin(context):\n init()\n\n #the target is user-self\n credentials = context.to_dict()\n target = credentials\n\n return policy.check('context_is_admin', target, credentials)", "def can_edit_user(user):\n\tu = current_user._get_current_object()\n\treturn u==user or u.is_admin()", "def validate_admin(self, request):\n\n self.validate_login(request)\n\n if request.session['id'] not in self.admins:\n handler.logHelper.log_it_visit(request, __name__ + '.validate_admin', authorized=False)\n raise PermissionDenied('You need to be an admin to access this page.')", "def is_admin(self, is_admin):\n\n self._is_admin = is_admin", "def 
check_admin(self, *args, **kwargs):\n if not users.is_current_user_admin():\n self.response.write(\n '<div style=\"padding-top: 200px; height:178px; width: 500px; color: white; margin: 0 auto; font-size: 52px; text-align: center; background: url(\\'http://3.bp.blogspot.com/_d_q1e2dFExM/TNWbWrJJ7xI/AAAAAAAAAjU/JnjBiTSA1xg/s1600/Bank+Vault.jpg\\')\">Forbidden Access <a style=\\'color: white;\\' href=\\'%s\\'>Login</a></div>' %\n users.create_login_url(self.request.path_url + self.request.query_string))\n return\n else:\n return handler(self, *args, **kwargs)", "def user_is_periodadmin(userobj):\n from .period import Period\n return user_is_basenodeadmin(userobj, Period)", "def get_editable(self, user):\n return user.get('role') == 'admin'", "def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})", "def has_access_to_admin_console(self):\n return self.is_superuser or self.has_perm('user.can_view_admin_console')", "async def assert_user_is_admin(auth: Auth, requester: Requester) -> None:\n is_admin = await auth.is_server_admin(requester)\n if not is_admin:\n raise AuthError(HTTPStatus.FORBIDDEN, \"You are not a server admin\")", "def authorized(self, user):\n\n return self.admin.id.getUnhashed() == user.id.getUnhashed()", "def has_admins(cls, server, bucket=None):\n\t\tres = cls.find_on({'type': 'user', 'admin': True}, server, bucket)\n\t\treturn True if res and len(res) > 0 else False", "def test_func(self, user):\n return self.get_object().admin == user", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def is_staff(self):\n # Simplest possible answer: All admins are staff\n return self.is_admin", "def get_is_admin():\n try:\n return ctypes.windll.shell32.IsUserAnAdmin()\n except:\n return \"Could not get the UAC level.\"", "def is_user_allowed(self, user):\n return user.is_staff" ]
[ "0.88923126", "0.8880638", "0.8843605", "0.87530965", "0.87412965", "0.86163646", "0.8612412", "0.85848016", "0.84613305", "0.8420776", "0.84073126", "0.840208", "0.8374834", "0.83690375", "0.835545", "0.8350487", "0.8346385", "0.8346304", "0.83000356", "0.8297389", "0.828466", "0.8266288", "0.8266288", "0.8252766", "0.8195621", "0.818433", "0.81677186", "0.8152489", "0.81507516", "0.81432587", "0.8110384", "0.80539495", "0.7957466", "0.79545826", "0.79240423", "0.7910742", "0.7864869", "0.7851695", "0.7850405", "0.7845059", "0.7843409", "0.7834", "0.7832082", "0.77894855", "0.7776684", "0.7759556", "0.77120674", "0.77060527", "0.7678168", "0.7678168", "0.76574785", "0.76312387", "0.7630629", "0.7630629", "0.7615884", "0.7615884", "0.76106846", "0.760987", "0.7603737", "0.75897384", "0.7580208", "0.7572348", "0.7565247", "0.75402313", "0.7521425", "0.7491831", "0.7453641", "0.74137074", "0.74022645", "0.73788506", "0.73788506", "0.73403436", "0.7338185", "0.73013294", "0.72949547", "0.72718734", "0.7241707", "0.7231609", "0.72270036", "0.72248864", "0.7215641", "0.71888065", "0.71848905", "0.71520007", "0.71483845", "0.71389985", "0.71069795", "0.7104924", "0.7104869", "0.71021515", "0.70988244", "0.7073712", "0.7047566", "0.70446503", "0.70423436", "0.70333606", "0.70333606", "0.70333606", "0.7024039", "0.6979632" ]
0.7866901
36
Returns the id of the user
def get_id(self): return self.id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_id(self):\n return self.id_user", "def get_id(self):\n return self.user_id", "def get_id(self) -> int:\n return self.user_id", "def id(self) -> int:\n return self.user.id", "def user_id(self):\n return self.status.user[\"id\"]", "def get_user_id(self):\n raise NotImplementedError", "def get_id(self): \n\t\treturn (self.user_id)", "def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self):\n return self._user_id", "def user_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self):\n return json_loads(self.user_json).get('id')", "def user_id(self) -> str:\n return self._user_id", "def user_id(self) -> str:\n return self._user_id", "def user_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"user_id\")", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def get_id(self):\n return self.username", "def user_id(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")", "def user_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"user_id\")", "def get_id(self):\r\n return self.username", "def user_id(self):\n # type: () -> string_types\n return self._user_id", "def user_id(self):\n return lamin_user_settings().id", "def get_user_id():\n user_id = session.get(\"user_id\")\n return user_id if user_id else None", "def get_user_id(self, user):\n\n found_user = self.search(user)\n\n if found_user:\n return found_user[\"data\"][0][\"id\"]\n else:\n raise UserNotFound(\"User \" + user + \" not found.\")", "def _get_user_id(self, user: Optional[Dict[str, Any]]) -> Optional[str]:\n return user[\"id\"] if user and \"id\" in user else None", "def get_user(self):\n return str(self.request.user.id)", "def getUserID(self):\n\t\treturn self.UserID", "def get_user_id(self, details, response):\n return details[\"user_id\"]", "def get_user_id():\n csc_name = get_user_csc_name()\n if csc_name:\n return csc_name\n haka_id = get_user_haka_identifier()\n if haka_id:\n return haka_id\n return None", "def getId(self):\n return self.getUserName()", "def getId(self):\n return self.getUserName()", "def get_userid(self):\n user_id = \"\"\n if self.is_valid():\n user_id = self.__httprequest.session[\"lti_user_id\"]\n return user_id", "def get_user_id(self, details, response):\n return response['uid']", "def get_current_user_id():\n user = get_current_user()\n return user.pk if user and user.is_authenticated else None", "def get_id(self, username):\n\n users_list = self.get_list()\n for user_info in users_list:\n if user_info['username'] == username:\n return user_info['id']\n # return None\n raise UserNotFoundException(\"User {0} not found\".format(username))", "def get_user(id):\n pass", "async def user_id(\n event,\n user_id: ('user_id', 'Get the id of an other user?', 'user') = None,\n):\n if user_id is None:\n 
user_id = event.user.id\n \n return str(user_id)", "def get_id(self):\n return self.uid", "def user_id(self) -> str:\n return self.app_config()[\"metadata.user.id\"]", "def get_new_id(self) -> str:\n user = self.get_template(list_entries=False)\n return user.id", "def get_user_id(self, details, response):\n return details['username']", "def get_userid():\n return _userid()", "def get_user_id(self, session, **kwargs):\n return None", "def get_user_primary_key(self, request):\r\n try:\r\n return request.user.pk\r\n except AttributeError:\r\n return ''", "def userid(self):\n mtool = getToolByName(self.context, 'portal_membership')\n return mtool.getAuthenticatedMember().getId()", "def identify(cls, user_id):\n return cls.query.get(user_id)", "def user_id(self):\n return text_type(hash(self.username))", "def get_user_id():\n return os.getuid()", "def get_current_user(self):\n\n if self._user_id:\n return self._user_id\n endpoint = \"/me\"\n response = self._send(endpoint, \"GET\")\n user_id = response.json()[\"id\"]\n self._user_id = user_id\n return user_id", "def get_user_id(username):\n if not user_exists(username):\n return -1\n sql = \"SELECT id \" \\\n \"FROM users \" \\\n \"WHERE username=:username\"\n result = db.session.execute(sql, {\"username\": username})\n user_id = result.fetchone()[0]\n return user_id", "def get_user_id(self):\r\n message = self.q(css='BODY').text[0].strip()\r\n match = re.search(r' user_id ([^$]+)$', message)\r\n return match.groups()[0] if match else None", "def user_id_str(self):\n return str(self.status.user['id'])", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def created_by_user_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"created_by_user_id\")", "def get_test_id(self):\n test_username = \"testusername\"\n test_user_id = db.session.query(User.id).filter_by(username = test_username).first()\n print(test_user_id)\n return test_user_id", "def get_user_id(username):\n # rv = query_db('select user_id from user where username = ?',\n # [username], one=True)\n # return rv[0] if rv else None\n # db = get_db()\n # print \"LOOKHERE UID\"\n rv = mongo.db.users.find_one({'username': username}, {'_id': []})\n print rv['_id']\n return rv['_id'] if rv else None", "def get_user_id(username):\n rv = query_db('select user_id from user where username = %s',\n [username], one=True)\n return rv[0] if rv else None", "def get_current_user_id(data):\n current_user = api.user.get_current()\n if current_user:\n return current_user.getId()\n return None", "def _get_user_id(username):\n user_id = select(u.user_id for u in UserInformationData if u.username == username).first()\n\n return user_id", "def get_user_id(username):\n rv = query_db('select user_id from user where username = ?',\n [username], one=True)\n return rv[0] if rv else None", "def get_user_id(username):\n rv = query_db('select user_id from user where username = ?',\n [username], one=True)\n return rv[0] if rv else None", "def get_user_id(username):\n rv = query_db('select user_id from user where username = ?',\n [username], one=True)\n return rv[0] if rv else None", "def get_user_id(self, details, response):\n return response.get(\"sub\")", "def get_userid(self):\n return util.kbase_env.user", "def get_accessible_user_id(self):\n ### DATABASE CODE GOES HERE\n return 1", "def get_identifier(self, request):\n return request.user.username", "def get_user_id(self, name):\n if '@' in name:\n params = {\n 'method': 
'flickr.people.findByEmail',\n 'find_email': name\n }\n else:\n params = {\n 'method': 'flickr.people.findByUsername',\n 'username': name\n }\n response = self.oauth_session.get(self.API_ENDPOINT, params=params)\n json_response = response.json()\n if json_response['stat'] == 'fail':\n raise FlickrError(json_response['message'])\n return json_response['user']['id']", "def get_identifier(self, request):\r\n return request.user.username", "def __int__(self):\r\n return self.userid", "def created_by_user_id(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"created_by_user_id\")", "def get_user_id(username):\n rv = query_db('select user_id from users where username = ?',\n [username], one=True)\n return rv[0] if rv else None", "def get_user_id(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None", "def get_user_id(username):\n rv = query_db('SELECT user_id FROM users where username = ?', \n [username], one=True)\n return rv[0] if rv else None", "def getUserID(email):\r\n try:\r\n user = session.query(User_info).filter_by(email=email).one()\r\n return user.id\r\n except Exception as e:\r\n return None", "def get_id(self):\n return self.email", "def get_id(self):\n return self.email", "def get_user_id(sdk, username):\n users = sdk.users.get_by_username(username)[\"users\"]\n if not users:\n raise UserDoesNotExistError(username)\n return users[0][\"userUid\"]", "def _getLoggedinUserId(self):\n securityManager = getSecurityManager()\n return securityManager.getUser()._login", "def _get_user_id(self, name):\n try:\n apiResponse = twitchAPI.twitchAPIGet(\"/users\", {\"login\": name}) #Try to get user id from API\n userID = apiResponse[\"data\"][0][\"id\"]\n except (KeyError, APIUnavailable):\n userID = input(\"Please enter the user id of the user: \")\n except IndexError: #If Twitch API does not return user id\n print(\"That user does not exist on Twitch.\")\n userID = False\n return(userID)", "def user_huid(self) -> Optional[UUID]:\n return self.user.user_huid", "def getUserID(email):\r\n try:\r\n session = DBSession()\r\n return session.query(User).filter_by(email=email).one().id\r\n except:\r\n return None", "def get_user_id(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except Exception as e:\n print 'No user found for ' + email + ': ' + str(e)\n return None", "def user_id_from_username(self, username: str) -> int:\n return int(self.user_info_by_username(username).pk)", "def get_user_id(session, email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n\n except BaseException:\n return None", "def get_id(self, email):\n\n query = self._db.User.select(self._db.User.c.email == email)\n query = query.with_only_columns([self._db.User.c.id_, ])\n\n record = query.execute().fetchone()\n return record[0]", "def users_user_id_get(userId): # noqa: E501\n base.check_session()\n return _cleanuser(_finduser(userId))", "def get_user_id(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except NoResultFound:\n return -1\n except MultipleResultsFound:\n return -1", "def get_userid_profile(db, user_id):\n return db['user'].find_one({'_id': user_id})", "def getUserID(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None", "def fetch_current_user_id(s):", "def _get_user_id(self):\n auth_response = self._slack_client.api_call(\"auth.test\")\n\n if auth_response.get(\"ok\") 
is not None and auth_response[\"ok\"]:\n bot_id = auth_response[\"user_id\"]\n logger.info(\"Connected to slack with user id: {}\".format(bot_id))\n return bot_id\n else:\n raise PermissionError(auth_response[\"error\"])", "def get_userid(email, name):\n user = session.query(User).filter_by(email=email).first()\n\n if user:\n return user.id\n else:\n user = User(email=email, name=name)\n session.add(user)\n session.commit()\n return user.id" ]
[ "0.87912023", "0.8627861", "0.8567077", "0.8435428", "0.8355682", "0.82900876", "0.8260854", "0.8231441", "0.8231441", "0.82138103", "0.82138103", "0.82138103", "0.82138103", "0.82138103", "0.82041234", "0.8196352", "0.8126279", "0.8126279", "0.8086236", "0.8086236", "0.8080791", "0.80761266", "0.80761266", "0.80761266", "0.8062972", "0.8039875", "0.8039875", "0.8039875", "0.80239123", "0.80071855", "0.79140306", "0.7910011", "0.78571516", "0.7794613", "0.7771452", "0.7726183", "0.7688242", "0.7682307", "0.76499367", "0.76499367", "0.75971687", "0.759037", "0.756036", "0.75532013", "0.74888813", "0.74830496", "0.7479268", "0.74687225", "0.7456848", "0.74101037", "0.7383293", "0.7336175", "0.7328559", "0.7325287", "0.73055106", "0.73017174", "0.728428", "0.7263137", "0.72628903", "0.72549087", "0.7239469", "0.72363293", "0.7233222", "0.72272843", "0.7222717", "0.72158676", "0.71981007", "0.7195517", "0.7189305", "0.7189305", "0.7189305", "0.7179366", "0.7156229", "0.7149648", "0.71459216", "0.7141641", "0.7132005", "0.71245825", "0.7086691", "0.70813453", "0.7075369", "0.7072656", "0.70664865", "0.70620054", "0.70620054", "0.7012691", "0.7009709", "0.7004867", "0.70023096", "0.6999746", "0.69848096", "0.6981063", "0.6980187", "0.6979085", "0.6974829", "0.6973623", "0.6961824", "0.69615555", "0.69606", "0.69439876", "0.69330126" ]
0.0
-1
Returns the logged-in user after adding the wanted user to their followers list
def follow(self, user): if not self.is_following(user): self.followed.append(user) return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def follow_user(cls, user, following):\r\n pass", "def follow(request, usertofollow):\n to_follow = Member.objects.get(user__username=usertofollow)\n user = Member.objects.get(user=request.user)\n user.following.add(to_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])", "def follow_user(cls, user, following):\n pass", "def author_following(self):\n\t\tpass", "def user_follow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.append(following)\n db.session.commit()\n return {'followed': True}", "def actor_user(self):\n if self.actor:\n return User.objects.get(id=self.actor['id'])", "def _user_follower_info(self, uid: int = 0) -> List[_InstagramUser]:\n # If no uid was specified, use the authenticated user's uid\n if uid == 0:\n uid = self.uid\n\n followers: List[Dict[str, Any]] = self.api.getTotalFollowers(uid)\n user_followers = list([_InstagramUser(x) for x in followers])\n return user_followers", "def follow(self, followerId, followeeId):\n\n # 把 followeeId append到他的 follow 属性中\n if followerId == followeeId: # 不能自己关注自己\n return\n # 实例化一个user(followerID)\n follower = UserInfo()\n follower.user_id = followerId \n follower.follows.append(followeeId) \n self.user_pool[followerId] = follower", "def _user_following_info(self, uid: int = 0) -> List[_InstagramUser]:\n # If no uid was specified, use the authenticated user's uid\n if uid == 0:\n uid = self.uid\n\n followings: List[Dict[str, Any]] = self.api.getTotalFollowings(uid)\n user_followings = list([_InstagramUser(x) for x in followings])\n return user_followings", "def follow(self, followerId: int, followeeId: int) -> None:\n self.user_followed[followerId].append(followeeId)", "def follow_user(username):\n user_ID = before_request()\n user_ID = None\n if user_ID != None:\n user_ID = str(g.user['_id'])\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n mongo.db.users.update({'_id': g.user['_id']}, {\n '$push': {'follows': whom_id}})\n flash('You are now following \"%s\"' % username)\n if redis_obj.get(user_ID):\n return redirect(url_for('user_timeline', username=username, userId=pickle.loads(r.get(user_ID))))\n else:\n redis_obj.delete(str(g.user['_id']))\n print \"Invalidating cache after Follow\"\n return redirect(url_for('user_timeline', username=username))", "def follow_user(username):\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n db = get_db()\n db.execute('insert into follower (who_id, whom_id) values (?, ?)',\n [session['user_id'], whom_id])\n db.commit()\n flash('You are now following \"%s\"' % username)\n return redirect(url_for('user_timeline', username=username))", "def followUser(following):\n\n cur, user_id, con = initialise(3, True)\n cur.execute(\"INSERT INTO followers (user, following) VALUES ((SELECT username FROM users WHERE id = ?), ?)\", (user_id, following))\n finish(con)", "def follow(self, followerId: int, followeeId: int) -> None:\n if followerId == followeeId: return\n self.users[followerId].add(followeeId)", "def follow(self, followerId: int, followeeId: int) -> None:\n if followerId == followeeId:\n return\n if followerId not in self.users.keys():\n self.users[followerId] = user()\n if followeeId not in self.users.keys():\n self.users[followeeId] = user()\n self.users[followerId].followees[followeeId] = self.users[followeeId]", "def get_one_user():", "def add_follow(follow_id):\n\n 
want_to_follow_user = User.query.get_or_404(follow_id)\n if want_to_follow_user.private:\n # =========== NEED TO IMPLEMENT ====================\n # send them a request to follow\n want_to_follow_user.from_users.append(g.user) \n db.session.commit()\n flash(\"Your request has been sent\", \"success\")\n return redirect(f\"/users/{g.user.id}/following\")\n\n g.user.following.append(want_to_follow_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def get_user(self):\n return self.get('users/self')", "def get_user(self):\n return self.user", "def get_user(self):\n return self.user", "def follow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.users:\n self._create_user(followeeId)\n if followerId not in self.users:\n self._create_user(followerId)\n self.users[followerId].add(followeeId)", "def getFollowers():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT following FROM followers WHERE user = (SELECT username FROM users WHERE id = ?)\", [user_id])\n tempFollowers = cur.fetchall()\n followers = []\n for follower in tempFollowers:\n followers.append(follower[0])\n return followers", "def follow(whomUserName,whoUserName):\n\n whomuser = query_db('select * from user where username = ?',\n [whomUserName], one=True)\n whouser = query_db('select * from user where username = ?',\n [whoUserName], one=True)\n\n\n followed = query_db('''select 1 from follower where\n follower.who_id = ? and follower.whom_id = ?''',\n [whouser['user_id'], whomuser['user_id']],one=True) is not None\n\n if whouser is None:\n return jsonify({'message':'User trying to follow another user which does not exist'}),404\n\n if whomuser is None:\n return jsonify({'message':'User getting followed does not exist yet'}),404\n\n if not followed:\n db = get_db()\n\n db.execute('''insert into follower (\n who_id, whom_id) values (?, ?)''',\n [whouser['user_id'], whomuser['user_id']])\n db.commit()\n flash('Operation successful')\n return jsonify({'message': 'Successfully following'}), 201\n else:\n return jsonify({'message':'Specified user is already following another user'}),403", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def follow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.followList.get(followerId, [followerId]):\n self.followList[followerId] = self.followList.get(followerId, [followerId]) + [followeeId]\n # print(self.followList)", "def follow(self, follower, followee):\n pass", "def follow(current_user,user_id):\n if request.method == \"POST\":\n #follee = request.get_json('user_id')\n if User.query.filter_by(userid= user_id):\n follow = Follows(userid =user_id, follower_id =current_user.userid)\n db.session.add(follow)\n db.session.commit()\n return jsonify({'message' :'You are now following'})\n return jsonify({'message' :'User doesnt exist..Try again'})\n return jsonify({'errors' : 'Method Invalid'})", "def get_current_user(self):\n user_id = self.get_secure_cookie(\"blogdemo_user\")\n if not user_id: return None\n user = self.db.get(\"SELECT * FROM authors WHERE id = %s\", int(user_id))\n return user", "def get_user(self):\n return None", "def userFollowers(nick):\n if (len(nick) != 1):\n print \"Has d'introduir només un nick\"\n return\n i.userFollow(nick[0])", "def users_followers(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/followers.html', user=user)", "def get_user(self):\n raise NotImplementedError", "def 
follow_friend():\n print \"followuser\"\n username = request.args.get('username')\n print \"JSON Data\", username\n # username= req_data[username]\n whom_id = get_user_id(username)\n print \"whom_id:\", whom_id\n if whom_id is None:\n abort(404)\n follow_query(whom_id)\n flash('You are now following \"%s\"' % username)\n name = {'name of following user': username}\n R_SERVER.delete(user_timeline_key)\n return jsonify(Username=name, Status_code=status.HTTP_200_OK)", "def author(self):\r\n return self.user", "def user(self):\n if self._user is None:\n pk, full_name = self.owner.split(',')\n pk = int(pk)\n self._user = User.objects.get(pk=pk)\n return self._user", "def add_following(self, user_id):\n sleep(360) # too much follows => function ban\n self.following.append(user_id)\n return perform_with_ran_delay(self.instagram.follow, user_id)", "def me():\n return current_user.get()", "async def get_user(event):\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n replied_user = await event.client(GetFullUserRequest(previous_message.from_id))\n else:\n user = event.pattern_match.group(1)\n if user.isnumeric():\n user = int(user)\n\n if not user:\n self_user = await event.client.get_me()\n user = self_user.id\n\n if event.message.entities is not None:\n probable_user_mention_entity = event.message.entities[0]\n\n if isinstance(probable_user_mention_entity, MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n replied_user = await event.client(GetFullUserRequest(user_id))\n return replied_user\n try:\n user_object = await event.client.get_entity(user)\n replied_user = await event.client(GetFullUserRequest(user_object.id))\n\n except (TypeError, ValueError):\n await event.edit(\"`I don't slap aliens, they ugly AF !!`\")\n return None\n\n return replied_user", "async def get_user(event):\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n replied_user = await event.client(\n GetFullUserRequest(previous_message.sender_id)\n )\n else:\n user = event.pattern_match.group(1)\n\n if user.isnumeric():\n user = int(user)\n\n if not user:\n self_user = await event.client.get_me()\n user = self_user.id\n\n if event.message.entities is not None:\n probable_user_mention_entity = event.message.entities[0]\n\n if isinstance(probable_user_mention_entity, MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n replied_user = await event.client(GetFullUserRequest(user_id))\n return replied_user\n try:\n user_object = await event.client.get_entity(user)\n replied_user = await event.client(GetFullUserRequest(user_object.id))\n\n except (TypeError, ValueError):\n await event.edit(\"`I don't slap aliens, they ugly AF !!`\")\n return None\n\n return replied_user", "def target_user(self):\n return self.request.user", "def get(self, request):\n # Retrieve the user from the request if they have been authenticated\n current_user = request.user\n # Get the following & followers username list\n # And the following & followers count for the current user\n user_following_data = get_user_following_data(current_user)\n # Return the follower details for the current user\n return Response(\n {\n \"message\": FOLLOW_USER_MSGS['MY_FOLLOWERS_SUCCESSFUL'],\n \"following\": user_following_data[\"following\"],\n \"followers\": user_following_data[\"followers\"],\n \"followingCount\": user_following_data[\"followingCount\"],\n \"followersCount\": user_following_data[\"followersCount\"]\n },\n status=status.HTTP_200_OK\n )", "def 
user(self, uid):", "def get_user(self):\n return str(self.request.user.id)", "def add_to_following(sender, instance, created, **kwargs):\r\n sender_= instance.sender\r\n receiver_ = instance.receiver\r\n if instance.status == 'accepted':\r\n sender_.following.add(receiver_.user)", "def get_user(self):\n if \"user\" not in self._data:\n self._data[\"user\"] = User.objects.get(pk=self.kwargs[\"user_id\"])\n return self._data[\"user\"]", "def add_user_to_g():\n \n if CURRENT_USER in session:\n g.user = User.query.get(session[CURRENT_USER])\n\n else:\n g.user = None", "def user(self):\n return self.owner.user", "def follow(self, user):\n if not self.is_following(user):\n f = Follow(follower=self, followed=user)\n db.session.add(f)", "def getToUser(self):\n return self.toUser", "def add(self, user: U) -> None:\n ...", "def show_following(user_id):\n\n\n user = User.query.get_or_404(user_id)\n return render_template('users/following.html', user=user)", "def get_current_user(self):\n user_id = self.get_secure_cookie(\"blogdemo_user\")\n if not user_id: return None\n user = self.db.bucket('authors').get(str(user_id))\n if not user.exists: return None\n return user.data", "def add_follow(follow_id):\n followed_user = User.query.get_or_404(follow_id)\n if not g.user or g.user.id == follow_id or followed_user.is_blocking(g.user):\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n g.user.following.append(followed_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def follow(self, followerId, followeeId):\r\n if followerId != followeeId:\r\n self.follows[followerId].add(followeeId)", "def get_user(current_user):\n for user in user_db:\n if user['email'] == current_user:\n return user", "def user(self):\n pass", "def checkUsers(u1, u2):\n if type(u1) == int:\n try:\n user1 = mt.User.objects.filter(Id = u1).get()\n except:\n\n return None\n else:\n user1 = u1\n try:\n user2 = mt.User.objects.filter(Id = u2).get()\n except:\n return user1\n mt.follows(\n followee= user1,\n follower= user2\n ).save()\n return user1", "def post(self, request, username):\n\n # Retrieve the user from the user table if the user exists\n try:\n user_details = User.objects.get(username=username)\n current_user = request.user\n # If a user is trying to follow themselves then stop the request\n if user_details.profile.id == current_user.profile.id:\n return Response(\n {\"errors\": FOLLOW_USER_MSGS['CANNOT_FOLLOW_SELF']},\n status=status.HTTP_400_BAD_REQUEST\n )\n # Otherwise follow the author the current user has indicated\n current_user.profile.follows.add(user_details.profile)\n\n # notify user of new follower\n send_notifications(request,\n notification_type=\"user_followed\",\n instance=current_user,\n recipients=[user_details])\n\n # Get the following & followers username list\n # And the following & followers count for the current user\n user_following_data = get_user_following_data(current_user)\n return Response(\n {\n \"message\": FOLLOW_USER_MSGS['USER_FOLLOW_SUCCESSFUL'],\n \"following\": user_following_data[\"following\"],\n \"followers\": user_following_data[\"followers\"],\n \"followingCount\": user_following_data[\"followingCount\"],\n \"followersCount\": user_following_data[\"followersCount\"]\n },\n status=status.HTTP_201_CREATED\n )\n except User.DoesNotExist:\n return Response(\n {\"errors\": FOLLOW_USER_MSGS['USER_NOT_FOUND']},\n status=status.HTTP_404_NOT_FOUND\n )", "def users_followers(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", 
\"danger\")\n return redirect(\"/\")\n\n user = User.query.get_or_404(user_id)\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/followers.html', user=user, likes=likes)", "def get_follow(self):\n return self.follow", "def following_and_storing(self, user_obj):\n if self.following(user_obj['user']):\n self.monitored_users.append({'user': user_obj['user'], 'username': user_obj['username'],\n 'followDate': datetime.now().timestamp()})", "def get_following_by_user(request):\n response, status_code = get_followings(request)\n if status_code != 200:\n return JsonResponse(response, status=status_code)\n serialize_data = FollowingSerializer(response, many=False).data\n return JsonResponse(serialize_data, status=status_code, safe=False)", "def followed_by(self, user_id):\n\n url = \"https://api.instagram.com/v1/users/{0}/followed-by?access_token={1}\".format(user_id, self.access_token)\n\n request = requests.get(url)\n return request.json()", "def get_current_user(self):\n return None", "def user_following_gql(self, user_id: int, amount: int = 0) -> list:\n user_id = int(user_id)\n end_cursor = None\n users = []\n variables = {\n \"id\": user_id,\n \"include_reel\": True,\n \"fetch_mutual\": False,\n \"first\": 24\n }\n while True:\n if end_cursor:\n variables[\"after\"] = end_cursor\n data = self.public_graphql_request(\n variables, query_hash=\"e7e2f4da4b02303f74f0841279e52d76\"\n )\n if not data[\"user\"] and not users:\n raise UserNotFound(user_id=user_id, **data)\n page_info = json_value(\n data, \"user\", \"edge_follow\", \"page_info\", default={}\n )\n edges = json_value(\n data, \"user\", \"edge_follow\", \"edges\", default=[]\n )\n for edge in edges:\n users.append(extract_user_short(edge[\"node\"]))\n end_cursor = page_info.get(\"end_cursor\")\n if not page_info.get(\"has_next_page\") or not end_cursor:\n break\n if amount and len(users) >= amount:\n break\n # time.sleep(sleep)\n if amount:\n users = users[:amount]\n return users", "def getUser(self):\n user = users.get_current_user()\n if not user:\n self.redirect(users.create_login_url(self.request.uri))\n else:\n return user", "def get_followers(user):\n if user.has_key('followers_list'):\n pass\n else:\n if user.has_key('followers_count'):\n if user['followers_count'] > 4999:\n pages = user['followers_count'] / 5000\n f_list = []\n for page in range(pages):\n try:\n follower_set = api.GetFollowers(user_id=user['id'], cursor=page, count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n f_list = friends_list + f_list\n time.sleep(60)\n user['followers_list'] = f_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(f_list), user['followers_count'])\n except Exception, e:\n print str(e)\n time.sleep(60)\n else:\n try:\n follower_set = api.GetFollowers(user_id=user['id'], count=5000)\n friends_list = []\n for follower in follower_set:\n twitter_users.update({'id':follower.GetId()},follower.AsDict(),upsert=True)\n friends_list.append(follower.GetId())\n user['followers_list'] = friends_list\n twitter_users.update({'id': user['id']}, user)\n print \"\\n\\nGot %s followers out of %s listed\" % (len(friends_list), user['followers_count'])\n except Exception, e:\n print 
str(e)\n time.sleep(60)", "def current_user(self, **kwargs):\n return self.me(**kwargs)", "def followed_by(self):\r\n return relationships.FollowedBy(self)", "def get_current_user(self):\n return self.current_user", "def addUser(self, user):\r\n self.users.append(user)\r\n return len(self.users)-1", "def getFollowers():\n followers = []\n next_max_id = True\n while next_max_id:\n # first iteration hack\n if next_max_id is True:\n next_max_id = ''\n\n _ = GetInstagramAnswer.igApi.getUserFollowers(GetInstagramAnswer.igApi.username_id, maxid=next_max_id)\n followers.extend(GetInstagramAnswer.igApi.LastJson.get('users',[]))\n next_max_id = GetInstagramAnswer.igApi.LastJson.get('next_max_id','')\n return \"You have currently \"+str(len(followers))+\" Followers on Instagram.\"", "def follow_user(self, user):\n self.nav_user(user)\n follow_button = self.driver.find_element_by_xpath(\n \"//button[contains(text(), 'Follow')]\")\n follow_button.click()\n time.sleep(1)\n self.driver.get(self.base_url)", "def user_follow_users(self, ids=None, **kwargs):\n return self._put(\n API.MY_FOLLOWING.value, type=\"user\", ids=\",\".join(ids or []), **kwargs\n )", "def get_current_user(request): \n user_id = request.session.get('user_id', False)\n \n return Member.objects.get(id=user_id)", "def select_user_following(args):\n is_parameter_exists([\n constants.ID\n ], args)\n\n # Request User\n request_user = args[constants.USER]\n\n # Requested User ID\n requested_user_id = args[constants.ID]\n\n # Page Number\n page_number = 1 if constants.PAGE_NUMBER not in args else int(args[constants.PAGE_NUMBER])\n\n # Check User Id\n if not User.objects.filter(id=requested_user_id).exists():\n raise ApiError(constants.NOT_EXIST_OBJECT)\n\n # Following QuerySet\n queryset = UserFollow.objects.filter(following_user=requested_user_id).values_list('followed_user', flat=True)\n\n # User Ids\n user_ids = get_results_from_queryset(queryset, 10, page_number)\n\n # is_finished\n is_finished = not user_ids.has_next()\n\n # Filter Query\n filter_query = Q(id__in=user_ids)\n\n # Users\n users, _, _ = __get_users(filter_query, request_user, 10)\n\n return users, page_number, is_finished", "def get_user(request: Request):\n return request.user", "def follow(self, followerId, followeeId):\n if followerId in self.follows:\n self.follows[followerId].add(followeeId)\n else:\n self.follows[followerId] = set([followeeId])", "def add_user_to_g():\n if CURR_USER_KEY in session:\n g.user = User.query.get(session[CURR_USER_KEY])\n\n else:\n g.user = None", "def get_followings(request):\n user_id = request.GET.get(\"user_id\")\n if not user_id:\n return {\"error\": \"User Id should be provided\"}, 400\n following_data = Following.objects.filter(user_profile_id=user_id, is_active=True).first()\n return following_data, 200", "def created_user(self):\n return self._created_user", "def get_user_id(self):\n return self.id_user", "def owner(self) -> discord.User:\n if self.config.owner_id:\n return self.get_user(self.config.owner_id)\n if self.owner_ids:\n return self.get_user(self.config.owner_ids[0])\n return None", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def get_object(self):\n return self.request.user", "def get_user(id):\n pass", "def followers(congressDict, twitterAPI):\n most = twitterAPI.get_user(list(congressDict.items())[0][1]) # Choose an arbitrary starting point from the 
dictionary and assign it their user details.\n least = most\n for name in congressDict:\n tempAPI = twitterAPI.get_user(congressDict[name]) # Get the user details of each congress members' twitter handle.\n numFollowers = tempAPI._json['followers_count']\n if (numFollowers > most._json['followers_count']): # If the follower count is greater than most, replace the user details with current one.\n most = tempAPI\n elif (numFollowers < least._json['followers_count']): # If the follower count is lower than least, replace the user details with the current one.\n least = tempAPI\n return [most._json[\"name\"], least._json[\"name\"]]", "def user(self):\n u = self.user_info\n return self.user_model.get_by_id(u['user_id']) if u else None", "def follow_user(self:'InstaClient', user:str, nav_to_user:bool=True):\n # Check User Vadility\n profile = self.get_profile(user)\n if not profile:\n raise InvalidUserError(user)\n\n # Navigate to User Page\n self._nav_user(user, check_user=False)\n \n if self._check_existence(EC.presence_of_element_located((By.XPATH, Paths.MESSAGE_USER_BTN))):\n # User already followed\n pass\n else:\n follow_button = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.FOLLOW_BTN)), url=ClientUrls.NAV_USER.format(user))\n self._press_button(follow_button)\n profile.requested_by_viewer = True\n return profile", "def get_object(self):\n return User.objects.get(username=self.request.user.username)", "def add_user(self, attrs):\n pass", "def follow(self, user_index, following_index):\n if user_index >= self.num_users or following_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user_index} and {following_index} were requested.\"\n )\n if self.users_hat[following_index, user_index] == 0:\n self.users_hat[following_index, user_index] = 1\n elif self.is_verbose():\n self.log(f\"User {following_index} was already following user {user_index}\")", "def user_follow(self, user_id: int) -> bool:\n assert self.user_id, \"Login required\"\n user_id = int(user_id)\n if user_id in self._users_following.get(self.user_id, []):\n self.logger.debug(\"User %s already followed\", user_id)\n return False\n data = self.with_action_data({\"user_id\": user_id})\n result = self.private_request(f\"friendships/create/{user_id}/\", data)\n if self.user_id in self._users_following:\n self._users_following.pop(self.user_id) # reset\n return result[\"friendship_status\"][\"following\"] is True", "def get_queryset(self, *args, **kwargs):\n following_username = self.kwargs.get(self.look_url_kwarg)\n following_users = FollowUser.objects.filter(\n following_username=following_username)\n\n return following_users", "def my_get_user(users_list, user_id):\n for user in users_list:\n if user.get(\"user_id\") == user_id:\n return user\n return None", "def user(self):\n return self._push.get('user', None)" ]
[ "0.7044998", "0.69360614", "0.6906444", "0.6870493", "0.67471176", "0.66945994", "0.66936237", "0.668267", "0.6638689", "0.65982777", "0.6567574", "0.65394175", "0.6528088", "0.6518445", "0.64401454", "0.6416909", "0.6398086", "0.6381833", "0.63553584", "0.63553584", "0.6350465", "0.63263625", "0.6305007", "0.6303222", "0.62997025", "0.62849957", "0.62846375", "0.62629235", "0.6206243", "0.61844134", "0.618406", "0.6180341", "0.61708814", "0.61486816", "0.61389786", "0.61306214", "0.6129847", "0.61106503", "0.6109269", "0.61015093", "0.608964", "0.6080447", "0.60637003", "0.60471314", "0.60341406", "0.6014736", "0.6014412", "0.6006301", "0.6005864", "0.59997535", "0.5997092", "0.5995834", "0.59919614", "0.59889936", "0.59850556", "0.5979546", "0.5979161", "0.5977076", "0.59769106", "0.59752184", "0.59337616", "0.5922739", "0.59219813", "0.5915432", "0.5913267", "0.59120286", "0.590383", "0.5896157", "0.5894813", "0.5888959", "0.5888887", "0.58881044", "0.58880395", "0.588536", "0.58826625", "0.587243", "0.5867339", "0.5862555", "0.5861763", "0.58578426", "0.5849514", "0.58492357", "0.5848579", "0.58481103", "0.58481103", "0.58481103", "0.58481103", "0.58481103", "0.58471817", "0.58458203", "0.581197", "0.5811311", "0.5809334", "0.5806933", "0.5805576", "0.58052397", "0.58023626", "0.58008116", "0.5799232" ]
0.6222603
29
Returns the logged-in user after removing the wanted user from their followers list
def unfollow(self, user): if self.is_following(user): self.followed.remove(user) return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unfollowUser(following):\n \n cur, user_id, con = initialise(3, True)\n cur.execute(\"DELETE FROM followers WHERE user = (SELECT username FROM users WHERE id = ?) AND following = ?\", (user_id, following))\n finish(con)", "def unfollow(self, user):\n f = self.followed.filter_by(followed_id=user.id).first()\n if f:\n db.session.delete(f)", "def unfollow_user(username):\n user_ID = before_request()\n user_ID = None\n if user_ID != None:\n user_ID = str(g.user['_id'])\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n mongo.db.users.update({'_id': g.user['_id']}, {\n '$pull': {'follows': whom_id}})\n flash('You are no longer following \"%s\"' % username)\n if redis_obj.get(user_ID):\n return redirect(url_for('user_timeline', username=username, userId=pickle.loads(redis_obj.get(user_ID))))\n else:\n redis_obj.delete(session['user_id'])\n print \"Invalidating cache after Unfollow\"\n return redirect(url_for('user_timeline', username=username))", "def user_unfollow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.remove(following)\n db.session.commit()\n return {'unfollowed': True}", "def unfollow_user(username):\n if not g.user:\n abort(401)\n whom_id = get_user_id(username)\n if whom_id is None:\n abort(404)\n db = get_db()\n db.execute('delete from follower where who_id=? and whom_id=?',\n [session['user_id'], whom_id])\n db.commit()\n flash('You are no longer following \"%s\"' % username)\n return redirect(url_for('user_timeline', username=username))", "def removeFollower(self,id):\n # DELETE /followers/$id\n pass", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followeeId not in self.users:\n self._create_user(followeeId)\n if followerId not in self.users:\n self._create_user(followerId)\n if followerId != followeeId and followeeId in self.users[followerId]:\n self.users[followerId].remove(followeeId)", "def delete(self, request, *args, **kwargs):\n\n following_username = token_payload(request)\n\n followed_username = self.kwargs.get(self.look_url_kwarg)\n try:\n followed_user = User.objects.get(username=followed_username)\n followed_user_id = followed_user.id\n\n is_following = FollowUser.objects.filter(\n following_username=following_username)\n for user in is_following:\n if user.following_username == following_username and user.followed_username == followed_username:\n returned_user = FollowUser.objects.get(pk=user.id)\n returned_user.delete()\n\n followed_user_profile = Profile.objects.get(\n profile_user=followed_user_id)\n serializer_profile = self.serializer_classprofile(\n followed_user_profile)\n return Response({\n \"user\": following_username,\n \"unfollowing\": serializer_profile.data\n })\n return Response({\n \"message\": 'User has been unfollowed or You are unfollowing a user you were not orignally following'\n }, status=status.HTTP_400_BAD_REQUEST)\n except:\n return Response({\n \"message\": 'User does not exist'\n }, status=status.HTTP_400_BAD_REQUEST)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followerId != followeeId and followeeId in self.followList.get(followerId, []):\n self.followList[followerId].remove(followeeId)\n # print(self.followList)", "def unfollowing_and_removing(self, user_id):\n if self.unfollowing(user_id):\n ind = [i for i, j in enumerate(self.monitored_users) if j.get('user', '') == user_id]\n if ind:\n 
self.monitored_users.remove(self.monitored_users[ind[0]])", "def unfollow(request, usertostopfollow):\n stop_follow = Member.objects.get(user__username=usertostopfollow)\n user = Member.objects.get(user=request.user)\n user.following.remove(stop_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])", "def unfollow(self, followerId: int, followeeId: int) -> None:\n following = self.user_followed[followerId]\n if followeeId in following:\n following.remove(followeeId)\n self.user_followed[followerId] = following", "def remove_user(self, user_id):\n if user_id in self:\n user = self[user_id]\n del self[user_id]\n return user", "def unfollow(self, followerId, followeeId):\n # 在user_pool 中查询这个用户 follower\n if self.user_pool[followerId]:\n # 如果在用户的关注列表中才删除\n if followeeId in self.user_pool[followerId].follows:\n self.user_pool[followerId].follows.remove(followeeId)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n self.users[followerId].discard(followeeId)", "def remove(self, user_id):\n pass", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followerId not in self.users.keys() or followeeId not in self.users.keys():\n return\n if followeeId not in self.users[followerId].followees.keys():\n return\n self.users[followerId].followees.pop(followeeId)\n\n\n\n # Your Twitter object will be instantiated and called as such:\n # obj = Twitter()\n # obj.postTweet(userId,tweetId)\n # param_2 = obj.getNewsFeed(userId)\n # obj.follow(followerId,followeeId)\n # obj.unfollow(followerId,followeeId)", "def delete(self, request, username):\n followed_user_exists = User.objects.filter(username=username).exists()\n if not followed_user_exists:\n return Response({'error': 'user not found'},\n status.HTTP_404_NOT_FOUND)\n followed_user = User.objects.get(username=username)\n user_exists = Follow.is_user_already_followed(\n followed_user_id=followed_user.id,\n user_id=request.user.id\n )\n if user_exists:\n instance = Follow.objects.filter(\n user=self.request.user.id, followed_user=followed_user.id\n )\n instance.delete()\n return Response({'message': 'user unfollowed'},\n status.HTTP_200_OK)\n return Response({'message': 'user not in followers'},\n status.HTTP_404_NOT_FOUND)", "def unfollow_user(request, course_id, followed_user_id):\r\n user = cc.User.from_django_user(request.user)\r\n followed_user = cc.User.find(followed_user_id)\r\n user.unfollow(followed_user)\r\n return JsonResponse({})", "def delete(self, request, username):\n\n # Retrieve the user from the user table if the user exists\n try:\n user_to_unfollow = User.objects.get(username=username)\n current_user = request.user\n # If a user is trying to unfollow themselves then stop the request\n if user_to_unfollow.profile.id == current_user.profile.id:\n return Response(\n {\"errors\": FOLLOW_USER_MSGS['CANNOT_UNFOLLOW_SELF']},\n status=status.HTTP_400_BAD_REQUEST\n )\n # Check if the user to be unfollowed\n # is in the current users following list\n try:\n profile_id = user_to_unfollow.profile.id\n user_being_followed = CustomFollows.objects.get(\n to_profile_id=profile_id,\n from_profile_id=current_user.profile.id\n )\n # If not tell the user the request can't happen\n # Because they don't follow the user\n except Exception as e:\n return Response(\n {\n \"errors\": FOLLOW_USER_MSGS['USER_UNFOLLOWED_ALREADY']\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n # Otherwise unfollow the user as requested\n current_user.profile.follows.remove(user_to_unfollow.profile)\n # Get the following & followers 
username list\n # And the following & followers count for the current user\n user_following_data = get_user_following_data(current_user)\n return Response(\n {\n \"message\": FOLLOW_USER_MSGS['USER_UNFOLLOW_SUCCESSFUL'],\n \"following\": user_following_data[\"following\"],\n \"followers\": user_following_data[\"followers\"],\n \"followingCount\": user_following_data[\"followingCount\"],\n \"followersCount\": user_following_data[\"followersCount\"]\n },\n status=status.HTTP_200_OK\n )\n # End request if we cannot find the user we want to unfollow.\n except User.DoesNotExist:\n return Response(\n {\"errors\": FOLLOW_USER_MSGS['USER_NOT_FOUND']},\n status=status.HTTP_404_NOT_FOUND\n )", "def delete_user():", "def pop_user(self):\n return self._pop_user()", "def test_remove_followers(self):\n pass", "def test_un_following_existing_user(self):\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def unfriend(self, remove):\n remover_friends_list = self # person terminating the friendship \n \n # remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n\n #remove friend from removee friend list\n friends_list = FriendList.objects.get(user=removee)\n friend_list.remove_friend(self.user)", "def unfollow_friend(username):\n\n if not g.user:\n print \"401\"\n abort(401)\n whom_id = get_user_id(username)\n print whom_id\n if whom_id is None:\n abort(404)\n unfollow_query(whom_id)\n flash('You are no longer following \"%s\"' % username)\n name = {'name of unfollowing user': username}\n ############### REDIS cache invalidate #####################\n R_SERVER.delete(user_timeline_key)\n return jsonify(Username=name, Status_code=status.HTTP_200_OK)", "def unfollow(self, followerId, followeeId):\n if followerId in self.follows:\n if followeeId in self.follows[followerId]:\n self.follows[followerId].remove(followeeId)", "def remove_user(self):\n self.currentuser = None\n self.carlocked = False", "def delete_user():\n #TODO user delete\n pass", "def delete_follow_request(request):\n username = request.POST[\"user\"]\n requester = get_user_from_username(request.user, username)\n\n follow_request = get_object_or_404(\n models.UserFollowRequest, user_subject=requester, user_object=request.user\n )\n follow_request.raise_not_deletable(request.user)\n\n follow_request.delete()\n return redirect(f\"/user/{request.user.localname}\")", "def unfriend(self, removee):\n remover_friends_list = self # person terminating the friendship\n # Remove friend from remover friend list\n remover_friends_list.remove_friend(removee)\n # Remove friend from removee's friend list\n friends_list = FriendList.objects.get(user=removee)\n friends_list.remove_friend(self.user)", "def unfollow(self, followerId: int, followeeId: int) -> None:\n # Time Complexity: O(1)\n if followerId in self.followees and followeeId in self.followees[followerId]:\n self.followees[followerId].remove(followeeId)", "def remove_user(self, u: \"Node\") -> None:\n\n if u in self.users_:\n self.users_[u] -= 1\n if self.users_[u] == 0:\n del self.users_[u]", "def remove_friend():\n if request.method == 'POST':\n username = get_username()\n user_id = get_id_from_username(username)\n friend_to_remove = 
get_id_from_username(request.form['remove_user'])\n if not friend_to_remove or friend_to_remove==user_id:\n return redirect(url_for('message.converse'))\n remove_friend_db(user_id, friend_to_remove)\n return redirect(url_for('message.converse'))", "def _onRemove(self, event):\n sel = self.userlist.GetSelection()\n if sel >= 0:\n c.removeUser(self.userlist.GetString(sel))\n self.userlist.Delete(sel)\n del self.users[sel]\n if len(self.users) >= 0:\n self.userlist.SetSelection(0)\n else:\n self.userlist.SetSelection(-1)", "def _remove(users, room_name):\n global users_removed\n users_removed = []\n\n try:\n\n for word in users['message']['text'].split():\n\n if word == 'myself':\n user = users['message']['sender']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n \n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append('Not found ->> ' + \"<\" + user + \">\")\n\n check_continue = 1\n text = '```User removed: %s ```' % (','.join(users_removed))\n\n for _item in range(len(users['message']['text'].split())):\n\n _item = _item + 1\n\n try:\n _type = users['message']['annotations'][_item]['userMention']['user']['type']\n user = users['message']['annotations'][_item]['userMention']['user']['name']\n \n if _type == 'BOT':\n\n if check_continue == 1:\n continue\n else:\n text = 'Please add user with @'\n continue\n \n user = users['message']['annotations'][_item]['userMention']['user']['name']\n check_result = redis.srem(room_name, \"<\" + user + \">\")\n\n except:\n pass\n\n if check_result == 1:\n users_removed.append(\"<\" + user + \">\")\n else:\n users_removed.append(\"Not found ->> \" + \"<\" + user + \">\")\n text = \"```Removed users: %s ```\" % (','.join(list(set(users_removed))))\n return text\n except:\n\n text = 'Please add user with @'\n return text", "def unfollow(self, followerId: int, followeeId: int) -> None:\n if followeeId in self.follow_map[followerId]:\n self.follow_map[followerId].remove(followeeId)", "def user_unfollow(self, user_id: int) -> bool:\n assert self.user_id, \"Login required\"\n user_id = int(user_id)\n data = self.with_action_data({\"user_id\": user_id})\n result = self.private_request(f\"friendships/destroy/{user_id}/\", data)\n if self.user_id in self._users_following:\n self._users_following[self.user_id].pop(user_id, None)\n return result[\"friendship_status\"][\"following\"] is False", "def remove_user(users, curr_username, user_role, request_ip):\n #TODO: error checking\n log_connector.add_log('DELETE USER', \"Removed {} user(s)\".format(len(users)), curr_username, user_role, request_ip)\n user_connector.remove_user(users)", "def test_unfollowing_existing_user_not_authenticated(self):\n self.client.credentials()\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)", "def remove_user(self, username):\n\n row = self.c.execute(\"SELECT * FROM profiles WHERE name =?\",\n (username,))\n for i in row:\n user = i[1]\n print(user)\n if user == username:\n self.c.execute(\"SELECT id FROM profiles WHERE name=?\",\n (username,))\n i_d = self.c.fetchone()[0]\n self.c.execute(\"DELETE FROM events WHERE user_id=?\", (i_d,))\n self.c.execute(\"DELETE FROM profiles WHERE name=?\", (username,))\n self.conn.commit()\n return True\n else:\n print ('User not found.')", "def unfollow(self, followerId, followeeId):\n if followerId in self.follow_map and followeeId in 
self.follow_map[followerId]:\n self.follow_map[followerId].remove(followeeId)", "def remove_user(self, username): # remove only users from json file\n return self._user(username=username, remove=True)", "def delete_user(self):\n\n User.user_list.remove(self)", "def rm_favoriting_user_id(self, circuit_id, user_id):\n key = ':'.join(\n [CIRCUIT_FAV_USRS_1, \n str(circuit_id), \n CIRCUIT_FAV_USRS_2]\n )\n self.RS.srem(key, user_id)", "def deleteUser(self, userList, index):\n\n if(self.adminAccess):\n ret = userList.pop(index)\n print(\"User has been deleted\")\n \n return userList", "def unfollow(self, followerId, followeeId):\r\n self.follows[followerId].discard(followeeId)", "def unfriend(self, removee):\n\t\tremover_friend_list = self # person terminating the friendship\n\n\t\t# Remove friend from friend request\n\t\tremover_friend_list.remove_friend(removee)\n\n\t\t# Remove friend from the removeee friend list\n\t\tfriends_list = FriendList.objects.get(user=removee)\n\t\tfriends_list.remove_friend(self.user)", "def delete_by(self, user):\n if user.is_superuser or user is self.added_by:\n self.delete()", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def del_user(self, username):\n pass", "def delete_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"delete_user\")", "def delete_user():\n del globalopts.appdata[request.user]\n del globalopts.users[request.user]\n return \"\", 200", "def remove(self, user):\n if user != self.head:\n user.group = None\n user.save()\n self.players.remove(user)", "def delete_user(id):\n pass", "def unfollow(self, user_index, following_index):\n if user_index >= self.num_users or following_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user_index} and {following_index} were requested.\"\n )\n if self.users_hat[following_index, user_index] == 1:\n self.users_hat[following_index, user_index] = 0\n elif self.is_verbose():\n self.log(f\"User {following_index} was not following user {user_index}\")", "def stop_following(follow_id):\n\n followed_user = User.query.get(follow_id)\n g.user.following.remove(followed_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def remove_user(user):\n # user.confirmed = False\n # user = get_user_by_phone(phone_num)\n db.session.delete(user)\n db.session.commit()\n\n return user\n # DELETE FROM users WHERE user.phone_num == phone)", "def del_user(request):\r\n mdict = request.matchdict\r\n\r\n # Submit a username.\r\n del_username = mdict.get('username', None)\r\n\r\n if del_username is None:\r\n LOG.error('No username to remove.')\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: No username to remove.',\r\n })\r\n\r\n u = UserMgr.get(username=del_username)\r\n\r\n if not u:\r\n LOG.error('Username not found.')\r\n request.response.status_int = 404\r\n return _api_response(request, {\r\n 'error': 'User not found.',\r\n })\r\n\r\n try:\r\n # First delete all the tag references for this user's bookmarks.\r\n res = DBSession.query(Bmark.bid).filter(Bmark.username == u.username)\r\n bids = [b[0] for b in res]\r\n\r\n qry = bmarks_tags.delete(bmarks_tags.c.bmark_id.in_(bids))\r\n qry.execute()\r\n\r\n # Delete all of the bmarks for this year.\r\n Bmark.query.filter(Bmark.username == u.username).delete()\r\n DBSession.delete(u)\r\n return _api_response(request, {\r\n 'success': 
True,\r\n 'message': 'Removed user: ' + del_username\r\n })\r\n except Exception, exc:\r\n # There might be cascade issues or something that causes us to fail in\r\n # removing.\r\n LOG.error(exc)\r\n request.response.status_int = 500\r\n return _api_response(request, {\r\n 'error': 'Bad Request: ' + str(exc)\r\n })", "def remove(self, update, context):\n\n telegram_user = update.message.from_user\n if len(context.args) != 1:\n message = (\n \"To remove a subscriptions from your list please use /remove <entryname>. To see all your \"\n \"subscriptions along with their entry names use /list ! \"\n )\n update.message.reply_text(message)\n return\n\n entry = self.db.get_user_bookmark(telegram_id=telegram_user.id, alias=context.args[0])\n if entry:\n self.db.remove_user_bookmark(telegram_id=telegram_user.id, url=entry[0])\n message = \"I removed \" + context.args[0] + \" from your subscriptions!\"\n update.message.reply_text(message)\n else:\n message = (\n \"I can not find an entry with label \"\n + context.args[0]\n + \"in your subscriptions! Please check your subscriptions using /list and use the delete command \"\n \"again! \"\n )\n update.message.reply_text(message)", "def _remove_user(self):\n name = False\n while not name: #While name not set\n name = input(\"Please enter the username of the user you would like to remove: \").lower()\n userID = self._get_user_id(name)\n if not userID:\n name = False\n command = \"remove_user {0}\\r\\n\".format(userID)\n return(command)", "def stop_following(follow_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n followed_user = User.query.get_or_404(follow_id)\n g.user.following.remove(followed_user)\n db.session.commit()\n\n return redirect(f\"/users/{g.user.id}/following\")", "def fusion_api_remove_user(self, name=None, uri=None, api=None, headers=None):\n return self.user.delete(name, uri, api, headers)", "def remove_user(self, email):\n try:\n self.users.pop(email)\n except KeyError:\n raise UserDoesNotExist", "def onUserDeletion(event):\n request = getRequest()\n if not IProductLayer.providedBy(request):\n return\n\n client = getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n\n principal_id = event.principal\n principal_jid = xmpp_users.getUserJID(principal_id)\n\n pass_storage = getUtility(IXMPPPasswordStorage)\n pass_storage.remove(principal_id)\n\n d = users.deletePrincipal(client, principal_jid)\n return d", "def removeUser(self, fullName):\n logger.debug(\"Func: removeUser\")\n\n # old Name removeUser\n currentDB = self._loadUsers()\n del currentDB[fullName]\n self._dumpJson(currentDB, self._pathsDict[\"usersFile\"])\n self._usersDict = currentDB\n return None, None", "def onUserDeletion(event):\n client = getUtility(IAdminClient)\n xmpp_users = getUtility(IXMPPUsers)\n storage = getUtility(IPubSubStorage)\n\n principal_id = event.principal\n principal_jid = xmpp_users.getUserJID(principal_id)\n\n if principal_id in storage.leaf_nodes:\n storage.leaf_nodes.remove(principal_id)\n if principal_id in storage.publishers:\n del storage.publishers[principal_id]\n if principal_id in storage.node_items:\n del storage.node_items[principal_id]\n if principal_id in storage.collections['people']:\n storage.collections['people'].remove(principal_id)\n\n pass_storage = getUtility(IXMPPPasswordStorage)\n pass_storage.remove(principal_id)\n\n d = deletePrincipal(client, principal_jid)\n return d", "def unfollow(self, followerId: 'int', followeeId: 'int') -> 'None':\n 
self.followees[followerId].discard(followeeId)", "def remove_user(user_id):\n user = Users.query.get(user_id)\n if user_id in [0, 1]:\n return 'Removal of default User #%s (%s) is forbidden.' % (user_id, user.login), 'warning'\n db_session.delete(user)\n db_session.commit()\n return 'User #%s (%s) has been deleted.' % (user_id, user.login), 'success'", "def user_disappears(self, user):\n pass", "def delete_model(self, request, obj):\n From = User.objects.get(id=obj.From.id)\n To = User.objects.get(id=obj.To.id)\n From.following_numDe()\n To.followed_numDe()\n obj.delete()", "def unfollow_profile(self):\n self.find_clickable_element(self.ISFOLLOWED_BTN).click()", "async def removeuser(self, ctx, user: discord.Member):\n\n if check_key(user.id):\n delete_key(user.id)\n await self.bot.say(\"{}, you are way out of this league.\".format(user.mention))\n else:\n await self.bot.say(\"That user does not exist in this league.\")", "def unfollow_me(self):\n return self.follow_me('unfollow_topic')", "def deleted_user(request):\n auth.logout(request)\n messages.success(request, \"Your profile has been deleted. Please contact us if you want to undo this.\")\n return redirect(reverse('index'))", "def follow_user(cls, user, following):\r\n pass", "def unfollow(self, followerId: int, followeeId: int) -> None:\n self.followees[followerId].discard(followeeId)", "def remove_users(caller, role, *users):\r\n # can always remove self (at this layer)\r\n if not(len(users) == 1 and caller == users[0]):\r\n _check_caller_authority(caller, role)\r\n role.remove_users(*users)", "def on_removeuser(self, username):\n self.users.remove(username)\n print ('%s left the room.' % username)", "def remove_admin(self, project_id, user_id):\n current_user = request.environ.get('repoze.who.identity')['user']\n user = controller_globals._get_user_from_email(current_user.email)\n\n # make sure we're actually the project lead\n if not self._current_user_leads_review(project_id):\n return \"<font color='red'>tsk, tsk. 
you're not the project lead, %s.</font>\" % user.fullname\n\n leader_to_remove = Session.query(model.User).filter_by(id=user_id).one()\n review = self._get_review_from_id(project_id)\n review.leaders.remove(leader_to_remove)\n Session.add(review)\n Session.commit()\n\n redirect(url(controller=\"review\", action=\"admin\", project_id=project_id))", "def unfollow(self, other):\n\t\tif self.follows(other):\n\t\t\tself.followed.remove(other)", "def view_remove_user(self, user, username):\r\n user.realm._checker.removeUser(username)", "def auto_unfollow_nonfollowers():\n\n following = set(t.friends.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n followers = set(t.followers.ids(screen_name=TWITTER_HANDLE)[\"ids\"])\n\n # put user IDs here that you want to keep following even if they don't\n # follow you back\n users_keep_following = set([])\n\n not_following_back = following - followers\n\n # make sure the \"already followed\" file exists\n if not os.path.isfile(ALREADY_FOLLOWED_FILE):\n with open(ALREADY_FOLLOWED_FILE, \"w\") as out_file:\n out_file.write(\"\")\n\n # update the \"already followed\" file with users who didn't follow back\n already_followed = set(not_following_back)\n af_list = []\n with open(ALREADY_FOLLOWED_FILE) as in_file:\n for line in in_file:\n af_list.append(int(line))\n\n already_followed.update(set(af_list))\n del af_list\n\n with open(ALREADY_FOLLOWED_FILE, \"w\") as out_file:\n for val in already_followed:\n out_file.write(str(val) + \"\\n\")\n\n for user_id in not_following_back:\n if user_id not in users_keep_following:\n t.friendships.destroy(user_id=user_id)\n print(\"unfollowed %d\" % (user_id))", "def author_following(self):\n\t\tpass", "def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def test_unfollow_user_without_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response = self.client.delete(self.unfollow_url,\n data=self.followed_user)\n self.assertEqual(response.data['detail'],\n \"Authentication credentials were not provided.\")\n\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_unfollowing_non_existing_user(self):\n response = self.client.delete(\n reverse(\n 'follow',\n kwargs={'username': 'NotThere'}\n )\n )\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)", "def user_playlist_unfollow(self, user, playlist_id, **kwargs):\n return self._delete(\n \"users/%s/playlists/%s/followers\" % (user, playlist_id), **kwargs\n )", "def staff_remove(request):\n username = request.params['remove']\n user = models.User.get_by_username(username)\n user.staff = False\n return httpexceptions.HTTPSeeOther(\n location=request.route_url('admin_staff'))", "def get_sentinel_user():\n return get_user_model().objects.get_or_create(username='deleted user').first()", "def getFollowers():\n\n cur, user_id = initialise(3)\n cur.execute(\"SELECT following FROM followers WHERE user = (SELECT username FROM users WHERE id = ?)\", [user_id])\n tempFollowers = cur.fetchall()\n followers = []\n for follower in tempFollowers:\n followers.append(follower[0])\n return followers", "def 
reject_follow(made_request_id, approver_id):\n\n if not g.user.id == approver_id:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\"), 403\n\n wanted_to_follow_user = User.query.get_or_404(made_request_id)\n g.user.from_users.remove(wanted_to_follow_user)\n db.session.commit()\n flash(f\"Follow request from {wanted_to_follow_user.username} rejected.\", \"success\")\n\n return redirect(f\"/users/{g.user.id}\")", "def channel_removeowner(token, channel_id, u_id):\n # Check if token is valid and raise AccessError if not\n curr_id = database.get_current_user(token)\n # gets current channel data\n curr_channel = database.get_channel_data(channel_id)\n # gets the permissions of current user from database\n user_perms = database.get_permission_dict(curr_id)\n\n u_id_permission = database.get_permission_dict(u_id)\n if u_id_permission[\"permission_id\"] == 1:\n raise error.AccessError(description=\"user being removed is the owner of the slackr\")\n\n # checks if u_id is not an owner of the channel\n # also checks if current auth user is an owner of the channel\n is_u_owner = False\n is_curr_owner = False\n for owner_id in curr_channel[\"owner_ids\"]:\n if u_id == owner_id:\n is_u_owner = True\n if curr_id == owner_id:\n is_curr_owner = True\n if is_u_owner is False:\n raise error.InputError(description=\"user being removed is not an owner of the channel\")\n\n\n # if the auth user is owner of slackr, allows him to remove u_id as owner\n if user_perms[\"permission_id\"] == 1:\n # removes the user from channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # if the auth user is an owner of the channel, allow him to remove u_id as owner of channel\n elif is_curr_owner is True:\n # adds the user into channel_owner\n curr_channel[\"owner_ids\"].remove(u_id)\n # else the auth user is not an owner and thus cannot use addowner\n else:\n raise error.AccessError(description=\"\"\"Authorised user user is not an owner of the channel,\n or of the slackr\"\"\")", "def unfriend(self, user_id, target_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n if target_id is None or len(target_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_friend(user_id, target_id)", "def _user_delete(sender, instance, using, **kwargs):\n Booking.objects.filter(requester=instance).update(\n requester=get_sentinel_user(instance.group)\n )", "def remove_user(self, email):\n return run_transaction(\n self.sessionfactory,\n lambda session: remove_user_txn(session, email))", "def unfollow(username):\n follow_form = FollowForm()\n unfollow_form = UnfollowForm()\n\n if unfollow_form.validate_on_submit():\n try:\n current_user.unfollow(username)\n flash('Unfollowed {}'.format(username))\n except ValueError as excep:\n flash(str(excep))\n return render_template('user_history.html',\n username=username,\n follow_form=follow_form,\n unfollow_form=unfollow_form)\n\n return render_template('user_history.html',\n username=username,\n follow_form=follow_form,\n unfollow_form=unfollow_form)" ]
[ "0.7169295", "0.70720047", "0.70225513", "0.699361", "0.6943185", "0.682216", "0.6751137", "0.67458344", "0.6741086", "0.6738047", "0.6737277", "0.6707055", "0.66615844", "0.6618556", "0.6605784", "0.6594128", "0.65536183", "0.6531541", "0.6502671", "0.6478856", "0.64270645", "0.6386949", "0.63730955", "0.63557", "0.635173", "0.635173", "0.635173", "0.63329494", "0.6327335", "0.6309968", "0.6284286", "0.627003", "0.62681854", "0.6265805", "0.6249155", "0.62380797", "0.62123746", "0.6208475", "0.6192651", "0.6165077", "0.61405295", "0.61282945", "0.61005497", "0.6094172", "0.60859257", "0.6083459", "0.60790837", "0.60442585", "0.60070646", "0.6006908", "0.6003245", "0.5998903", "0.5991501", "0.5990848", "0.59900767", "0.5989706", "0.5987541", "0.59701294", "0.5960098", "0.59539247", "0.5953452", "0.59527725", "0.59405565", "0.5927349", "0.5921064", "0.59168595", "0.5914433", "0.5906906", "0.5902731", "0.5901519", "0.5894958", "0.5879183", "0.5874065", "0.5859014", "0.58507174", "0.5846768", "0.5834417", "0.583422", "0.5822393", "0.58210754", "0.5820766", "0.5820664", "0.5815014", "0.58138", "0.58135366", "0.5808244", "0.5802004", "0.57948285", "0.5790052", "0.578862", "0.5768945", "0.5764873", "0.57522005", "0.57428735", "0.5737553", "0.5730244", "0.5726366", "0.5717167", "0.5716724", "0.5716476" ]
0.65360326
17
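The removal/unfollow negatives above mostly reduce to one in-memory pattern: a dict of sets keyed by follower id. Below is a minimal runnable sketch of that pattern; the class and method names are illustrative assumptions, not taken from any record.

# Minimal sketch of the in-memory follow-map pattern that recurs in the
# negatives above (a dict of sets keyed by follower id); names are illustrative.
from collections import defaultdict

class FollowRegistry:
    def __init__(self):
        # follower id -> set of followee ids
        self.follow_map = defaultdict(set)

    def follow(self, follower_id, followee_id):
        # Disallow self-follows, as several of the snippets above do.
        if follower_id != followee_id:
            self.follow_map[follower_id].add(followee_id)

    def unfollow(self, follower_id, followee_id):
        # discard() is a no-op when the pair was never recorded, so no
        # membership check is needed (cf. the remove() variants above).
        self.follow_map[follower_id].discard(followee_id)

registry = FollowRegistry()
registry.follow(1, 2)
registry.unfollow(1, 2)   # removes the edge
registry.unfollow(1, 3)   # safe no-op
print(dict(registry.follow_map))  # {1: set()}

Using set.discard() instead of set.remove() makes unfollow idempotent, which is why the discard-based snippets above can skip the explicit membership check that the remove-based ones need.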
Returns true if the current user is following the desired user
def is_following(self, user):
    return self.followed.filter(followers.c.followed_id == user.id).count() > 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_user_following(self, user_id):\n return user_id in self.following", "def is_following(self, you, them):\n if self.filter(from_user=you, to_user=them).count() > 0:\n return True\n return False", "def is_following(self, user):\n return self.followed.filter_by(\n followed_id=user.id).first() is not None", "def is_follower(self, you, them):\n if self.filter(from_user=them, to_user=you).count() > 0:\n return True\n return False", "def is_followed_by(self, user):\n return self.followers.filter_by(\n follower_id=user.id).first() is not None", "def is_authenticated_user_following(self, username=None,id=None):\n if not self.is_authenticated:\n raise PicplzError(\"is_authenticated_user_following requires an authenticated API instance\")\n \n return None", "def is_following_by_username(self, id):\n return self.followed.filter(followers.c.followed_id == id).count() > 0", "def user_follow(self, user_id: int) -> bool:\n assert self.user_id, \"Login required\"\n user_id = int(user_id)\n if user_id in self._users_following.get(self.user_id, []):\n self.logger.debug(\"User %s already followed\", user_id)\n return False\n data = self.with_action_data({\"user_id\": user_id})\n result = self.private_request(f\"friendships/create/{user_id}/\", data)\n if self.user_id in self._users_following:\n self._users_following.pop(self.user_id) # reset\n return result[\"friendship_status\"][\"following\"] is True", "def belongs_to_user(self) -> bool:\n return flask.g.user is not None and flask.g.user.id == getattr(\n self, 'user_id', False\n )", "def follows(self, other):\n\t\treturn self.followed.filter(followers.c.followed_by == other.id).count() > 0", "def follow_user(cls, user, following):\r\n pass", "def test_is_following(self):\n\n self.u1.following.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_following(self.u2))\n self.assertFalse(self.u2.is_following(self.u1))", "def is_user_player(self, user):\n return self.user == user", "async def is_following(self, TargetId: int):\n\n data = {\"targetUserIds\": [TargetId]}\n e = await self.request.request(url=f'https://friends.roblox.com/v1/user/following-exists',\n method='post',\n data=data)\n return e['followings'][0]['isFollowing']", "def follow_user(cls, user, following):\n pass", "def test_is_followed_by(self):\n\n self.u1.followers.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_followed_by(self.u2))\n self.assertFalse(self.u2.is_followed_by(self.u1))", "def same_user_or_shiftleader(self, user):\n try:\n return (\n self.get_object().userid == user\n or user.is_superuser\n or user.userprofile.has_shift_leader_rights\n )\n except UserProfile.DoesNotExist:\n return False", "def is_following(source_id, destination_id):\n if Forward.objects.filter(source_id=source_id,\n destination_id=destination_id):\n return True\n return False", "def has_user(self, user): # pylint: disable=unused-argument\r\n return False", "def author_following(self):\n\t\tpass", "def follow(self, user_index, following_index):\n if user_index >= self.num_users or following_index >= self.num_users:\n raise ValueError(\n f\"Number of users is {self.num_users}, but indices \"\n f\"{user_index} and {following_index} were requested.\"\n )\n if self.users_hat[following_index, user_index] == 0:\n self.users_hat[following_index, user_index] = 1\n elif self.is_verbose():\n self.log(f\"User {following_index} was already following user {user_index}\")", "def user_playlist_is_following(self, playlist_id, user_ids, **kwargs):\n return self._get(\n 
API.PLAYLIST_FOLLOWERS_CONTAINS.value.format( # pylint: disable=no-member\n playlist_id=playlist_id\n ),\n ids=\",\".join(user_ids),\n **kwargs,\n )", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def follow(self, user):\n if not self.is_following(user):\n self.followed.append(user)\n return self", "def user_present(ctx: Context, channel: TextChannel) -> bool:\n for member in channel.members:\n if member.id == ctx.author.id:\n return True\n\n return False", "def _has_following_ratio_of(self, user: _InstagramUser, ratio: float) -> bool:\n follower_count = len(self._user_follower_info(uid=user.uid))\n following_count = len(self._user_following_info(uid=user.uid))\n\n if follower_count == 0:\n return True\n\n return (following_count / follower_count) > ratio", "def current_user_has_access(self):\n return self.user_has_access(users.get_current_user())", "def user_follow():\n data = request.get_json(force=True)\n follower = User.query.get(data['follower'])\n following = User.query.get(data['following'])\n follower.followcheck.append(following)\n db.session.commit()\n return {'followed': True}", "def checkIfUserIsCurrent(self,userId : str) -> bool:\n\n if userId == userId[0]:\n return True\n else:\n return False", "def is_user_playing(self, user):\n return user in self.active_games", "def followed_by(self):\r\n return relationships.FollowedBy(self)", "def doesfollow(user):\n return jsonify({\n 'follows': isFollowed(g.user,user)\n })", "def followed_by(self):\n return relationships.FollowedBy(self)", "def is_owned_by(self, user):\n return user and user.id == self.user_id", "def user_is_same(current_user_number, past_user_numbers):\n if current_user_number == past_user_numbers:\n return True\n else:\n return False", "def ref_user_flag(self):\n try:\n ref = User.objects.get(\n associated_emails__email__iexact=self.reference_email,\n associated_emails__is_verified=True)\n return True\n except ObjectDoesNotExist:\n return False", "def follow(self, user):\n if not self.is_following(user):\n f = Follow(follower=self, followed=user)\n db.session.add(f)", "def test_following_existing_user(self):\n response = self.client.post(\n reverse(\n 'follow',\n kwargs={'username': self.followed['user'].get('username')}\n )\n )\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)", "def is_user_change_required(self):\n return self.__running_user != self.__desired_user", "def follow_user(self, user):\n self.nav_user(user)\n follow_button = self.driver.find_element_by_xpath(\n \"//button[contains(text(), 'Follow')]\")\n follow_button.click()\n time.sleep(1)\n self.driver.get(self.base_url)", "def following(self):\n return self.data.get(\"following\")", "def check_active(self, user):\r\n if not self.require_active:\r\n # Ignore & move on.\r\n return True\r\n\r\n return user.is_active", "def is_me(self, m):\n return m.author == self.client.user", "def test_func(self):\n answer = self.get_object()\n return True if self.request.user == answer.author or self.request.user.is_superuser else False", "def is_user_already_followed(followed_user_id, user_id):\n result = Follow.objects.filter(followed_user=followed_user_id,\n user=user_id).exists()\n return result", "def follow(request, usertofollow):\n to_follow = Member.objects.get(user__username=usertofollow)\n user = Member.objects.get(user=request.user)\n user.following.add(to_follow)\n user.save()\n return redirect(request.META['HTTP_REFERER'])", "def _is_follow_request(environ, result):\n r = 
Request(environ)\n if r.params.get(\"action\") == \"follow\":\n return True\n else:\n return False", "def was_followed(sender, instance, created, **kwargs):\n\n sendr = User.objects.get(id=instance.user_id)\n followed = User.objects.get(id=instance.followed_user_id)\n if created:\n notify.send(sender=sendr, recipient=followed, verb='followed',\n description=\"{} followed you.\".format(sendr.username))", "def is_visible_to(self, user):\n return True", "def user_unfollow(self, user_id: int) -> bool:\n assert self.user_id, \"Login required\"\n user_id = int(user_id)\n data = self.with_action_data({\"user_id\": user_id})\n result = self.private_request(f\"friendships/destroy/{user_id}/\", data)\n if self.user_id in self._users_following:\n self._users_following[self.user_id].pop(user_id, None)\n return result[\"friendship_status\"][\"following\"] is False", "def get_is_self(self, obj: Profile) -> bool:\n request: HttpRequest = self.context.get('request')\n if request:\n if request.user.is_authenticated:\n return obj == request.user.profile\n return False", "def save_following(self, user, path=None):\n # Redis has an end_cursor if we've collected this data before\n end_cursor = self.redis.get(''.join(['gh:', user.login, ':following:endCursor']))\n if end_cursor:\n end_cursor = end_cursor.decode('utf-8')\n end_cursor = ''.join(['\"', end_cursor, '\"'])\n following = u.following(first=100, after=end_cursor)\n else:\n following = u.following(first=100)\n if not following:\n return False\n while True:\n if following['data']['user']['following']['edges']:\n index = ''.join(['gh_following-', self.timestamp])\n self._write_to_datastore(index=index,\n doc_type='GithubFollowing',\n document=following,\n login=user.login,\n path=path)\n has_next_page = following['data']['user']['following']['pageInfo']['hasNextPage']\n end_cursor = following['data']['user']['following']['pageInfo']['endCursor']\n if has_next_page:\n following = u.following(first=100, after=end_cursor)\n else:\n # Cache the end_cursor where we last collected data\n self.redis.set(''.join(['gh:', u.login, ':following:endCursor']), end_cursor)\n break\n else:\n break\n\n return True", "def test_person_is_following(self):\n person = Person.objects.create(\n username='tom', email='tom@gmail.com', password='fake_password'\n )\n self.assertFalse(person.is_following(1))\n show = FollowedShows.objects.create(\n user=person,\n show_name='show1',\n show_id=1,\n air_days='monday, tuesday',\n air_time='10:00',\n summary='summary here',\n network='network here'\n )\n self.assertTrue(person.is_following(show.show_id))", "def check_inflight_already_running(self, user: Identifier) -> bool:\n with self._lock:\n for flow in self.in_flight:\n if flow.requestor == user:\n return True\n return False", "def test_user_already_followed(self):\n self.authorize_user(self.user)\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response2 = self.client.post(self.follow_url, format='json')\n self.assertEqual(response2.content,\n b'{\"detail\": {\"error\": \"user already followed\"}}')\n self.assertEqual(response2.status_code, status.HTTP_400_BAD_REQUEST)", "def can_add_player(self, user):\n user_profile = user.get_profile()\n if user_profile.credits < self.entrance_fee:\n return False\n if self.is_user_playing(user):\n return False\n return True", "def follow(self, other):\n\t\tif not self.follows(other):\n\t\t\tself.followed.append(other)", "def 
test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def follow_user(self:'InstaClient', user:str, nav_to_user:bool=True):\n # Check User Vadility\n profile = self.get_profile(user)\n if not profile:\n raise InvalidUserError(user)\n\n # Navigate to User Page\n self._nav_user(user, check_user=False)\n \n if self._check_existence(EC.presence_of_element_located((By.XPATH, Paths.MESSAGE_USER_BTN))):\n # User already followed\n pass\n else:\n follow_button = self._find_element(EC.presence_of_element_located((By.XPATH, Paths.FOLLOW_BTN)), url=ClientUrls.NAV_USER.format(user))\n self._press_button(follow_button)\n profile.requested_by_viewer = True\n return profile", "def logged_in(self):\n return self.user is not None", "def is_registered(self):\n if self.user == getpass.getuser():\n return True\n else:\n return False", "def is_participant(self):\n if self.user is None:\n return False\n if unicode(self.user._id) in self.barcamp.event.participants:\n return True\n return False", "def authorizes(self, user):\n return self.owner == user or self.workers.filter(pk=user.id).exists()", "def accept_follow_request(request):\n username = request.POST[\"user\"]\n requester = get_user_from_username(request.user, username)\n\n try:\n follow_request = models.UserFollowRequest.objects.get(\n user_subject=requester, user_object=request.user\n )\n except models.UserFollowRequest.DoesNotExist:\n # Request already dealt with.\n return redirect(request.user.local_path)\n follow_request.accept()\n\n return redirect(request.user.local_path)", "def can_update_user(cls, db_tuple, target, actor):\n return target.user_id == actor.user_id", "def test_following(self):\n\n follow1 = Follows(\n user_being_followed_id = self.testuser2.id,\n user_following_id = self.testuser.id\n )\n follow2 = Follows(\n user_being_followed_id = self.testuser3.id,\n user_following_id = self.testuser.id\n )\n\n db.session.add_all((follow1, follow2))\n db.session.commit()\n\n with self.client as c:\n with c.session_transaction() as session:\n session[CURR_USER_KEY] = self.testuser.id\n \n response = c.get(f\"/users/{self.testuser.id}/following\")\n data = str(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@bob\", data)\n self.assertIn(\"@carl\", data)\n self.assertNotIn(\"@alvin\", data)", "def following_and_storing(self, user_obj):\n if self.following(user_obj['user']):\n self.monitored_users.append({'user': user_obj['user'], 'username': user_obj['username'],\n 'followDate': datetime.now().timestamp()})", "def user_appears(self, user):\n pass", "def check_user(self):\n try:\n if self.get_customer()[0][0] == self.dni:\n return True\n else:\n return False\n except:\n return False", "def follow(self, follower, followee):\n pass", "def test_func(self):\n member_to_finish = self.get_object()\n return self.request.user.rfid == member_to_finish.rfid", "def test_user_follow_self(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n response = self.client.post(self.follow_self_url, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def following_changed(sender, action, instance, *args, **kwargs):\n\n # m2mchanged.connect specified in apps.py\n\n following = instance.following.all()\n creator = instance.user\n\n if creator in following:\n raise ValidationError (\"can't like own post\")", "def 
follows(self, name):\r\n url = '{0}/{1}/{2}'.format(self.get_url(), 'following', name)\r\n\r\n return http.Request('GET', url), resource.parse_boolean", "def test_func(self):\n post = self.get_object()\n if self.request.user == post.author:\n return True\n return False", "def is_active_user(self):\n\n return self.is_active", "def is_user(self, user='') -> int:\n try:\n if user in self.users:\n return(1)\n else:\n return(0)\n except Exception as error:\n print(f\"Error: self.is_user({user}) -> {error}\")", "def author_is_me(message: discord.Message) -> bool:\n return message.author == config.bot.user", "def liked_by(self, user):\n return Likes.objects.filter(recipe=self, chef=user).exists()", "def test_get_list_of_following_users_with_auth(self):\n with self.settings(\n EMAIL_BACKEND='django.core.mail.backends.locmem.EmailBackend'):\n self.authorize_user(self.user)\n self.register_user(self.user1)\n self.client.post(self.follow_url, format='json')\n response = self.client.get(self.following_list_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False", "def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False", "def test_func(self):\r\n post = self.get_object()\r\n if self.request.user == post.author:\r\n return True\r\n return False", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def is_profile_complete(self):\n return bool(self.fullname and self.username and self.email)", "def user_has_access(self, user):\n if self.visibility == self.PUBLIC:\n return True\n elif self.visibility == self.PRIVATE and self.created_by == user:\n return True\n elif self.visibility in (self.ORG_ONLY, self.ORG_ONLY_NO_EXTERNAL):\n if user.external and self.visibility == self.ORG_ONLY_NO_EXTERNAL:\n return False\n elif self.organization.memberships.filter(user=user).count() >= 1:\n return True\n return False", "def has_access(self, user):\n if user.is_superuser:\n return True\n return self.user_objects(user).filter(id=self.id).exists()", "def is_active(self):\n return self.status == ACTIVE_USER", "def gatekeeper():\n\n if user.name in GATEKEEPERS:\n return True\n\n return False", "def is_leader(self):\n return self.__is_leader", "def early_return(bot:Bot, ctx:Context):\n return ctx.message.author.bot or ctx.message.author.id == bot.user.id", "def can_be_viewed_by(self,user):\n return True", "def show_following(user_id):\n\n\n user = User.query.get_or_404(user_id)\n return render_template('users/following.html', user=user)", "def test_follow_manually_approved(self):\n activity = {\n \"@context\": \"https://www.w3.org/ns/activitystreams\",\n \"id\": \"https://example.com/users/rat/follows/123\",\n \"type\": \"Follow\",\n \"actor\": \"https://example.com/users/rat\",\n \"object\": \"https://example.com/user/mouse\",\n }\n\n self.local_user.manually_approves_followers = True\n self.local_user.save(\n broadcast=False, update_fields=[\"manually_approves_followers\"]\n )\n\n with patch(\"bookwyrm.models.activitypub_mixin.broadcast_task.apply_async\"):\n views.inbox.activity_task(activity)\n\n # notification created\n notification = models.Notification.objects.get()\n self.assertEqual(notification.user, self.local_user)\n self.assertEqual(notification.notification_type, \"FOLLOW_REQUEST\")\n\n # the request should exist\n request = models.UserFollowRequest.objects.get()\n 
self.assertEqual(request.user_subject, self.remote_user)\n self.assertEqual(request.user_object, self.local_user)\n\n # the follow relationship should not exist\n follow = models.UserFollows.objects.all()\n self.assertEqual(list(follow), [])", "def is_logged_in(self) -> bool:\n return self.id is not None and self.username is not None", "def is_participant(self,user):\n if user.is_superuser:\n return True\n\n if user.groups.filter(name=self.participants_group_name).count() > 0:\n return True\n else:\n return False", "def still_deciding(self):\n for player in self.players:\n if isinstance(player, user.User):\n if not player.has_played:\n return True\n return False", "def is_eligible(self, myself):\n if self.author().screen_name == myself.screen_name:\n log_.debug(\"Not replying to my own tweets\")\n return False\n if self.is_retweet():\n log_.debug(\"Not processing pure retweets\")\n return False\n return True", "def follows(self):\r\n return relationships.Follows(self)" ]
[ "0.7629527", "0.7531042", "0.7460013", "0.73141354", "0.72661006", "0.70735085", "0.70591176", "0.7049096", "0.6914572", "0.69122463", "0.68232995", "0.6788982", "0.67763567", "0.6769098", "0.6714947", "0.65794", "0.6538119", "0.652213", "0.6521558", "0.64769983", "0.64418167", "0.64249146", "0.63472104", "0.63472104", "0.6337133", "0.63186246", "0.6311464", "0.63054794", "0.62878215", "0.6269004", "0.6267999", "0.6235971", "0.6171771", "0.61068225", "0.60893387", "0.6071489", "0.6061535", "0.6047455", "0.60352004", "0.60232085", "0.599043", "0.5983538", "0.59715176", "0.5964473", "0.59626156", "0.595811", "0.59545606", "0.5939112", "0.5935984", "0.5934003", "0.5921513", "0.5916213", "0.59051514", "0.5867161", "0.58584785", "0.5838697", "0.582179", "0.58127296", "0.578747", "0.57797843", "0.57761806", "0.5770214", "0.5755082", "0.5749518", "0.5748161", "0.573074", "0.57289755", "0.57272846", "0.5726887", "0.5716321", "0.57141", "0.5707292", "0.57056195", "0.57047826", "0.56861633", "0.5684884", "0.56845057", "0.5681332", "0.56799924", "0.56756353", "0.5672672", "0.5672672", "0.5672672", "0.56680423", "0.5663431", "0.5659798", "0.5656082", "0.56489426", "0.56360656", "0.5634647", "0.5631955", "0.5615115", "0.5610024", "0.5607456", "0.5595033", "0.559239", "0.5585263", "0.5585249", "0.55837804" ]
0.7424364
4
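The document field of this record is the classic SQLAlchemy self-referential follower check. Below is a minimal self-contained sketch of that pattern, assuming a followers association table and a dynamic relationship on a Flask-style User model; the model, table, and column names here are illustrative assumptions, not part of the record.

# Minimal sketch of the follower-check pattern from the document above.
# Assumes a SQLAlchemy self-referential association table; all names are
# illustrative, not taken from the dataset record.
from sqlalchemy import Column, ForeignKey, Integer, String, Table, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()

# Association table linking a follower to the user being followed.
followers = Table(
    "followers",
    Base.metadata,
    Column("follower_id", Integer, ForeignKey("user.id")),
    Column("followed_id", Integer, ForeignKey("user.id")),
)

class User(Base):
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    username = Column(String(64), unique=True)
    followed = relationship(
        "User",
        secondary=followers,
        primaryjoin=(followers.c.follower_id == id),
        secondaryjoin=(followers.c.followed_id == id),
        backref="followers_list",
        lazy="dynamic",  # makes .followed a query, so .filter() works
    )

    def follow(self, user):
        if not self.is_following(user):
            self.followed.append(user)

    def is_following(self, user):
        # Same shape as the document: count matching association rows.
        return self.followed.filter(followers.c.followed_id == user.id).count() > 0

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    alice, bob = User(username="alice"), User(username="bob")
    session.add_all([alice, bob])
    alice.follow(bob)
    session.commit()
    print(alice.is_following(bob))  # True
    print(bob.is_following(alice))  # False

With lazy="dynamic" the followed attribute is a query object rather than a loaded list, which is what lets the document's method call .filter(...).count() directly instead of loading every followed user into memory.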